From 648dd7c38ba2a2a7caac6931a66cf50d0bc59a03 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 13 Feb 2025 11:34:00 +0900 Subject: [PATCH 001/117] Add python process how-to guides --- .../process/examples/example-cycles.md | 222 ++++++++++++++++- .../process/examples/example-first-process.md | 227 ++++++++++++++++-- 2 files changed, 421 insertions(+), 28 deletions(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-cycles.md b/semantic-kernel/Frameworks/process/examples/example-cycles.md index cfff1cb3..d46a2424 100644 --- a/semantic-kernel/Frameworks/process/examples/example-cycles.md +++ b/semantic-kernel/Frameworks/process/examples/example-cycles.md @@ -87,16 +87,74 @@ public class ProofreadStep : KernelProcessStep } ``` +A new step named `ProofreadStep` has been created. This step uses the LLM to grade the generated documentation as discussed above. Notice that this step conditionally emits either the `DocumentationApproved` event or the `DocumentationRejected` event based on the response from the LLM. In the case of `DocumentationApproved`, the event will include the approved documentation as it's payload and in the case of `DocumentationRejected` it will include the suggestions from the proofreader. ::: zone-end ::: zone pivot="programming-language-python" +```python +# A sample response model for the ProofreadingStep structured output +class ProofreadingResponse(BaseModel): + """A class to represent the response from the proofreading step.""" + + meets_expectations: bool = Field(description="Specifies if the proposed docs meets the standards for publishing.") + explanation: str = Field(description="An explanation of why the documentation does or does not meet expectations.") + suggestions: list[str] = Field(description="List of suggestions, empty if there no suggestions for improvement.") + + +# A process step to proofread documentation +class ProofreadStep(KernelProcessStep): + @kernel_function + async def proofread_documentation(self, docs: str, context: KernelProcessStepContext, kernel: Kernel) -> None: + print(f"{ProofreadStep.__name__}\n\t Proofreading product documentation...") + + system_prompt = """ + Your job is to proofread customer facing documentation for a new product from Contoso. You will be provide with + proposed documentation for a product and you must do the following things: + + 1. Determine if the documentation is passes the following criteria: + 1. Documentation must use a professional tone. + 1. Documentation should be free of spelling or grammar mistakes. + 1. Documentation should be free of any offensive or inappropriate language. + 1. Documentation should be technically accurate. + 2. If the documentation does not pass 1, you must write detailed feedback of the changes that are needed to + improve the documentation. 
+ """ + + chat_history = ChatHistory(system_message=system_prompt) + chat_history.add_user_message(docs) + + # Use structured output to ensure the response format is easily parsable + chat_service, settings = kernel.select_ai_service(type=ChatCompletionClientBase) + assert isinstance(chat_service, ChatCompletionClientBase) # nosec + assert isinstance(settings, OpenAIChatPromptExecutionSettings) # nosec + + settings.response_format = ProofreadingResponse + + response = await chat_service.get_chat_message_content(chat_history=chat_history, settings=settings) + + formatted_response: ProofreadingResponse = ProofreadingResponse.model_validate_json(response.content) + + suggestions_text = "\n\t\t".join(formatted_response.suggestions) + print( + f"\n\tGrade: {'Pass' if formatted_response.meets_expectations else 'Fail'}\n\t" + f"Explanation: {formatted_response.explanation}\n\t" + f"Suggestions: {suggestions_text}" + ) + + if formatted_response.meets_expectations: + await context.emit_event(process_event="documentation_approved", data=docs) + else: + await context.emit_event( + process_event="documentation_rejected", + data={"explanation": formatted_response.explanation, "suggestions": formatted_response.suggestions}, + ) +``` +A new step named `ProofreadStep` has been created. This step uses the LLM to grade the generated documentation as discussed above. Notice that this step conditionally emits either the `documentation_approved` event or the `documentation_rejected` event based on the response from the LLM. In the case of `documentation_approved`, the event will include the approved documentation as it's payload and in the case of `documentation_rejected` it will include the suggestions from the proofreader. ::: zone-end ::: zone pivot="programming-language-java" ::: zone-end -A new step named `ProofreadStep` has been created. This step uses the LLM to grade the generated documentation as discussed above. Notice that this step conditionally emits either the `DocumentationApproved` event or the `DocumentationRejected` event based on the response from the LLM. In the case of `DocumentationApproved`, the event will include the approved documentation as it's payload and in the case of `DocumentationRejected` it will include the suggestions from the proofreader. 
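+
+If you want to sanity-check the structured-output model used by the Python step above on its own, a minimal sketch (assuming only `pydantic` is installed and the `ProofreadingResponse` class from the step is in scope) could look like this:
+
+```python
+# Hypothetical stand-alone check of the ProofreadingResponse model defined above.
+sample_json = '{"meets_expectations": false, "explanation": "Tone is too casual.", "suggestions": ["Use a professional tone."]}'
+
+parsed = ProofreadingResponse.model_validate_json(sample_json)
+print(parsed.meets_expectations)  # False
+print(parsed.suggestions)         # ["Use a professional tone."]
+```
+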
- ### Update the documentation generation step ::: zone pivot="programming-language-csharp" @@ -160,16 +218,68 @@ public class GenerateDocumentationStep : KernelProcessStep None: + print(f"{GenerateDocumentationStep.__name__}\n\t Generating documentation for provided product_info...") + + self.state.chat_history.add_user_message(f"Product Information:\n{product_info}") + + chat_service, settings = kernel.select_ai_service(type=ChatCompletionClientBase) + assert isinstance(chat_service, ChatCompletionClientBase) # nosec + + response = await chat_service.get_chat_message_content(chat_history=self.state.chat_history, settings=settings) + + await context.emit_event(process_event="documentation_generated", data=str(response)) + + @kernel_function + async def apply_suggestions(self, suggestions: str, context: KernelProcessStepContext, kernel: Kernel) -> None: + print(f"{GenerateDocumentationStep.__name__}\n\t Rewriting documentation with provided suggestions...") + + self.state.chat_history.add_user_message( + f"Rewrite the documentation with the following suggestions:\n\n{suggestions}" + ) + + chat_service, settings = kernel.select_ai_service(type=ChatCompletionClientBase) + assert isinstance(chat_service, ChatCompletionClientBase) # nosec + + generated_documentation_response = await chat_service.get_chat_message_content( + chat_history=self.state.chat_history, settings=settings + ) + + await context.emit_event(process_event="documentation_generated", data=str(generated_documentation_response)) +``` + +The `GenerateDocumentationStep` has been updated to include a new KernelFunction. The new function will be used to apply suggested changes to the documentation if our proofreading step requires them. Notice that both functions for generating or rewriting documentation emit the same event named `documentation_generated` indicating that new documentation is available. ::: zone-end ::: zone pivot="programming-language-java" ::: zone-end -The `GenerateDocumentationStep` has been updated to include a new KernelFunction. The new function will be used to apply suggested changes to the documentation if our proofreading step requires them. Notice that both functions for generating or rewriting documentation emit the same event named `DocumentationGenerated` indicating that new documentation is available. - ### Flow updates ::: zone pivot="programming-language-csharp" @@ -209,25 +319,64 @@ var process = processBuilder.Build(); return process; ``` +Our updated process routing now does the following: +- When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `infoGatheringStep`. +- When the `infoGatheringStep` finishes running, send the returned object to the `docsGenerationStep`. +- When the `docsGenerationStep` finishes running, send the generated docs to the `docsProofreadStep`. +- When the `docsProofreadStep` rejects our documentation and provides suggestions, send the suggestions back to the `docsGenerationStep`. +- Finally, when the `docsProofreadStep` approves our documentation, send the returned object to the `docsPublishStep`. 
::: zone-end ::: zone pivot="programming-language-python" +```python +# Create the process builder +process_builder = ProcessBuilder(name="DocumentationGeneration") + +# Add the steps +info_gathering_step = process_builder.add_step(GatherProductInfoStep) +docs_generation_step = process_builder.add_step(GenerateDocumentationStep) +docs_proofread_step = process_builder.add_step(ProofreadStep) # Add new step here +docs_publish_step = process_builder.add_step(PublishDocumentationStep) + +# Orchestrate the events +process_builder.on_input_event("Start").send_event_to(target=info_gathering_step) + +info_gathering_step.on_function_result().send_event_to( + target=docs_generation_step, function_name="generate_documentation", parameter_name="product_info" +) + +docs_generation_step.on_event("documentation_generated").send_event_to( + target=docs_proofread_step, parameter_name="docs" +) + +docs_proofread_step.on_event("documentation_rejected").send_event_to( + target=docs_generation_step, + function_name="apply_suggestions", + parameter_name="suggestions", +) + +docs_proofread_step.on_event("documentation_approved").send_event_to(target=docs_publish_step) +``` + +Our updated process routing now does the following: +- When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `info_gathering_step`. +- When the `info_gathering_step` finishes running, send the returned object to the `docs_generation_step`. +- When the `docs_generation_step` finishes running, send the generated docs to the `docs_proofread_step`. +- When the `docs_proofread_step` rejects our documentation and provides suggestions, send the suggestions back to the `docs_generation_step`. +- Finally, when the `docs_proofread_step` approves our documentation, send the returned object to the `docs_publish_step`. ::: zone-end ::: zone pivot="programming-language-java" ::: zone-end -Our updated process routing now does the following: -- When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `infoGatheringStep` step. -- When the `infoGatheringStep` finishes running, send the returned object to the `docsGenerationStep` step. -- When the `docsGenerationStep` finishes running, send the generated docs to the `docsProofreadStep` step. -- When the `docsProofreadStep` rejects our documentation and provides suggestions, send the suggestions back to the `docsGenerationStep`. -- Finally, when the `docsProofreadStep` approves our documentation, send the returned object to the `docsPublishStep` step. + ### Build and run the Process Running our updated process shows the following output in the console: +::: zone pivot="programming-language-csharp" + ```md GatherProductInfoStep: Gathering product information for product named Contoso GlowBrew @@ -289,6 +438,59 @@ Introducing GlowBrew-your new partner in coffee brewing that brings together adv We hope you enjoy your GlowBrew experience and that it brings a delightful blend of flavor and brightness to your coffee moments! ``` +::: zone-end + +::: zone pivot="programming-language-python" + +```md +GatherProductInfoStep + Gathering product information for Product Name: Contoso GlowBrew +GenerateDocumentationStep + Generating documentation for provided product_info... +ProofreadStep + Proofreading product documentation... + + Grade: Pass + Explanation: The GlowBrew AI Coffee Machine User Guide meets all the required criteria for publishing. 
The document maintains a professional tone throughout, is free from spelling and grammatical errors, contains no offensive or inappropriate content, and appears to be technically accurate in its description of the product features and troubleshooting advice. + Suggestions: +PublishDocumentationStep + Publishing product documentation: + +# GlowBrew AI Coffee Machine User Guide + +Welcome to the future of coffee making with the GlowBrew AI Coffee Machine! Step into a world where cutting-edge technology meets exquisite taste, creating a coffee experience like no other. Designed for coffee aficionados and tech enthusiasts alike, the GlowBrew promises not just a cup of coffee, but an adventure for your senses. + +## Key Features + +### Luminous Brew Technology +Illuminate your mornings with the GlowBrew's mesmerizing programmable LED light shows. With an unmatched number of LEDs, the GlowBrew can transform your kitchen ambiance to sync perfectly with each stage of the brewing process. Choose from a spectrum of colors and patterns to set the perfect mood, whether you're winding down with a rich decaf or kick-starting your day with a bold espresso. + +### AI Taste Assistant +Expand your coffee horizons with the AI Taste Assistant, your personal barista that learns and evolves with your palate. Over time, GlowBrew adapts to your preferences, suggesting new and exciting brew combinations. Experience a variety of flavors, from single-origin specialties to intricate blend recipes, tailored to your unique taste. + +### Gourmet Aroma Diffusion +Enhance your coffee experience with unrivaled aromatic pleasure. The GlowBrew's built-in aroma diffusers release a carefully calibrated scent profile that awakens your senses, heightening anticipation for your first sip. It's not just a coffee machine, it's an indulgent sensory journey. + +## Troubleshooting + +### LED Lights Malfunctioning +If you experience issues with your LED lights: + +1. **Reset the LED Settings**: Use the GlowBrew app to navigate to the lighting settings and perform a reset. +2. **Check LED Connections**: Open the GlowBrew machine and ensure all LED wiring connections are secure. +3. **Perform a Factory Reset**: As a last resort, a full factory reset can resolve persistent issues. Follow the instructions in the user manual to perform this reset safely. + +## Experience the Glow + +With GlowBrew, every cup of coffee is an art form that combines luminous aesthetics, an intuitive learning AI, and the intoxicating allure of rich aromas. Make each morning magical and every break a celebration with the GlowBrew AI Coffee Machine. Brew brilliantly, taste innovatively, and glow endlessly. + +For more support, explore our comprehensive FAQ section or contact our dedicated customer service team. +``` +::: zone-end + +::: zone pivot="programming-language-java" +::: zone-end + ## What's Next? diff --git a/semantic-kernel/Frameworks/process/examples/example-first-process.md b/semantic-kernel/Frameworks/process/examples/example-first-process.md index 2fbe00d7..447fa929 100644 --- a/semantic-kernel/Frameworks/process/examples/example-first-process.md +++ b/semantic-kernel/Frameworks/process/examples/example-first-process.md @@ -34,6 +34,7 @@ dotnet add package Microsoft.SemanticKernel.Process.LocalRuntime --version 1.33. 
::: zone-end ::: zone pivot="programming-language-python" +pip install semantic-kernel==1.20.0 ::: zone-end ::: zone pivot="programming-language-java" @@ -144,15 +145,109 @@ public class PublishDocumentationStep : KernelProcessStep } ``` +The code above defines the three steps we need for our Process. There are a few points to call out here: +- In Semantic Kernel, a `KernelFunction` defines a block of code that is invocable by native code or by an LLM. In the case of the Process framework, `KernelFunction`s are the invocable members of a Step and each step requires at least one KernelFunction to be defined. +- The Process Framework has support for stateless and stateful steps. Stateful steps automatically checkpoint their progress and maintain state over multiple invocations. The `GenerateDocumentationStep` provides an example of this where the `GeneratedDocumentationState` class is used to persist the `ChatHistory` object. +- Steps can manually emit events by calling `EmitEventAsync` on the `KernelProcessStepContext` object. To get an instance of `KernelProcessStepContext` just add it as a parameter on your KernelFunction and the framework will automatically inject it. ::: zone-end ::: zone pivot="programming-language-python" -::: zone-end +```python +import asyncio +from typing import ClassVar + +from pydantic import BaseModel, Field + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistory +from semantic_kernel.functions import kernel_function +from semantic_kernel.processes import ProcessBuilder +from semantic_kernel.processes.kernel_process import KernelProcessStep, KernelProcessStepContext, KernelProcessStepState +from semantic_kernel.processes.local_runtime import KernelProcessEvent, start + + +# A process step to gather information about a product +class GatherProductInfoStep(KernelProcessStep): + @kernel_function + def gather_product_information(self, product_name: str) -> str: + print(f"{GatherProductInfoStep.__name__}\n\t Gathering product information for Product Name: {product_name}") + + return """ +Product Description: + +GlowBrew is a revolutionary AI driven coffee machine with industry leading number of LEDs and +programmable light shows. The machine is also capable of brewing coffee and has a built in grinder. + +Product Features: +1. **Luminous Brew Technology**: Customize your morning ambiance with programmable LED lights that sync + with your brewing process. +2. **AI Taste Assistant**: Learns your taste preferences over time and suggests new brew combinations + to explore. +3. **Gourmet Aroma Diffusion**: Built-in aroma diffusers enhance your coffee's scent profile, energizing + your senses before the first sip. + +Troubleshooting: +- **Issue**: LED Lights Malfunctioning + - **Solution**: Reset the lighting settings via the app. Ensure the LED connections inside the + GlowBrew are secure. Perform a factory reset if necessary. 
+ """ + + +# A sample step state model for the GenerateDocumentationStep +class GeneratedDocumentationState(BaseModel): + """State for the GenerateDocumentationStep.""" + + chat_history: ChatHistory | None = None + + +# A process step to generate documentation for a product +class GenerateDocumentationStep(KernelProcessStep[GeneratedDocumentationState]): + state: GeneratedDocumentationState = Field(default_factory=GeneratedDocumentationState) + + system_prompt: ClassVar[str] = """ +Your job is to write high quality and engaging customer facing documentation for a new product from Contoso. You will +be provide with information about the product in the form of internal documentation, specs, and troubleshooting guides +and you must use this information and nothing else to generate the documentation. If suggestions are provided on the +documentation you create, take the suggestions into account and rewrite the documentation. Make sure the product +sounds amazing. +""" + + async def activate(self, state: KernelProcessStepState[GeneratedDocumentationState]): + self.state = state.state + if self.state.chat_history is None: + self.state.chat_history = ChatHistory(system_message=self.system_prompt) + self.state.chat_history + + @kernel_function + async def generate_documentation( + self, context: KernelProcessStepContext, product_info: str, kernel: Kernel + ) -> None: + print(f"{GenerateDocumentationStep.__name__}\n\t Generating documentation for provided product_info...") + + self.state.chat_history.add_user_message(f"Product Information:\n{product_info}") + + chat_service, settings = kernel.select_ai_service(type=ChatCompletionClientBase) + assert isinstance(chat_service, ChatCompletionClientBase) # nosec + + response = await chat_service.get_chat_message_content(chat_history=self.state.chat_history, settings=settings) + + await context.emit_event(process_event="documentation_generated", data=str(response)) + + +# A process step to publish documentation +class PublishDocumentationStep(KernelProcessStep): + @kernel_function + async def publish_documentation(self, docs: str) -> None: + print(f"{PublishDocumentationStep.__name__}\n\t Publishing product documentation:\n\n{docs}") +``` The code above defines the three steps we need for our Process. There are a few points to call out here: - In Semantic Kernel, a `KernelFunction` defines a block of code that is invocable by native code or by an LLM. In the case of the Process framework, `KernelFunction`s are the invocable members of a Step and each step requires at least one KernelFunction to be defined. - The Process Framework has support for stateless and stateful steps. Stateful steps automatically checkpoint their progress and maintain state over multiple invocations. The `GenerateDocumentationStep` provides an example of this where the `GeneratedDocumentationState` class is used to persist the `ChatHistory` object. -- Steps can manually emit events by calling `EmitEventAsync` on the `KernelProcessStepContext` object. To get an instance of `KernelProcessStepContext` just add it as a parameter on your KernelFunction and the framework will automatically inject it. +- Steps can manually emit events by calling `emit_event` on the `KernelProcessStepContext` object. To get an instance of `KernelProcessStepContext` just add it as a parameter on your KernelFunction and the framework will automatically inject it. 
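+
+To make the stateful-step pattern above concrete, here is a minimal, hypothetical step (not part of this sample) that keeps a counter in its state and emits an event each time it runs:
+
+```python
+# A minimal sketch of a stateful step; CounterState and CountingStep are
+# illustrative names, not part of the documentation-generation sample.
+class CounterState(BaseModel):
+    count: int = 0
+
+
+class CountingStep(KernelProcessStep[CounterState]):
+    state: CounterState = Field(default_factory=CounterState)
+
+    async def activate(self, state: KernelProcessStepState[CounterState]):
+        # The framework calls activate with any previously persisted state.
+        self.state = state.state
+
+    @kernel_function
+    async def count(self, context: KernelProcessStepContext) -> None:
+        self.state.count += 1
+        # Manually emit an event; the data becomes the payload routed to the next step.
+        await context.emit_event(process_event="counted", data=self.state.count)
+```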
+::: zone-end ### Define the process flow @@ -181,13 +276,51 @@ docsGenerationStep .SendEventTo(new(docsPublishStep)); ``` +There are a few things going on here so let's break it down step by step. + +1. Create the builder: +Processes use a builder pattern to simplify wiring everything up. The builder provides methods for managing the steps within a process and for managing the lifecycle of the process. + +1. Add the steps: +Steps are added to the process by calling the `AddStepFromType` method of the builder. This allows the Process Framework to manage the lifecycle of steps by instantiating instances as needed. In this case we've added three steps to the process and created a variable for each one. These variables give us a handle to the unique instance of each step that we can use next to define the orchestration of events. + +1. Orchestrate the events: +This is where the routing of events from step to step are defined. In this case we have the following routes: + - When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `infoGatheringStep` step. + - When the `infoGatheringStep` finishes running, send the returned object to the `docsGenerationStep` step. + - Finally, when the `docsGenerationStep` finishes running, send the returned object to the `docsPublishStep` step. + +> [!TIP] +> **_Event Routing in Process Framework:_** You may be wondering how events that are sent to steps are routed to KernelFunctions within the step. In the code above, each step has only defined a single KernelFunction and each KernelFunction has only a single parameter (other than Kernel and the step context which are special, more on that later). When the event containing the generated documentation is sent to the `docsPublishStep` it will be passed to the `docs` parameter of the `PublishDocumentation` KernelFunction of the `docsGenerationStep` step because there is no other choice. However, steps can have multiple KernelFunctions and KernelFunctions can have multiple parameters, in these advanced scenarios you need to specify the target function and parameter. + ::: zone-end ::: zone pivot="programming-language-python" -::: zone-end +```python +# Create the process builder +process_builder = ProcessBuilder(name="DocumentationGeneration") -::: zone pivot="programming-language-java" -::: zone-end +# Add the steps +info_gathering_step = process_builder.add_step(GatherProductInfoStep) +docs_generation_step = process_builder.add_step(GenerateDocumentationStep) +docs_publish_step = process_builder.add_step(PublishDocumentationStep) + +# Orchestrate the events +process_builder.on_input_event("Start").send_event_to(target=info_gathering_step) + +info_gathering_step.on_function_result().send_event_to( + target=docs_generation_step, function_name="generate_documentation", parameter_name="product_info" +) + +docs_generation_step.on_event("documentation_generated").send_event_to(target=docs_publish_step) + +# Configure the kernel with an AI Service and connection details, if necessary +kernel = Kernel() +kernel.add_service(AzureChatCompletion()) + +# Build the process +kernel_process = process_builder.build() +``` There are a few things going on here so let's break it down step by step. @@ -195,16 +328,21 @@ There are a few things going on here so let's break it down step by step. Processes use a builder pattern to simplify wiring everything up. 
The builder provides methods for managing the steps within a process and for managing the lifecycle of the process. 1. Add the steps: -Steps are added to the process by calling the `AddStepFromType` method of the builder. This allows the Process Framework to manage the lifecycle of steps by instantiating instances as needed. In this case we've added three steps to the process and created a variable for each one. These variables give us a handle to the unique instance of each step that we can use next to define the orchestration of events. +Steps are added to the process by calling the `add_step` method of the builder, which adds the step type to the builder. This allows the Process Framework to manage the lifecycle of steps by instantiating instances as needed. In this case we've added three steps to the process and created a variable for each one. These variables give us a handle to the unique instance of each step that we can use next to define the orchestration of events. 1. Orchestrate the events: This is where the routing of events from step to step are defined. In this case we have the following routes: - - When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `infoGatheringStep` step. - - When the `infoGatheringStep` finishes running, send the returned object to the `docsGenerationStep` step. - - Finally, when the `docsGenerationStep` finishes running, send the returned object to the `docsPublishStep` step. + - When an external event with `id = Start` is sent to the process, this event and its associated data will be sent to the `info_gathering_step`. + - When the `info_gathering_step` finishes running, send the returned object to the `docs_generation_step`. + - Finally, when the `docs_generation_step` finishes running, send the returned object to the `docs_publish_step`. > [!TIP] -> **_Event Routing in Process Framework:_** You may be wondering how events that are sent to steps are routed to KernelFunctions within the step. In the code above, each step has only defined a single KernelFunction and each KernelFunction has only a single parameter (other than Kernel and the step context which are special, more on that later). When the event containing the generated documentation is sent to the `docsPublishStep` it will be passed to the `docs` parameter of the `PublishDocumentation` KernelFunction of the `docsGenerationStep` step because there is no other choice. However, steps can have multiple KernelFunctions and KernelFunctions can have multiple parameters in in these advanced scenarios you need to specify the target function and parameter. +> **_Event Routing in Process Framework:_** You may be wondering how events that are sent to steps are routed to KernelFunctions within the step. In the code above, each step has only defined a single KernelFunction and each KernelFunction has only a single parameter (other than Kernel and the step context which are special, more on that later). When the event containing the generated documentation is sent to the `docs_publish_step` it will be passed to the `docs` parameter of the `publish_documentation` KernelFunction of the `docs_generation_step` because there is no other choice. However, steps can have multiple KernelFunctions and KernelFunctions can have multiple parameters, in these advanced scenarios you need to specify the target function and parameter. 
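+
+For illustration, the route to the publish step could also be written with explicit targets. Naming the function and parameter is only required when a step exposes more than one `KernelFunction` or a function takes several parameters, but the explicit form looks like this:
+
+```python
+# Explicitly naming the target function and parameter (a sketch of the advanced
+# scenario described in the tip; the implicit route above behaves the same way here).
+docs_generation_step.on_event("documentation_generated").send_event_to(
+    target=docs_publish_step,
+    function_name="publish_documentation",  # which KernelFunction on the step to invoke
+    parameter_name="docs",                  # which parameter receives the event data
+)
+```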
+ +::: zone-end + +::: zone pivot="programming-language-java" +::: zone-end ### Build and run the Process @@ -221,14 +359,6 @@ var process = processBuilder.Build(); await process.StartAsync(kernel, new KernelProcessEvent { Id = "Start", Data = "Contoso GlowBrew" }); ``` -::: zone-end - -::: zone pivot="programming-language-python" -::: zone-end - -::: zone pivot="programming-language-java" -::: zone-end - We build the process and call `StartAsync` to run it. Our process is expecting an initial external event called `Start` to kick things off and so we provide that as well. Running this process shows the following output in the Console: ``` @@ -277,6 +407,67 @@ Join the growing community of GlowBrew enthusiasts today, and redefine how you e Ready to embark on an extraordinary coffee journey? Discover the perfect blend of technology and flavor with Contoso's GlowBrew. Your coffee awaits! ``` +::: zone-end + +::: zone pivot="programming-language-python" + +```python +# Configure the kernel with an AI Service and connection details, if necessary +kernel = Kernel() +kernel.add_service(AzureChatCompletion()) + +# Build the process +kernel_process = process_builder.build() + +# Start the process +async with await start( + process=kernel_process, + kernel=kernel, + initial_event=KernelProcessEvent(id="Start", data="Contoso GlowBrew"), +) as process_context: + _ = await process_context.get_state() +``` + +We build the process and call `start` with the asynchronous context manager to run it. Our process is expecting an initial external event called `Start` to kick things off and so we provide that as well. Running this process shows the following output in the Console: + +``` +GatherProductInfoStep + Gathering product information for Product Name: Contoso GlowBrew +GenerateDocumentationStep + Generating documentation for provided product_info... +PublishDocumentationStep + Publishing product documentation: + +# GlowBrew AI-Driven Coffee Machine: Elevate Your Coffee Experience + +Welcome to the future of coffee enjoyment with GlowBrew, the AI-driven coffee machine that not only crafts the perfect cup but does so with a light show that brightens your day. Designed for coffee enthusiasts and tech aficionados alike, GlowBrew combines cutting-edge brewing technology with an immersive lighting experience to start every day on a bright note. + +## Unleash the Power of Luminous Brew Technology + +With GlowBrew, your mornings will never be dull. The industry-leading number of programmable LEDs offers endless possibilities for customizing your coffee-making ritual. Sync the light show with the brewing process to create a visually stimulating ambiance that transforms your kitchen into a vibrant café each morning. + +## Discover New Flavor Dimensions with the AI Taste Assistant + +Leave the traditional coffee routines behind and say hello to personalization sophistication. The AI Taste Assistant learns and adapts to your unique preferences over time. Whether you prefer a strong espresso or a light latte, the assistant suggests new brew combinations tailored to your palate, inviting you to explore a world of flavors you never knew existed. + +## Heighten Your Senses with Gourmet Aroma Diffusion + +The moment you step into the room, let the GlowBrew’s built-in aroma diffusers captivate your senses. This feature is designed to enrich your coffee’s scent profile, ensuring every cup you brew is a multi-sensory delight. Let the burgeoning aroma energize you before the very first sip. 
+ +## Troubleshooting Guide: LED Lights Malfunctioning + +Occasionally, you might encounter an issue with the LED lights not functioning as intended. Here’s how to resolve it efficiently: + +- **Reset Lighting Settings**: Start by using the GlowBrew app to reset the lighting configurations to their default state. +- **Check Connections**: Ensure that all LED connections inside your GlowBrew machine are secure and properly connected. +- **Perform a Factory Reset**: If the problem persists, perform a factory reset on your GlowBrew to restore all settings to their original state. + +Experience the art of coffee making like never before with the GlowBrew AI-driven coffee machine. From captivating light shows to aromatic sensations, every feature is engineered to enhance your daily brew. Brew, savor, and glow with GlowBrew. +``` +::: zone-end + +::: zone pivot="programming-language-java" +::: zone-end ## What's Next? From 2b565e2266090787f9024af9923818f8d7c46a9e Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 13 Feb 2025 19:47:59 +0900 Subject: [PATCH 002/117] Improve Python agent learn site samples. --- .../examples/example-agent-collaboration.md | 418 ++++++++++-------- .../agent/examples/example-assistant-code.md | 83 ++-- .../examples/example-assistant-search.md | 5 +- 3 files changed, 285 insertions(+), 221 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 3206ad89..39b1ea49 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -26,6 +26,8 @@ The approach will be broken down step-by-step to high-light the key parts of the Before proceeding with feature coding, make sure your development environment is fully set up and configured. +This sample uses an optional text file as part of processing. If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/3f22587de5a6f42b41bd268f237547e1034de7df/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt). Place the file in your code working directory. + ::: zone pivot="programming-language-csharp" Start by creating a _Console_ project. Then, include the following package references to ensure all required dependencies are available. @@ -68,11 +70,16 @@ The _Agent Framework_ is experimental and requires warning suppression. This ma ::: zone-end ::: zone pivot="programming-language-python" +Start by installing the Semantic Kernel Python package. + +```bash +pip install semantic-kernel +``` + ```python import asyncio import os import copy -import pyperclip # Install via pip from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( @@ -167,7 +174,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." 
@@ -254,11 +261,6 @@ toolKernel.Plugins.AddFromType(); ``` ::: zone-end -::: zone pivot="programming-language-python" -```python -tool_kernel = copy.deepcopy(kernel) -tool_kernel.add_plugin(ClipboardAccess(), plugin_name="clipboard") -``` ::: zone-end ::: zone pivot="programming-language-java" @@ -267,9 +269,9 @@ tool_kernel.add_plugin(ClipboardAccess(), plugin_name="clipboard") ::: zone-end +::: zone pivot="programming-language-csharp" The _Clipboard_ plugin may be defined as part of the sample. -::: zone pivot="programming-language-csharp" ```csharp private sealed class ClipboardAccess { @@ -297,21 +299,6 @@ private sealed class ClipboardAccess ``` ::: zone-end -::: zone pivot="programming-language-python" - -Note: we are leveraging a Python package called pyperclip. Please install is using pip. - -```python -class ClipboardAccess: - @kernel_function - def set_clipboard(content: str): - if not content.strip(): - return - - pyperclip.copy(content) -``` -::: zone-end - ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. @@ -320,9 +307,9 @@ class ClipboardAccess: ### Agent Definition +::: zone pivot="programming-language-csharp" Let's declare the agent names as `const` so they might be referenced in _Agent Group Chat_ strategies: -::: zone pivot="programming-language-csharp" ```csharp const string ReviewerName = "Reviewer"; const string WriterName = "Writer"; @@ -330,6 +317,9 @@ const string WriterName = "Writer"; ::: zone-end ::: zone pivot="programming-language-python" + +We will declare the agent names as "Reviewer" and "Writer." + ```python REVIEWER_NAME = "Reviewer" COPYWRITER_NAME = "Writer" @@ -379,23 +369,20 @@ ChatCompletionAgent agentReviewer = ::: zone pivot="programming-language-python" ```python agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, - kernel=_create_kernel_with_chat_completion(REVIEWER_NAME), - name=REVIEWER_NAME, - instructions=""" - Your responsiblity is to review and identify how to improve user provided content. - If the user has providing input or direction for content already provided, specify how to - address this input. - Never directly perform the correction or provide example. - Once the content has been updated in a subsequent response, you will review the content - again until satisfactory. - Always copy satisfactory content to the clipboard using available tools and inform user. - - RULES: - - Only identify suggestions that are specific and actionable. - - Verify previous suggestions have been addressed. - - Never repeat previous suggestions. - """, + service_id=REVIEWER_NAME, + kernel=kernel, + name=REVIEWER_NAME, + instructions=""" +Your responsibility is to review and identify how to improve user provided content. +If the user has provided input or direction for content already provided, specify how to address this input. +Never directly perform the correction or provide an example. +Once the content has been updated in a subsequent response, review it again until it is satisfactory. + +RULES: +- Only identify suggestions that are specific and actionable. +- Verify previous suggestions have been addressed. +- Never repeat previous suggestions. +""", ) ``` ::: zone-end @@ -406,11 +393,11 @@ agent_reviewer = ChatCompletionAgent( ::: zone-end -The _Writer_ agent is is similiar, but doesn't require the specification of _Execution Settings_ since it isn't configured with a plug-in. 
+::: zone pivot="programming-language-csharp" +The _Writer_ agent is similiar, but doesn't require the specification of _Execution Settings_ since it isn't configured with a plug-in. Here the _Writer_ is given a single-purpose task, follow direction and rewrite the content. -::: zone pivot="programming-language-csharp" ```csharp ChatCompletionAgent agentWriter = new() @@ -430,19 +417,19 @@ ChatCompletionAgent agentWriter = ::: zone-end ::: zone pivot="programming-language-python" +The _Writer_ agent is similiar. It is given a single-purpose task, follow direction and rewrite the content. ```python agent_writer = ChatCompletionAgent( - service_id=COPYWRITER_NAME, - kernel=_create_kernel_with_chat_completion(COPYWRITER_NAME), - name=COPYWRITER_NAME, - instructions=""" - Your sole responsiblity is to rewrite content according to review suggestions. - - - Always apply all review direction. - - Always revise the content in its entirety without explanation. - - Never address the user. - """, -) + service_id=WRITER_NAME, + kernel=kernel, + name=WRITER_NAME, + instructions=""" +Your sole responsibility is to rewrite content according to review suggestions. +- Always apply all review directions. +- Always revise the content in its entirety without explanation. +- Never address the user. +""", + ) ``` ::: zone-end @@ -489,25 +476,25 @@ KernelFunction selectionFunction = ::: zone pivot="programming-language-python" ```python selection_function = KernelFunctionFromPrompt( - function_name="selection", - prompt=f""" - Determine which participant takes the next turn in a conversation based on the the most recent participant. - State only the name of the participant to take the next turn. - No participant should take more than one turn in a row. - - Choose only from these participants: - - {REVIEWER_NAME} - - {COPYWRITER_NAME} - - Always follow these rules when selecting the next participant: - - After user input, it is {COPYWRITER_NAME}'s turn. - - After {COPYWRITER_NAME} replies, it is {REVIEWER_NAME}'s turn. - - After {REVIEWER_NAME} provides feedback, it is {COPYWRITER_NAME}'s turn. - - History: - {{{{$history}}}} - """, -) + function_name="selection", + prompt=f""" +Examine the provided RESPONSE and choose the next participant. +State only the name of the chosen participant without explanation. +Never choose the participant named in the RESPONSE. + +Choose only from these participants: +- {REVIEWER_NAME} +- {WRITER_NAME} + +Rules: +- If RESPONSE is user input, it is {REVIEWER_NAME}'s turn. +- If RESPONSE is by {REVIEWER_NAME}, it is {WRITER_NAME}'s turn. +- If RESPONSE is by {WRITER_NAME}, it is {REVIEWER_NAME}'s turn. + +RESPONSE: +{{{{$lastmessage}}}} +""" + ) ``` ::: zone-end @@ -540,20 +527,20 @@ KernelFunction terminationFunction = ::: zone pivot="programming-language-python" ```python -TERMINATION_KEYWORD = "yes" - -termination_function = KernelFunctionFromPrompt( - function_name="termination", - prompt=f""" - Examine the RESPONSE and determine whether the content has been deemed satisfactory. - If content is satisfactory, respond with a single word without explanation: {TERMINATION_KEYWORD}. - If specific suggestions are being provided, it is not satisfactory. - If no correction is suggested, it is satisfactory. + termination_keyword = "yes" - RESPONSE: - {{{{$history}}}} - """, -) + termination_function = KernelFunctionFromPrompt( + function_name="termination", + prompt=f""" +Examine the RESPONSE and determine whether the content has been deemed satisfactory. 
+If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. +If specific suggestions are being provided, it is not satisfactory. +If no correction is suggested, it is satisfactory. + +RESPONSE: +{{{{$lastmessage}}}} +""" + ) ``` ::: zone-end @@ -573,7 +560,7 @@ ChatHistoryTruncationReducer historyReducer = new(1); ::: zone pivot="programming-language-python" ```python -**ChatHistoryReducer is coming soon to Python.** +history_reducer = ChatHistoryTruncationReducer(target_count=1) ``` ::: zone-end @@ -644,26 +631,28 @@ Creating `AgentGroupChat` involves: Notice that each strategy is responsible for parsing the `KernelFunction` result. ```python chat = AgentGroupChat( - agents=[agent_writer, agent_reviewer], + agents=[agent_reviewer, agent_writer], selection_strategy=KernelFunctionSelectionStrategy( + initial_agent=agent_reviewer, function=selection_function, - kernel=_create_kernel_with_chat_completion("selection"), - result_parser=lambda result: str(result.value[0]) if result.value is not None else COPYWRITER_NAME, - agent_variable_name="agents", - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: str(result.value[0]).strip() if result.value[0] is not None else WRITER_NAME, + history_variable_name="lastmessage", history_reducer=history_reducer, ), termination_strategy=KernelFunctionTerminationStrategy( agents=[agent_reviewer], function=termination_function, - kernel=_create_kernel_with_chat_completion("termination"), - result_parser=lambda result: TERMINATION_KEYWORD in str(result.value[0]).lower(), - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: termination_keyword in str(result.value[0]).lower(), + history_variable_name="lastmessage", maximum_iterations=10, history_reducer=history_reducer, ), ) ``` + +The `lastmessage` `history_variable_name` corresponds with the `KernelFunctionSelectionStrategy` and the `KernelFunctionTerminationStrategy` prompt that was defined above. This is where the last message is placed when rendering the prompt. ::: zone-end ::: zone pivot="programming-language-java" @@ -702,15 +691,14 @@ while not is_complete: ::: zone-end +::: zone pivot="programming-language-csharp" Now let's capture user input within the previous loop. In this case: - Empty input will be ignored - The term `EXIT` will signal that the conversation is completed - The term `RESET` will clear the _Agent Group Chat_ history - Any term starting with `@` will be treated as a file-path whose content will be provided as input -- Valid input will be added to the _Agent Group Chaty_ as a _User_ message. - +- Valid input will be added to the _Agent Group Chat_ as a _User_ message. -::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine(); Console.Write("> "); @@ -757,8 +745,18 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); ::: zone-end ::: zone pivot="programming-language-python" +Now let's capture user input within the previous loop. In this case: +- Empty input will be ignored. +- The term `exit` will signal that the conversation is complete. +- The term `reset` will clear the _Agent Group Chat_ history. +- Any term starting with `@` will be treated as a file-path whose content will be provided as input. +- Valid input will be added to the _Agent Group Chat_ as a _User_ message. 
+ +The operation logic inside the while loop looks like: + ```python -user_input = input("User:> ") +print() +user_input = input("User > ").strip() if not user_input: continue @@ -771,18 +769,22 @@ if user_input.lower() == "reset": print("[Conversation has been reset]") continue -if user_input.startswith("@") and len(input) > 1: - file_path = input[1:] +# Try to grab files from the script's current directory +if user_input.startswith("@") and len(user_input) > 1: + file_name = user_input[1:] + script_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.join(script_dir, file_name) try: if not os.path.exists(file_path): print(f"Unable to access file: {file_path}") continue - with open(file_path) as file: + with open(file_path, "r", encoding="utf-8") as file: user_input = file.read() except Exception: print(f"Unable to access file: {file_path}") continue +# Add the current user_input to the chat await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -826,13 +828,17 @@ catch (HttpOperationException exception) ::: zone pivot="programming-language-python" ```python -chat.is_complete = False -async for response in chat.invoke(): - print(f"# {response.role} - {response.name or '*'}: '{response.content}'") +try: + async for response in chat.invoke(): + if response is None or not response.name: + continue + print() + print(f"# {response.name.upper()}:\n{response.content}") +except Exception as e: + print(f"Error during chat invocation: {e}") -if chat.is_complete: - is_complete = True - break +# Reset the chat's complete flag for the new conversation round. +chat.is_complete = False ``` ::: zone-end @@ -845,6 +851,8 @@ if chat.is_complete: ## Final +::: zone pivot="programming-language-csharp" + Bringing all the steps together, we have the final code for this example. The complete implementation is provided below. Try using these suggested inputs: @@ -852,14 +860,12 @@ Try using these suggested inputs: 1. Hi 2. {"message: "hello world"} 3. {"message": "hello world"} -4. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define thei relationships, and orchestrate interactions between them. +4. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define their relationships, and orchestrate interactions between them. 5. make this two paragraphs 6. thank you 7. @.\WomensSuffrage.txt 8. its good, but is it ready for my college professor? - -::: zone pivot="programming-language-csharp" ```csharp // Copyright (c) Microsoft. All rights reserved. @@ -1114,12 +1120,28 @@ public static class Program ::: zone-end ::: zone pivot="programming-language-python" + +Bringing all the steps together, we now have the final code for this example. The complete implementation is shown below. + +You can try using one of the suggested inputs. 
As the agent chat begins, the agents will exchange messages for several iterations until the reviewer agent is satisfied with the copywriter's work. The `while` loop ensures the conversation continues, even if the chat is initially considered complete, by resetting the `is_complete` flag to `False`. + +1. Rozes are red, violetz are blue. +2. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define their relationships, and orchestrate interactions between them. +4. Make this two paragraphs +5. thank you +7. @WomensSuffrage.txt +8. It's good, but is it ready for my college professor? + +> [!TIP] +> You can reference any file by providing `@`. To reference the "WomensSuffrage" text from above, download it [here](https://github.com/microsoft/semantic-kernel/blob/3f22587de5a6f42b41bd268f237547e1034de7df/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt) and place it in your current working directory. You can then reference it with `@WomensSuffrage.txt`. + ```python # Copyright (c) Microsoft. All rights reserved. import asyncio import os +from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( KernelFunctionSelectionStrategy, @@ -1128,12 +1150,10 @@ from semantic_kernel.agents.strategies.termination.kernel_function_termination_s KernelFunctionTerminationStrategy, ) from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents import ChatHistoryTruncationReducer from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.history_reducer.chat_history_truncation_reducer import ChatHistoryTruncationReducer from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_decorator import kernel_function from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt -from semantic_kernel.kernel import Kernel ################################################################### # The following sample demonstrates how to create a simple, # @@ -1142,122 +1162,123 @@ from semantic_kernel.kernel import Kernel # complete a user's task. # ################################################################### - -class ClipboardAccess: - @kernel_function - def set_clipboard(content: str): - if not content.strip(): - return - - pyperclip.copy(content) - - +# Define agent names REVIEWER_NAME = "Reviewer" -COPYWRITER_NAME = "Writer" +WRITER_NAME = "Writer" -def _create_kernel_with_chat_completion(service_id: str) -> Kernel: +def create_kernel() -> Kernel: + """Creates a Kernel instance with an Azure OpenAI ChatCompletion service.""" kernel = Kernel() - kernel.add_service(AzureChatCompletion(service_id=service_id)) + kernel.add_service(service=AzureChatCompletion()) return kernel async def main(): + # Create a single kernel instance for all agents. + kernel = create_kernel() + + # Create ChatCompletionAgents using the same kernel. 
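+    # The instructions below give each agent a narrow, single-purpose role so the
+    # selection and termination strategies defined later can route turns between them.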
agent_reviewer = ChatCompletionAgent( service_id=REVIEWER_NAME, - kernel=_create_kernel_with_chat_completion(REVIEWER_NAME), + kernel=kernel, name=REVIEWER_NAME, instructions=""" - Your responsiblity is to review and identify how to improve user provided content. - If the user has providing input or direction for content already provided, specify how to - address this input. - Never directly perform the correction or provide example. - Once the content has been updated in a subsequent response, you will review the content - again until satisfactory. - Always copy satisfactory content to the clipboard using available tools and inform user. - - RULES: - - Only identify suggestions that are specific and actionable. - - Verify previous suggestions have been addressed. - - Never repeat previous suggestions. - """, +Your responsibility is to review and identify how to improve user provided content. +If the user has provided input or direction for content already provided, specify how to address this input. +Never directly perform the correction or provide an example. +Once the content has been updated in a subsequent response, review it again until it is satisfactory. + +RULES: +- Only identify suggestions that are specific and actionable. +- Verify previous suggestions have been addressed. +- Never repeat previous suggestions. +""", ) agent_writer = ChatCompletionAgent( - service_id=COPYWRITER_NAME, - kernel=_create_kernel_with_chat_completion(COPYWRITER_NAME), - name=COPYWRITER_NAME, + service_id=WRITER_NAME, + kernel=kernel, + name=WRITER_NAME, instructions=""" - Your sole responsiblity is to rewrite content according to review suggestions. - - - Always apply all review direction. - - Always revise the content in its entirety without explanation. - - Never address the user. - """, +Your sole responsibility is to rewrite content according to review suggestions. +- Always apply all review directions. +- Always revise the content in its entirety without explanation. +- Never address the user. +""", ) + # Define a selection function to determine which agent should take the next turn. selection_function = KernelFunctionFromPrompt( function_name="selection", prompt=f""" - Determine which participant takes the next turn in a conversation based on the the most recent participant. - State only the name of the participant to take the next turn. - No participant should take more than one turn in a row. - - Choose only from these participants: - - {REVIEWER_NAME} - - {COPYWRITER_NAME} - - Always follow these rules when selecting the next participant: - - After user input, it is {COPYWRITER_NAME}'s turn. - - After {COPYWRITER_NAME} replies, it is {REVIEWER_NAME}'s turn. - - After {REVIEWER_NAME} provides feedback, it is {COPYWRITER_NAME}'s turn. - - History: - {{{{$history}}}} - """, +Examine the provided RESPONSE and choose the next participant. +State only the name of the chosen participant without explanation. +Never choose the participant named in the RESPONSE. + +Choose only from these participants: +- {REVIEWER_NAME} +- {WRITER_NAME} + +Rules: +- If RESPONSE is user input, it is {REVIEWER_NAME}'s turn. +- If RESPONSE is by {REVIEWER_NAME}, it is {WRITER_NAME}'s turn. +- If RESPONSE is by {WRITER_NAME}, it is {REVIEWER_NAME}'s turn. + +RESPONSE: +{{{{$lastmessage}}}} +""", ) - TERMINATION_KEYWORD = "yes" + # Define a termination function where the reviewer signals completion with "yes". 
+ termination_keyword = "yes" termination_function = KernelFunctionFromPrompt( function_name="termination", prompt=f""" - Examine the RESPONSE and determine whether the content has been deemed satisfactory. - If content is satisfactory, respond with a single word without explanation: {TERMINATION_KEYWORD}. - If specific suggestions are being provided, it is not satisfactory. - If no correction is suggested, it is satisfactory. - - RESPONSE: - {{{{$history}}}} - """, +Examine the RESPONSE and determine whether the content has been deemed satisfactory. +If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. +If specific suggestions are being provided, it is not satisfactory. +If no correction is suggested, it is satisfactory. + +RESPONSE: +{{{{$lastmessage}}}} +""", ) - history_reducer = ChatHistoryTruncationReducer(target_count=1) + history_reducer = ChatHistoryTruncationReducer(target_count=5) + # Create the AgentGroupChat with selection and termination strategies. chat = AgentGroupChat( - agents=[agent_writer, agent_reviewer], + agents=[agent_reviewer, agent_writer], selection_strategy=KernelFunctionSelectionStrategy( + initial_agent=agent_reviewer, function=selection_function, - kernel=_create_kernel_with_chat_completion("selection"), - result_parser=lambda result: str(result.value[0]) if result.value is not None else COPYWRITER_NAME, - agent_variable_name="agents", - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: str(result.value[0]).strip() if result.value[0] is not None else WRITER_NAME, + history_variable_name="lastmessage", history_reducer=history_reducer, ), termination_strategy=KernelFunctionTerminationStrategy( agents=[agent_reviewer], function=termination_function, - kernel=_create_kernel_with_chat_completion("termination"), - result_parser=lambda result: TERMINATION_KEYWORD in str(result.value[0]).lower(), - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: termination_keyword in str(result.value[0]).lower(), + history_variable_name="lastmessage", maximum_iterations=10, history_reducer=history_reducer, ), ) - is_complete: bool = False + print( + "Ready! Type your input, or 'exit' to quit, 'reset' to restart the conversation. " + "You may pass in a file path using @." 
+ ) + + is_complete = False while not is_complete: - user_input = input("User:> ") + print() + user_input = input("User > ").strip() if not user_input: continue @@ -1270,26 +1291,35 @@ async def main(): print("[Conversation has been reset]") continue - if user_input.startswith("@") and len(input) > 1: - file_path = input[1:] + # Try to grab files from the script's current directory + if user_input.startswith("@") and len(user_input) > 1: + file_name = user_input[1:] + script_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.join(script_dir, file_name) try: if not os.path.exists(file_path): print(f"Unable to access file: {file_path}") continue - with open(file_path) as file: + with open(file_path, "r", encoding="utf-8") as file: user_input = file.read() except Exception: print(f"Unable to access file: {file_path}") continue + # Add the current user_input to the chat await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) - async for response in chat.invoke(): - print(f"# {response.role} - {response.name or '*'}: '{response.content}'") + try: + async for response in chat.invoke(): + if response is None or not response.name: + continue + print() + print(f"# {response.name.upper()}:\n{response.content}") + except Exception as e: + print(f"Error during chat invocation: {e}") - if chat.is_complete: - is_complete = True - break + # Reset the chat's complete flag for the new conversation round. + chat.is_complete = False if __name__ == "__main__": diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 67aa0290..2f3f2c81 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -172,7 +172,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -181,6 +181,9 @@ OPENAI_ORG_ID="" OPENAI_CHAT_MODEL_ID="" ``` +[!TIP] +Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation). + Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end @@ -239,14 +242,18 @@ OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByC # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByAdmin1.csv", ) csv_file_path_2 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByCountry.csv", ) ``` +You may need to modify the path creation code based on the storage location of your CSV files. 
+ ::: zone-end ::: zone pivot="programming-language-java" @@ -285,18 +292,18 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", - instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], - ) + kernel=Kernel(), + service_id="agent", + name="SampleAssistantAgent", + instructions=""" + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. + """, + enable_code_interpreter=True, + code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], +) ``` ::: zone-end @@ -722,7 +729,10 @@ public static class Program ::: zone pivot="programming-language-python" ```python +# Copyright (c) Microsoft. All rights reserved. + import asyncio +import logging import os from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent @@ -731,19 +741,29 @@ from semantic_kernel.contents.streaming_file_reference_content import StreamingF from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.kernel import Kernel +logging.basicConfig(level=logging.ERROR) + +################################################################### +# The following sample demonstrates how to create a simple, # +# OpenAI assistant agent that utilizes the code interpreter # +# to analyze uploaded files. 
# +################################################################### + # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByAdmin1.csv", ) csv_file_path_2 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByCountry.csv", ) -async def download_file_content(agent, file_id: str): +async def download_file_content(agent: AzureAssistantAgent, file_id: str): try: # Fetch the content of the file using the provided method response_content = await agent.client.files.content(file_id) @@ -766,7 +786,7 @@ async def download_file_content(agent, file_id: str): print(f"An error occurred while downloading file {file_id}: {str(e)}") -async def download_response_image(agent, file_ids: list[str]): +async def download_response_image(agent: AzureAssistantAgent, file_ids: list[str]): if file_ids: # Iterate over file_ids and download each one for file_id in file_ids: @@ -801,30 +821,41 @@ async def main(): if user_input.lower() == "exit": is_complete = True - break await agent.add_chat_message( thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) ) - is_code: bool = False - async for response in agent.invoke_stream(thread_id=thread_id): - if is_code != response.metadata.get("code"): - print() - is_code = not is_code - - print(f"{response.content}", end="", flush=True) + is_code = False + last_role = None + async for response in agent.invoke_stream(thread_id=thread_id): + current_is_code = response.metadata.get("code", False) + + if current_is_code: + if not is_code: + print("\n\n```python") + is_code = True + print(response.content, end="", flush=True) + else: + if is_code: + print("\n```") + is_code = False + last_role = None + if hasattr(response, "role") and response.role is not None and last_role != response.role: + print(f"\n# {response.role}: ", end="", flush=True) + last_role = response.role + print(response.content, end="", flush=True) file_ids.extend([ item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) ]) - - print() + if is_code: + print("```\n") await download_response_image(agent, file_ids) file_ids.clear() finally: - print("Cleaning up resources...") + print("\nCleaning up resources...") if agent is not None: [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] await agent.delete_thread(thread_id) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index ca3c70b3..cf6e0f6f 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -174,7 +174,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -183,6 +183,9 @@ OPENAI_ORG_ID="" OPENAI_CHAT_MODEL_ID="" ``` +> [!TIP] +> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. 
For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation). + Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end From f1b1b6c382e81010ca527244ea4b0bae61c48e40 Mon Sep 17 00:00:00 2001 From: Genevieve Warren <24882762+gewarren@users.noreply.github.com> Date: Thu, 13 Feb 2025 11:54:20 -0800 Subject: [PATCH 003/117] Fix spurious zone-end tag --- .../get-started/quick-start-guide.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/get-started/quick-start-guide.md b/semantic-kernel/get-started/quick-start-guide.md index 767fba6f..ed58aa72 100644 --- a/semantic-kernel/get-started/quick-start-guide.md +++ b/semantic-kernel/get-started/quick-start-guide.md @@ -300,11 +300,25 @@ To make it easier to get started building enterprise apps with Semantic Kernel, In the following sections, we'll unpack the above sample by walking through steps **1**, **2**, **3**, **4**, **6**, **9**, and **10**. Everything you need to build a simple agent that is powered by an AI service and can run your code. + ::: zone pivot="programming-language-csharp,programming-language-python" + - [Import packages](#1-import-packages) - [Add AI services](#2-add-ai-services) - ::: zone pivot="programming-language-csharp,programming-language-python" - [Enterprise components](#3-add-enterprise-services) +- [Build the kernel](#4-build-the-kernel-and-retrieve-services) +- Add memory (skipped) +- [Add plugins](#6-add-plugins) +- Create kernel arguments (skipped) +- Create prompts (skipped) +- [Planning](#9-planning) +- [Invoke](#10-invoke) + ::: zone-end + + ::: zone pivot="programming-language-java" + +- [Import packages](#1-import-packages) +- [Add AI services](#2-add-ai-services) - [Build the kernel](#4-build-the-kernel-and-retrieve-services) - Add memory (skipped) - [Add plugins](#6-add-plugins) @@ -313,6 +327,8 @@ In the following sections, we'll unpack the above sample by walking through step - [Planning](#9-planning) - [Invoke](#10-invoke) + ::: zone-end + ### 1) Import packages For this sample, we first started by importing the following packages: From b5eb15b8dc2746c227e9994936c4c8ba3d182bbc Mon Sep 17 00:00:00 2001 From: Genevieve Warren <24882762+gewarren@users.noreply.github.com> Date: Thu, 13 Feb 2025 12:09:40 -0800 Subject: [PATCH 004/117] Ingestion -> injection --- semantic-kernel/concepts/kernel.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/kernel.md b/semantic-kernel/concepts/kernel.md index 69de031c..a3758b3c 100644 --- a/semantic-kernel/concepts/kernel.md +++ b/semantic-kernel/concepts/kernel.md @@ -32,7 +32,7 @@ Before building a kernel, you should first understand the two types of component | | Components | Description | |---|---|---| -| 1 | **Services** | These consist of both AI services (e.g., chat completion) and other services (e.g., logging and HTTP clients) that are necessary to run your application. This was modelled after the Service Provider pattern in .NET so that we could support dependency ingestion across all languages. | +| 1 | **Services** | These consist of both AI services (e.g., chat completion) and other services (e.g., logging and HTTP clients) that are necessary to run your application. 
This was modelled after the Service Provider pattern in .NET so that we could support dependency injection across all languages. | | 2 | **Plugins** | These are the components that are used by your AI services and prompt templates to perform work. AI services, for example, can use plugins to retrieve data from a database or call an external API to perform actions. | ::: zone pivot="programming-language-csharp" From 34650880bd37f63ababf0e7e598106ebbc6ac5f0 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 12:30:51 +0900 Subject: [PATCH 005/117] Include links to repo code. --- .../Frameworks/agent/examples/example-agent-collaboration.md | 2 ++ .../Frameworks/agent/examples/example-assistant-code.md | 2 ++ .../Frameworks/agent/examples/example-assistant-search.md | 2 ++ semantic-kernel/Frameworks/agent/examples/example-chat-agent.md | 2 ++ 4 files changed, 8 insertions(+) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 39b1ea49..c140cf0b 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -1325,6 +1325,8 @@ RESPONSE: if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/agent_collaboration.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 2f3f2c81..d07d4538 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -865,6 +865,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/assistant_code.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index cf6e0f6f..1150e49e 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -783,6 +783,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/assistant_search.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index 7870d9ed..1905fb7c 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -669,6 +669,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/chat_agent.py), as shown above, in our repo. 
::: zone-end ::: zone pivot="programming-language-java" From a2d9cd051156291f8822760a7f3fa6595d9b8a33 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 12:39:15 +0900 Subject: [PATCH 006/117] Remove fixed locale from link --- .../Frameworks/agent/examples/example-assistant-code.md | 2 +- .../Frameworks/agent/examples/example-assistant-search.md | 2 +- .../concepts/ai-services/chat-completion/index.md | 6 +++--- .../telemetry-with-azure-ai-foundry-tracing.md | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index d07d4538..2e35a34d 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -182,7 +182,7 @@ OPENAI_CHAT_MODEL_ID="" ``` [!TIP] -Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation). +Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/azure/ai-services/openai/api-version-deprecation). Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 1150e49e..3c7fbf76 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -184,7 +184,7 @@ OPENAI_CHAT_MODEL_ID="" ``` > [!TIP] -> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation). +> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/azure/ai-services/openai/api-version-deprecation). Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. 
::: zone-end diff --git a/semantic-kernel/concepts/ai-services/chat-completion/index.md b/semantic-kernel/concepts/ai-services/chat-completion/index.md index c8218409..c468e7dc 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/index.md @@ -920,7 +920,7 @@ chat_completion_service = AzureChatCompletion(service_id="my-service-id") ``` > [!NOTE] -> The `AzureChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/en-us/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. # [OpenAI](#tab/python-OpenAI) @@ -967,7 +967,7 @@ chat_completion_service = AzureAIInferenceChatCompletion( ``` > [!NOTE] -> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/en-us/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. # [Anthropic](#tab/python-Anthropic) @@ -1274,7 +1274,7 @@ execution_settings = OnnxGenAIPromptExecutionSettings() --- > [!TIP] -> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). +> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](https://learn.microsoft.com/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). ::: zone-end diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md index 36153162..f9943d9d 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md @@ -10,7 +10,7 @@ ms.service: semantic-kernel # Visualize traces on Azure AI Foundry Tracing UI -[Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. 
+[Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. > [!IMPORTANT] > Before you start, make sure you have completed the tutorial on [inspecting telemetry data with Application Insights](./telemetry-with-app-insights.md). @@ -20,8 +20,8 @@ ms.service: semantic-kernel Prerequisites: -- An Azure AI Foundry project. Follow this [guide](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-projects) to create one if you don't have one. -- A serverless inference API. Follow this [guide](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. +- An Azure AI Foundry project. Follow this [guide](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects) to create one if you don't have one. +- A serverless inference API. Follow this [guide](https://learn.microsoft.com/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. - Alternatively, you can attach an Azure OpenAI resource to the project, in which case you don't need to create a serverless API. ## Attach an Application Insights resource to the project From 40075fb94080e29cf00641cb44fdba654df8a63b Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 12:42:22 +0900 Subject: [PATCH 007/117] Fix python sample resource link --- .../Frameworks/agent/examples/example-agent-collaboration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index c140cf0b..802a309a 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -1133,7 +1133,7 @@ You can try using one of the suggested inputs. As the agent chat begins, the age 8. It's good, but is it ready for my college professor? > [!TIP] -> You can reference any file by providing `@`. To reference the "WomensSuffrage" text from above, download it [here](https://github.com/microsoft/semantic-kernel/blob/3f22587de5a6f42b41bd268f237547e1034de7df/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt) and place it in your current working directory. You can then reference it with `@WomensSuffrage.txt`. +> You can reference any file by providing `@`. To reference the "WomensSuffrage" text from above, download it [here](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/resources/WomensSuffrage.txt) and place it in your current working directory. You can then reference it with `@WomensSuffrage.txt`. ```python # Copyright (c) Microsoft. All rights reserved. From 4bea51e314cb9b574927099422571e5115ad6bdd Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 12:48:47 +0900 Subject: [PATCH 008/117] Use site relative links for learn site links. They don't need to be absolute. 
--- README.md | 6 +++--- .../agent/examples/example-assistant-code.md | 2 +- .../agent/examples/example-assistant-search.md | 2 +- .../Frameworks/process/process-deployment.md | 2 +- .../concepts/ai-services/chat-completion/index.md | 6 +++--- .../enterprise-readiness/observability/index.md | 8 ++++---- .../observability/telemetry-advanced.md | 4 ++-- .../observability/telemetry-with-aspire-dashboard.md | 10 +++++----- .../telemetry-with-azure-ai-foundry-tracing.md | 6 +++--- .../observability/telemetry-with-console.md | 2 +- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 75500b2b..e6f7a7f1 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # Microsoft Semantic Kernel Documentation -This is the GitHub repository for the technical product documentation for **Semantic Kernel**. This documentation is published at [Microsoft Semantic Kernel documentation](https://learn.microsoft.com/semantic-kernel). +This is the GitHub repository for the technical product documentation for **Semantic Kernel**. This documentation is published at [Microsoft Semantic Kernel documentation](/semantic-kernel). ## How to contribute -Thanks for your interest in [contributing](https://learn.microsoft.com/), home of technical content for Microsoft products and services. +Thanks for your interest in contributing to the home of technical content for Microsoft products and services. -To learn how to make contributions to the content in this repository, start with our [Docs contributor guide](https://learn.microsoft.com/contribute). +To learn how to make contributions to the content in this repository, start with our [Docs contributor guide](/contribute). ## Code of conduct diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 2e35a34d..b39a5eb8 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -182,7 +182,7 @@ OPENAI_CHAT_MODEL_ID="" ``` [!TIP] -Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/azure/ai-services/openai/api-version-deprecation). +Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](/azure/ai-services/openai/api-version-deprecation). Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 3c7fbf76..f38a28d4 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -184,7 +184,7 @@ OPENAI_CHAT_MODEL_ID="" ``` > [!TIP] -> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. 
As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](https://learn.microsoft.com/azure/ai-services/openai/api-version-deprecation). +> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](/azure/ai-services/openai/api-version-deprecation). Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end diff --git a/semantic-kernel/Frameworks/process/process-deployment.md b/semantic-kernel/Frameworks/process/process-deployment.md index 3be467a4..8ba8aa0e 100644 --- a/semantic-kernel/Frameworks/process/process-deployment.md +++ b/semantic-kernel/Frameworks/process/process-deployment.md @@ -18,7 +18,7 @@ The Process Framework provides an in-process runtime that allows developers to r ## Cloud Runtimes -For scenarios requiring scalability and distributed processing, the Process Framework supports cloud runtimes such as [**Orleans**](https://learn.microsoft.com/dotnet/orleans/overview) and [**Dapr**](https://dapr.io/). These options empower developers to deploy processes in a distributed manner, facilitating high availability and load balancing across multiple instances. By leveraging these cloud runtimes, organizations can streamline their operations and manage substantial workloads with ease. +For scenarios requiring scalability and distributed processing, the Process Framework supports cloud runtimes such as [**Orleans**](/dotnet/orleans/overview) and [**Dapr**](https://dapr.io/). These options empower developers to deploy processes in a distributed manner, facilitating high availability and load balancing across multiple instances. By leveraging these cloud runtimes, organizations can streamline their operations and manage substantial workloads with ease. - **Orleans Runtime:** This framework provides a programming model for building distributed applications and is particularly well-suited for handling virtual actors in a resilient manner, complementing the Process Framework’s event-driven architecture. diff --git a/semantic-kernel/concepts/ai-services/chat-completion/index.md b/semantic-kernel/concepts/ai-services/chat-completion/index.md index c468e7dc..27305c4d 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/index.md @@ -920,7 +920,7 @@ chat_completion_service = AzureChatCompletion(service_id="my-service-id") ``` > [!NOTE] -> The `AzureChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureChatCompletion` service also supports [Microsoft Entra](/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. 
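For example, a keyless setup might look like the following sketch. It assumes the `azure-identity` package and the `ad_token_provider` parameter, and uses placeholder resource values:

```python
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion

# Build a token provider from the signed-in identity; no API key is passed.
# The scope and resource values below are illustrative placeholders.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

chat_completion_service = AzureChatCompletion(
    deployment_name="my-deployment",
    endpoint="https://my-resource.openai.azure.com/",
    ad_token_provider=token_provider,
)
```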
# [OpenAI](#tab/python-OpenAI) @@ -967,7 +967,7 @@ chat_completion_service = AzureAIInferenceChatCompletion( ``` > [!NOTE] -> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. # [Anthropic](#tab/python-Anthropic) @@ -1274,7 +1274,7 @@ execution_settings = OnnxGenAIPromptExecutionSettings() --- > [!TIP] -> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](https://learn.microsoft.com/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). +> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). ::: zone-end diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/index.md b/semantic-kernel/concepts/enterprise-readiness/observability/index.md index 41583f59..79b13241 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/index.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/index.md @@ -20,8 +20,8 @@ Observability is typically achieved through logging, metrics, and tracing. They Useful materials for further reading: - [Observability defined by Cloud Native Computing Foundation](https://glossary.cncf.io/observability/) -- [Distributed tracing](https://learn.microsoft.com/dotnet/core/diagnostics/distributed-tracing) -- [Observability in .Net](https://learn.microsoft.com/dotnet/core/diagnostics/observability-with-otel) +- [Distributed tracing](/dotnet/core/diagnostics/distributed-tracing) +- [Observability in .Net](/dotnet/core/diagnostics/observability-with-otel) - [OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) ## Observability in Semantic Kernel @@ -33,7 +33,7 @@ Specifically, Semantic Kernel provides the following observability features: - **Logging**: Semantic Kernel logs meaningful events and errors from the kernel, kernel plugins and functions, as well as the AI connectors. ![Logs and events](../../../media/telemetry-log-events-overview-app-insights.png) > [!IMPORTANT] - > [Traces in Application Insights](https://learn.microsoft.com/azure/azure-monitor/app/data-model-complete#trace) represent traditional log entries and [OpenTelemetry span events](https://opentelemetry.io/docs/concepts/signals/traces/#span-events). They are not the same as distributed traces. + > [Traces in Application Insights](/azure/azure-monitor/app/data-model-complete#trace) represent traditional log entries and [OpenTelemetry span events](https://opentelemetry.io/docs/concepts/signals/traces/#span-events). They are not the same as distributed traces. - **Metrics**: Semantic Kernel emits metrics from kernel functions and AI connectors. 
You will be able to monitor metrics such as the kernel function execution time, the token consumption of AI connectors, etc. ![Metrics](../../../media/telemetry-metrics-overview-app-insights.png) - **Tracing**: Semantic Kernel supports distributed tracing. You can track activities across different services and within Semantic Kernel. @@ -44,7 +44,7 @@ Specifically, Semantic Kernel provides the following observability features: | Telemetry | Description | |-----------|---------------------------------------| -| Log | Logs are recorded throughout the Kernel. For more information on Logging in .Net, please refer to this [document](https://learn.microsoft.com/dotnet/core/extensions/logging). Sensitive data, such as kernel function arguments and results, are logged at the trace level. Please refer to this [table](https://learn.microsoft.com/dotnet/core/extensions/logging?tabs=command-line#log-level) for more information on log levels. | +| Log | Logs are recorded throughout the Kernel. For more information on Logging in .Net, please refer to this [document](/dotnet/core/extensions/logging). Sensitive data, such as kernel function arguments and results, are logged at the trace level. Please refer to this [table](/dotnet/core/extensions/logging?tabs=command-line#log-level) for more information on log levels. | | Activity | Each kernel function execution and each call to an AI model are recorded as an activity. All activities are generated by an activity source named "Microsoft.SemanticKernel". | | Metric | Semantic Kernel captures the following metrics from kernel functions:
  • `semantic_kernel.function.invocation.duration` (Histogram) - function execution time (in seconds)
  • `semantic_kernel.function.streaming.duration` (Histogram) - function streaming execution time (in seconds)
  • `semantic_kernel.function.invocation.token_usage.prompt` (Histogram) - number of prompt tokens used (only for `KernelFunctionFromPrompt`)
  • `semantic_kernel.function.invocation.token_usage.completion` (Histogram) - number of completion tokens used (only for `KernelFunctionFromPrompt`)
  • | diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md index bb604e6b..2efebe69 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md @@ -12,7 +12,7 @@ ms.service: semantic-kernel # More advanced scenarios for telemetry > [!NOTE] -> This article will use [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) for illustration. If you prefer to use other tools, please refer to the documentation of the tool you are using on setup instructions. +> This article will use [Aspire Dashboard](/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) for illustration. If you prefer to use other tools, please refer to the documentation of the tool you are using on setup instructions. ## Auto Function Calling @@ -375,7 +375,7 @@ Please refer to this [article](./telemetry-with-console.md#environment-variables ### Start the Aspire Dashboard -Follow the instructions [here](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. +Follow the instructions [here](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. ### Run diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md index b83abbfb..6bd39dfe 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md @@ -11,9 +11,9 @@ ms.service: semantic-kernel # Inspection of telemetry data with Aspire Dashboard -[Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) is part of the [.NET Aspire](https://learn.microsoft.com/dotnet/aspire/get-started/aspire-overview) offering. The dashboard allows developers to monitor and inspect their distributed applications. +[Aspire Dashboard](/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) is part of the [.NET Aspire](/dotnet/aspire/get-started/aspire-overview) offering. The dashboard allows developers to monitor and inspect their distributed applications. -In this example, we will use the [standalone mode](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) and learn how to export telemetry data to Aspire Dashboard, and inspect the data there. +In this example, we will use the [standalone mode](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) and learn how to export telemetry data to Aspire Dashboard, and inspect the data there. ## Exporter @@ -330,7 +330,7 @@ Please refer to this [article](./telemetry-with-console.md#add-telemetry-1) for ## Start the Aspire Dashboard -Follow the instructions [here](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. 
Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. +Follow the instructions [here](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. ## Run @@ -366,7 +366,7 @@ python telemetry_aspire_dashboard_quickstart.py After running the application, head over to the dashboard to inspect the telemetry data. > [!TIP] -> Follow this [guide](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/explore) to explore the Aspire Dashboard interface. +> Follow this [guide](/dotnet/aspire/fundamentals/dashboard/explore) to explore the Aspire Dashboard interface. ### Traces @@ -383,7 +383,7 @@ In the trace details, you can see the span that represents the prompt function a ### Logs -Head over to the `Structured` tab to view the logs emitted by the application. Please refer to this [guide](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/explore#structured-logs-page) on how to work with structured logs in the dashboard. +Head over to the `Structured` tab to view the logs emitted by the application. Please refer to this [guide](/dotnet/aspire/fundamentals/dashboard/explore#structured-logs-page) on how to work with structured logs in the dashboard. ## Next steps diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md index f9943d9d..00ab843c 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md @@ -10,7 +10,7 @@ ms.service: semantic-kernel # Visualize traces on Azure AI Foundry Tracing UI -[Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. +[Azure AI Foundry](/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. > [!IMPORTANT] > Before you start, make sure you have completed the tutorial on [inspecting telemetry data with Application Insights](./telemetry-with-app-insights.md). @@ -20,8 +20,8 @@ ms.service: semantic-kernel Prerequisites: -- An Azure AI Foundry project. Follow this [guide](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects) to create one if you don't have one. -- A serverless inference API. Follow this [guide](https://learn.microsoft.com/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. +- An Azure AI Foundry project. Follow this [guide](/azure/ai-studio/how-to/create-projects) to create one if you don't have one. +- A serverless inference API. Follow this [guide](/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. - Alternatively, you can attach an Azure OpenAI resource to the project, in which case you don't need to create a serverless API. 
## Attach an Application Insights resource to the project diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md index 56354c16..f066c170 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md @@ -478,7 +478,7 @@ Value: 16 Here you can see the name, the description, the unit, the time range, the type, the value of the metric, and the meter that the metric belongs to. > [!NOTE] -> The above metric is a Counter metric. For a full list of metric types, see [here](https://learn.microsoft.com/dotnet/core/diagnostics/metrics-instrumentation#types-of-instruments). Depending on the type of metric, the output may vary. +> The above metric is a Counter metric. For a full list of metric types, see [here](/dotnet/core/diagnostics/metrics-instrumentation#types-of-instruments). Depending on the type of metric, the output may vary. ::: zone-end From d635b4565fa5e0aa866a9d1456460a18d88a0982 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 12:59:27 +0900 Subject: [PATCH 009/117] Fix media link --- .../concepts/enterprise-readiness/observability/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/index.md b/semantic-kernel/concepts/enterprise-readiness/observability/index.md index 79b13241..c88edbe5 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/index.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/index.md @@ -37,7 +37,7 @@ Specifically, Semantic Kernel provides the following observability features: - **Metrics**: Semantic Kernel emits metrics from kernel functions and AI connectors. You will be able to monitor metrics such as the kernel function execution time, the token consumption of AI connectors, etc. ![Metrics](../../../media/telemetry-metrics-overview-app-insights.png) - **Tracing**: Semantic Kernel supports distributed tracing. You can track activities across different services and within Semantic Kernel. - ![Complete end-to-end transaction of a request](../../media/telemetry-trace-overview-app-insights.png) + ![Complete end-to-end transaction of a request](../../../media/telemetry-trace-overview-app-insights.png) ::: zone pivot="programming-language-csharp" From 78ad2e7ceb9b97500515f645efcfcee615be6998 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 13:14:22 +0900 Subject: [PATCH 010/117] Scope link per language --- .../agent/examples/example-agent-collaboration.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 802a309a..87fe4167 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -26,10 +26,11 @@ The approach will be broken down step-by-step to high-light the key parts of the Before proceeding with feature coding, make sure your development environment is fully set up and configured. -This sample uses an optional text file as part of processing. 
If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/3f22587de5a6f42b41bd268f237547e1034de7df/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt). Place the file in your code working directory. - ::: zone pivot="programming-language-csharp" +> [!TIP] +> This sample uses an optional text file as part of processing. If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt). Place the file in your code working directory. + Start by creating a _Console_ project. Then, include the following package references to ensure all required dependencies are available. To add package dependencies from the command-line use the `dotnet` command: @@ -70,6 +71,10 @@ The _Agent Framework_ is experimental and requires warning suppression. This ma ::: zone-end ::: zone pivot="programming-language-python" + +> [!TIP] +> This sample uses an optional text file as part of processing. If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/resources/WomensSuffrage.txt). Place the file in your code working directory. + Start by installing the Semantic Kernel Python package. ```bash From fa534a9ebc07895986ad804ccb6c028f2ef12dce Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 14 Feb 2025 13:34:57 +0900 Subject: [PATCH 011/117] More cleanup --- .../agent/examples/example-assistant-code.md | 12 ++++++++++-- .../Frameworks/agent/examples/example-chat-agent.md | 4 ++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index b39a5eb8..c9b79287 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -92,7 +92,7 @@ from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.kernel import Kernel ``` -Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from [_Semantic Kernel_ `LearnResources` Project](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files in your project folder. +Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from the [_Semantic Kernel_ `learn_resources/resources` directory](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files to your working directory. ::: zone-end ::: zone pivot="programming-language-java" @@ -238,6 +238,10 @@ OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByC ::: zone-end ::: zone pivot="programming-language-python" + +> [!TIP] +> You may need to adjust the file paths depending upon where your files are located. + ```python # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( @@ -264,9 +268,10 @@ You may need to modify the path creation code based on the storage location of y ### Agent Definition +::: zone pivot="programming-language-csharp" + We are now ready to instantiate an _OpenAI Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. 
-::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Defining agent..."); OpenAIAssistantAgent agent = @@ -290,6 +295,9 @@ OpenAIAssistantAgent agent = ::: zone-end ::: zone pivot="programming-language-python" + +We are now ready to instantiate an _Azure Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. + ```python agent = await AzureAssistantAgent.create( kernel=Kernel(), diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index 1905fb7c..589526ef 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -177,7 +177,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -343,7 +343,7 @@ agent = ChatCompletionAgent( The current date and time is: {{$now}}. """, arguments=KernelArguments( - settings=AzureAIPromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()), + settings=AzureChatPromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()), repository="microsoft/semantic-kernel", ), ) From 7666b338a0d27be026485e33203a903e14670fc4 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Mon, 17 Feb 2025 08:33:50 +0900 Subject: [PATCH 012/117] Add prompt template config import. Remove view from link in Python code pivot. --- semantic-kernel/Frameworks/agent/agent-templates.md | 2 ++ semantic-kernel/concepts/ai-services/chat-completion/index.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 49cb8ae2..36d0ad38 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -179,6 +179,8 @@ ChatCompletionAgent agent = ```python import yaml +from semantic_kernel.prompt_template import PromptTemplateConfig + # Read the YAML file with open("./GenerateStory.yaml", "r", encoding="utf-8") as file: generate_story_yaml = file.read() diff --git a/semantic-kernel/concepts/ai-services/chat-completion/index.md b/semantic-kernel/concepts/ai-services/chat-completion/index.md index 27305c4d..88d3eaea 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/index.md @@ -1274,7 +1274,7 @@ execution_settings = OnnxGenAIPromptExecutionSettings() --- > [!TIP] -> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). +> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](/python/api/semantic-kernel/semantic_kernel.connectors.ai). 
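As a quick illustration, commonly used settings such as token limits and sampling can be assigned when constructing the settings object. This sketch uses `OpenAIChatPromptExecutionSettings` with illustrative values; the available properties vary by connector:

```python
from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings

# Illustrative values only; each connector's settings class exposes its own properties.
execution_settings = OpenAIChatPromptExecutionSettings(
    max_tokens=400,
    temperature=0.7,
    top_p=0.9,
)
```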
::: zone-end From c03c0589e399d57f308a4a767d01ac33ecb14bcb Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Mon, 17 Feb 2025 09:42:50 +0900 Subject: [PATCH 013/117] Updates to callout reserved param names with Python function calling. --- .../chat-completion/function-calling/index.md | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md index f11b8df5..f122da5b 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md @@ -207,6 +207,41 @@ kernel.add_plugin(OrderPizzaPlugin(pizza_service, user_context, payment_service) > [!NOTE] > Only functions with the `kernel_function` decorator will be serialized and sent to the model. This allows you to have helper functions that are not exposed to the model. +## Reserved Parameter Names for Auto Function Calling + +When using auto function calling in KernelFunctions, certain parameter names are **reserved** and receive special handling. These reserved names allow you to automatically access key objects required for function execution. + +### Reserved Names + +The following parameter names are reserved: +- `kernel` +- `service` +- `execution_settings` +- `arguments` + +### How They Work + +During function invocation, the method [`gather_function_parameters`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_method.py#L148) inspects each parameter. If the parameter's name matches one of the reserved names, it is populated with specific objects: + +- **`kernel`**: Injected with the kernel object. +- **`service`**: Populated with the AI service selected based on the provided arguments. +- **`execution_settings`**: Contains settings pertinent to the function's execution. +- **`arguments`**: Receives the entire set of kernel arguments passed during invocation. + +This design ensures that these parameters are automatically managed, eliminating the need for manual extraction or assignment. + +### Example Usage + +Consider the following example: + +```python +class SimplePlugin: + @kernel_function(name="GetWeather", description="Get the weather for a location.") + async def get_the_weather(self, location: str, arguments: KernelArguments) -> str: + # The 'arguments' parameter is reserved and automatically populated with KernelArguments. + return f"Received user input: {location}, the weather is nice!" +``` + ::: zone-end ::: zone pivot="programming-language-java" From 039d3cd570c79cb8c8a23b016cda99aa3e63f2f9 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Mon, 17 Feb 2025 14:55:06 +0100 Subject: [PATCH 014/117] updated filters page --- .../concepts/enterprise-readiness/filters.md | 154 ++++++++++++++++-- 1 file changed, 141 insertions(+), 13 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 73adc461..286d3673 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -9,13 +9,11 @@ ms.date: 09/10/2024 ms.service: semantic-kernel --- -::: zone pivot="programming-language-csharp" - # What are Filters? Filters enhance security by providing control and visibility over how and when functions run. 
This is needed to instill responsible AI principles into your work so that you feel confident your solution is enterprise ready. -For example, filters are leveraged to validate permissions before an approval flow begins. The `IFunctionInvocationFilter` is run to check the permissions of the person that’s looking to submit an approval. This means that only a select group of people will be able to kick off the process. +For example, filters are leveraged to validate permissions before an approval flow begins. The filter runs to check the permissions of the person that’s looking to submit an approval. This means that only a select group of people will be able to kick off the process. A good example of filters is provided [here](https://devblogs.microsoft.com/semantic-kernel/filters-in-semantic-kernel/) in our detailed Semantic Kernel blog post on Filters.   @@ -45,7 +43,9 @@ For cases where filter order is important, it is recommended to add filters dire ## Function Invocation Filter -This filter is triggered every time a Semantic Kernel function is invoked, regardless of whether it is a function created from a prompt or a C# method. +This filter is triggered every time a Semantic Kernel function is invoked, regardless of whether it is a function created from a prompt or a method. + +::: zone pivot="programming-language-csharp" ```csharp /// @@ -80,14 +80,94 @@ Add filter using `Kernel` property: kernel.FunctionInvocationFilters.Add(new LoggingFilter(logger)); ``` + ### Code examples * [Function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/FunctionInvocationFiltering.cs) +* +::: zone-end +::: zone pivot="programming-language-python" + +```python + +import logging +from typing import Awaitable, Callable +from semantic_kernel.filters import FunctionInvocationContext + +logger = logging.getLogger(__name__) + +async def logger_filter(context: FunctionInvocationContext, next: Callable[[FunctionInvocationContext], Awaitable[None]]) -> None: + logger.info(f"FunctionInvoking - {context.function.plugin_name}.{context.function.name}") + + await next(context) + + logger.info(f"FunctionInvoked - {context.function.plugin_name}.{context.function.name}") + +# Add filter to the kernel +kernel.add_filter('function_invocation', logger_filter) + +``` + +You can also add a filter directly to the kernel: + +```python + +@kernel.filter('function_invocation') +async def logger_filter(context: FunctionInvocationContext, next: Callable[[FunctionInvocationContext], Awaitable[None]]) -> None: + logger.info(f"FunctionInvoking - {context.function.plugin_name}.{context.function.name}") + + await next(context) + + logger.info(f"FunctionInvoked - {context.function.plugin_name}.{context.function.name}") +``` + + +### Streaming invocation + +Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns `AsyncGenerator`, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must be of type `AsyncGenerator`, whereas in non-streaming mode, it can simply be of type `T`. 
+ +So to build a simple logger filter for streaming, you would use something like this: + +```python +@kernel.filter(FilterTypes.FUNCTION_INVOCATION) +async def streaming_exception_handling( + context: FunctionInvocationContext, + next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]], +): + await next(context) + + async def override_stream(stream): + try: + async for partial in stream: + yield partial + except Exception as e: + yield [ + StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}", choice_index=0) + ] + + stream = context.result.value + context.result = FunctionResult(function=context.result.function, value=override_stream(stream)) +``` + +### Code examples +* [Function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/function_invocation_filters.py) +* [Streaming function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/function_invocation_filters_stream.py) + +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +More info coming soon. + +::: zone-end ## Prompt Render Filter This filter is invoked only during a prompt rendering operation, such as when a function created from a prompt is called. It will not be triggered for Semantic Kernel functions created from methods. +::: zone pivot="programming-language-csharp" + ```csharp /// /// Example of prompt render filter which overrides rendered prompt before sending it to AI. @@ -127,10 +207,37 @@ kernel.PromptRenderFilters.Add(new SafePromptFilter()); * [Prompt render filter examples](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/PromptRenderFiltering.cs) + +::: zone-end +::: zone pivot="programming-language-python" + +```python +from semantic_kernel.filters import FilterTypes, PromptRenderContext + +@kernel.filter(FilterTypes.PROMPT_RENDERING) +async def prompt_rendering_filter(context: PromptRenderContext, next): + await next(context) + context.rendered_prompt = f"You pretend to be Mosscap, but you are Papssom who is the opposite of Moscapp in every way {context.rendered_prompt or ''}" +``` + +### Code examples +* [Prompt render filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/prompt_filters.py) + +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +More info coming soon. + +::: zone-end + ## Auto Function Invocation Filter This filter is invoked only during an automatic function calling process. It will not be triggered when a function is invoked outside of this process. +::: zone pivot="programming-language-csharp" + ```csharp /// /// Example of auto function invocation filter which terminates function calling process as soon as we have the desired result. 
@@ -175,6 +282,34 @@ kernel.AutoFunctionInvocationFilters.Add(new EarlyTerminationFilter()); * [Auto function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/AutoFunctionInvocationFiltering.cs) +::: zone-end +::: zone pivot="programming-language-python" + +```python + +from semantic_kernel.filters import FilterTypes, AutoFunctionInvocationContext + +@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION) +async def auto_function_invocation_filter(context: AutoFunctionInvocationContext, next): + await next(context) + if context.function_result == "desired result": + context.terminate = True +``` + +### Code examples +* [Auto function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/auto_function_invoke_filters.py) + + +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +More info coming soon. + +::: zone-end +::: zone pivot="programming-language-csharp" + ## Streaming and non-streaming invocation Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns `IAsyncEnumerable`, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must be of type `IAsyncEnumerable`, whereas in non-streaming mode, it can simply be of type `T`. To determine which result type needs to be returned, the `context.IsStreaming` flag is available in the filter context model. @@ -245,15 +380,8 @@ ChatMessageContent result = await chatCompletionService.GetChatMessageContentAsy ::: zone-end ::: zone pivot="programming-language-python" -## Coming soon - -More info coming soon. +## More examples: -::: zone-end -::: zone pivot="programming-language-java" - -## Coming soon - -More info coming soon. +* [Retry logic with a filter](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/retry_with_filters.py) ::: zone-end \ No newline at end of file From 39f9534ac03a308084509fa21ac7608a18d2c37a Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Tue, 18 Feb 2025 08:17:22 +0900 Subject: [PATCH 015/117] Python: merge Python docs updates from live to main (#464) * Improve Python agent learn site samples. * Include links to repo code. * Remove fixed locale from link * Fix python sample resource link * Use site relative links for learn site links. They don't need to be absolute. * Fix media link * Scope link per language * More cleanup * Add prompt template config import. Remove view from link in Python code pivot. * Updates to callout reserved param names with Python function calling. 
--- README.md | 6 +- .../Frameworks/agent/agent-templates.md | 2 + .../examples/example-agent-collaboration.md | 425 ++++++++++-------- .../agent/examples/example-assistant-code.md | 97 ++-- .../examples/example-assistant-search.md | 7 +- .../agent/examples/example-chat-agent.md | 6 +- .../Frameworks/process/process-deployment.md | 2 +- .../chat-completion/function-calling/index.md | 35 ++ .../ai-services/chat-completion/index.md | 6 +- .../observability/index.md | 10 +- .../observability/telemetry-advanced.md | 4 +- .../telemetry-with-aspire-dashboard.md | 10 +- ...telemetry-with-azure-ai-foundry-tracing.md | 6 +- .../observability/telemetry-with-console.md | 2 +- 14 files changed, 370 insertions(+), 248 deletions(-) diff --git a/README.md b/README.md index 75500b2b..e6f7a7f1 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # Microsoft Semantic Kernel Documentation -This is the GitHub repository for the technical product documentation for **Semantic Kernel**. This documentation is published at [Microsoft Semantic Kernel documentation](https://learn.microsoft.com/semantic-kernel). +This is the GitHub repository for the technical product documentation for **Semantic Kernel**. This documentation is published at [Microsoft Semantic Kernel documentation](/semantic-kernel). ## How to contribute -Thanks for your interest in [contributing](https://learn.microsoft.com/), home of technical content for Microsoft products and services. +Thanks for your interest in contributing to the home of technical content for Microsoft products and services. -To learn how to make contributions to the content in this repository, start with our [Docs contributor guide](https://learn.microsoft.com/contribute). +To learn how to make contributions to the content in this repository, start with our [Docs contributor guide](/contribute). ## Code of conduct diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 49cb8ae2..36d0ad38 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -179,6 +179,8 @@ ChatCompletionAgent agent = ```python import yaml +from semantic_kernel.prompt_template import PromptTemplateConfig + # Read the YAML file with open("./GenerateStory.yaml", "r", encoding="utf-8") as file: generate_story_yaml = file.read() diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 3206ad89..87fe4167 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -28,6 +28,9 @@ Before proceeding with feature coding, make sure your development environment is ::: zone pivot="programming-language-csharp" +> [!TIP] +> This sample uses an optional text file as part of processing. If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/LearnResources/Resources/WomensSuffrage.txt). Place the file in your code working directory. + Start by creating a _Console_ project. Then, include the following package references to ensure all required dependencies are available. To add package dependencies from the command-line use the `dotnet` command: @@ -68,11 +71,20 @@ The _Agent Framework_ is experimental and requires warning suppression. 
This ma ::: zone-end ::: zone pivot="programming-language-python" + +> [!TIP] +> This sample uses an optional text file as part of processing. If you'd like to use it, you may download it [here](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/resources/WomensSuffrage.txt). Place the file in your code working directory. + +Start by installing the Semantic Kernel Python package. + +```bash +pip install semantic-kernel +``` + ```python import asyncio import os import copy -import pyperclip # Install via pip from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( @@ -167,7 +179,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -254,11 +266,6 @@ toolKernel.Plugins.AddFromType(); ``` ::: zone-end -::: zone pivot="programming-language-python" -```python -tool_kernel = copy.deepcopy(kernel) -tool_kernel.add_plugin(ClipboardAccess(), plugin_name="clipboard") -``` ::: zone-end ::: zone pivot="programming-language-java" @@ -267,9 +274,9 @@ tool_kernel.add_plugin(ClipboardAccess(), plugin_name="clipboard") ::: zone-end +::: zone pivot="programming-language-csharp" The _Clipboard_ plugin may be defined as part of the sample. -::: zone pivot="programming-language-csharp" ```csharp private sealed class ClipboardAccess { @@ -297,21 +304,6 @@ private sealed class ClipboardAccess ``` ::: zone-end -::: zone pivot="programming-language-python" - -Note: we are leveraging a Python package called pyperclip. Please install is using pip. - -```python -class ClipboardAccess: - @kernel_function - def set_clipboard(content: str): - if not content.strip(): - return - - pyperclip.copy(content) -``` -::: zone-end - ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. @@ -320,9 +312,9 @@ class ClipboardAccess: ### Agent Definition +::: zone pivot="programming-language-csharp" Let's declare the agent names as `const` so they might be referenced in _Agent Group Chat_ strategies: -::: zone pivot="programming-language-csharp" ```csharp const string ReviewerName = "Reviewer"; const string WriterName = "Writer"; @@ -330,6 +322,9 @@ const string WriterName = "Writer"; ::: zone-end ::: zone pivot="programming-language-python" + +We will declare the agent names as "Reviewer" and "Writer." + ```python REVIEWER_NAME = "Reviewer" COPYWRITER_NAME = "Writer" @@ -379,23 +374,20 @@ ChatCompletionAgent agentReviewer = ::: zone pivot="programming-language-python" ```python agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, - kernel=_create_kernel_with_chat_completion(REVIEWER_NAME), - name=REVIEWER_NAME, - instructions=""" - Your responsiblity is to review and identify how to improve user provided content. - If the user has providing input or direction for content already provided, specify how to - address this input. - Never directly perform the correction or provide example. - Once the content has been updated in a subsequent response, you will review the content - again until satisfactory. - Always copy satisfactory content to the clipboard using available tools and inform user. - - RULES: - - Only identify suggestions that are specific and actionable. - - Verify previous suggestions have been addressed. 
- - Never repeat previous suggestions. - """, + service_id=REVIEWER_NAME, + kernel=kernel, + name=REVIEWER_NAME, + instructions=""" +Your responsibility is to review and identify how to improve user provided content. +If the user has provided input or direction for content already provided, specify how to address this input. +Never directly perform the correction or provide an example. +Once the content has been updated in a subsequent response, review it again until it is satisfactory. + +RULES: +- Only identify suggestions that are specific and actionable. +- Verify previous suggestions have been addressed. +- Never repeat previous suggestions. +""", ) ``` ::: zone-end @@ -406,11 +398,11 @@ agent_reviewer = ChatCompletionAgent( ::: zone-end -The _Writer_ agent is is similiar, but doesn't require the specification of _Execution Settings_ since it isn't configured with a plug-in. +::: zone pivot="programming-language-csharp" +The _Writer_ agent is similiar, but doesn't require the specification of _Execution Settings_ since it isn't configured with a plug-in. Here the _Writer_ is given a single-purpose task, follow direction and rewrite the content. -::: zone pivot="programming-language-csharp" ```csharp ChatCompletionAgent agentWriter = new() @@ -430,19 +422,19 @@ ChatCompletionAgent agentWriter = ::: zone-end ::: zone pivot="programming-language-python" +The _Writer_ agent is similiar. It is given a single-purpose task, follow direction and rewrite the content. ```python agent_writer = ChatCompletionAgent( - service_id=COPYWRITER_NAME, - kernel=_create_kernel_with_chat_completion(COPYWRITER_NAME), - name=COPYWRITER_NAME, - instructions=""" - Your sole responsiblity is to rewrite content according to review suggestions. - - - Always apply all review direction. - - Always revise the content in its entirety without explanation. - - Never address the user. - """, -) + service_id=WRITER_NAME, + kernel=kernel, + name=WRITER_NAME, + instructions=""" +Your sole responsibility is to rewrite content according to review suggestions. +- Always apply all review directions. +- Always revise the content in its entirety without explanation. +- Never address the user. +""", + ) ``` ::: zone-end @@ -489,25 +481,25 @@ KernelFunction selectionFunction = ::: zone pivot="programming-language-python" ```python selection_function = KernelFunctionFromPrompt( - function_name="selection", - prompt=f""" - Determine which participant takes the next turn in a conversation based on the the most recent participant. - State only the name of the participant to take the next turn. - No participant should take more than one turn in a row. - - Choose only from these participants: - - {REVIEWER_NAME} - - {COPYWRITER_NAME} - - Always follow these rules when selecting the next participant: - - After user input, it is {COPYWRITER_NAME}'s turn. - - After {COPYWRITER_NAME} replies, it is {REVIEWER_NAME}'s turn. - - After {REVIEWER_NAME} provides feedback, it is {COPYWRITER_NAME}'s turn. - - History: - {{{{$history}}}} - """, -) + function_name="selection", + prompt=f""" +Examine the provided RESPONSE and choose the next participant. +State only the name of the chosen participant without explanation. +Never choose the participant named in the RESPONSE. + +Choose only from these participants: +- {REVIEWER_NAME} +- {WRITER_NAME} + +Rules: +- If RESPONSE is user input, it is {REVIEWER_NAME}'s turn. +- If RESPONSE is by {REVIEWER_NAME}, it is {WRITER_NAME}'s turn. +- If RESPONSE is by {WRITER_NAME}, it is {REVIEWER_NAME}'s turn. 
+ +RESPONSE: +{{{{$lastmessage}}}} +""" + ) ``` ::: zone-end @@ -540,20 +532,20 @@ KernelFunction terminationFunction = ::: zone pivot="programming-language-python" ```python -TERMINATION_KEYWORD = "yes" - -termination_function = KernelFunctionFromPrompt( - function_name="termination", - prompt=f""" - Examine the RESPONSE and determine whether the content has been deemed satisfactory. - If content is satisfactory, respond with a single word without explanation: {TERMINATION_KEYWORD}. - If specific suggestions are being provided, it is not satisfactory. - If no correction is suggested, it is satisfactory. + termination_keyword = "yes" - RESPONSE: - {{{{$history}}}} - """, -) + termination_function = KernelFunctionFromPrompt( + function_name="termination", + prompt=f""" +Examine the RESPONSE and determine whether the content has been deemed satisfactory. +If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. +If specific suggestions are being provided, it is not satisfactory. +If no correction is suggested, it is satisfactory. + +RESPONSE: +{{{{$lastmessage}}}} +""" + ) ``` ::: zone-end @@ -573,7 +565,7 @@ ChatHistoryTruncationReducer historyReducer = new(1); ::: zone pivot="programming-language-python" ```python -**ChatHistoryReducer is coming soon to Python.** +history_reducer = ChatHistoryTruncationReducer(target_count=1) ``` ::: zone-end @@ -644,26 +636,28 @@ Creating `AgentGroupChat` involves: Notice that each strategy is responsible for parsing the `KernelFunction` result. ```python chat = AgentGroupChat( - agents=[agent_writer, agent_reviewer], + agents=[agent_reviewer, agent_writer], selection_strategy=KernelFunctionSelectionStrategy( + initial_agent=agent_reviewer, function=selection_function, - kernel=_create_kernel_with_chat_completion("selection"), - result_parser=lambda result: str(result.value[0]) if result.value is not None else COPYWRITER_NAME, - agent_variable_name="agents", - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: str(result.value[0]).strip() if result.value[0] is not None else WRITER_NAME, + history_variable_name="lastmessage", history_reducer=history_reducer, ), termination_strategy=KernelFunctionTerminationStrategy( agents=[agent_reviewer], function=termination_function, - kernel=_create_kernel_with_chat_completion("termination"), - result_parser=lambda result: TERMINATION_KEYWORD in str(result.value[0]).lower(), - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: termination_keyword in str(result.value[0]).lower(), + history_variable_name="lastmessage", maximum_iterations=10, history_reducer=history_reducer, ), ) ``` + +The `lastmessage` `history_variable_name` corresponds with the `KernelFunctionSelectionStrategy` and the `KernelFunctionTerminationStrategy` prompt that was defined above. This is where the last message is placed when rendering the prompt. ::: zone-end ::: zone pivot="programming-language-java" @@ -702,15 +696,14 @@ while not is_complete: ::: zone-end +::: zone pivot="programming-language-csharp" Now let's capture user input within the previous loop. In this case: - Empty input will be ignored - The term `EXIT` will signal that the conversation is completed - The term `RESET` will clear the _Agent Group Chat_ history - Any term starting with `@` will be treated as a file-path whose content will be provided as input -- Valid input will be added to the _Agent Group Chaty_ as a _User_ message. 
- +- Valid input will be added to the _Agent Group Chat_ as a _User_ message. -::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine(); Console.Write("> "); @@ -757,8 +750,18 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); ::: zone-end ::: zone pivot="programming-language-python" +Now let's capture user input within the previous loop. In this case: +- Empty input will be ignored. +- The term `exit` will signal that the conversation is complete. +- The term `reset` will clear the _Agent Group Chat_ history. +- Any term starting with `@` will be treated as a file-path whose content will be provided as input. +- Valid input will be added to the _Agent Group Chat_ as a _User_ message. + +The operation logic inside the while loop looks like: + ```python -user_input = input("User:> ") +print() +user_input = input("User > ").strip() if not user_input: continue @@ -771,18 +774,22 @@ if user_input.lower() == "reset": print("[Conversation has been reset]") continue -if user_input.startswith("@") and len(input) > 1: - file_path = input[1:] +# Try to grab files from the script's current directory +if user_input.startswith("@") and len(user_input) > 1: + file_name = user_input[1:] + script_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.join(script_dir, file_name) try: if not os.path.exists(file_path): print(f"Unable to access file: {file_path}") continue - with open(file_path) as file: + with open(file_path, "r", encoding="utf-8") as file: user_input = file.read() except Exception: print(f"Unable to access file: {file_path}") continue +# Add the current user_input to the chat await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -826,13 +833,17 @@ catch (HttpOperationException exception) ::: zone pivot="programming-language-python" ```python -chat.is_complete = False -async for response in chat.invoke(): - print(f"# {response.role} - {response.name or '*'}: '{response.content}'") +try: + async for response in chat.invoke(): + if response is None or not response.name: + continue + print() + print(f"# {response.name.upper()}:\n{response.content}") +except Exception as e: + print(f"Error during chat invocation: {e}") -if chat.is_complete: - is_complete = True - break +# Reset the chat's complete flag for the new conversation round. +chat.is_complete = False ``` ::: zone-end @@ -845,6 +856,8 @@ if chat.is_complete: ## Final +::: zone pivot="programming-language-csharp" + Bringing all the steps together, we have the final code for this example. The complete implementation is provided below. Try using these suggested inputs: @@ -852,14 +865,12 @@ Try using these suggested inputs: 1. Hi 2. {"message: "hello world"} 3. {"message": "hello world"} -4. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define thei relationships, and orchestrate interactions between them. +4. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. 
It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define their relationships, and orchestrate interactions between them. 5. make this two paragraphs 6. thank you 7. @.\WomensSuffrage.txt 8. its good, but is it ready for my college professor? - -::: zone pivot="programming-language-csharp" ```csharp // Copyright (c) Microsoft. All rights reserved. @@ -1114,12 +1125,28 @@ public static class Program ::: zone-end ::: zone pivot="programming-language-python" + +Bringing all the steps together, we now have the final code for this example. The complete implementation is shown below. + +You can try using one of the suggested inputs. As the agent chat begins, the agents will exchange messages for several iterations until the reviewer agent is satisfied with the copywriter's work. The `while` loop ensures the conversation continues, even if the chat is initially considered complete, by resetting the `is_complete` flag to `False`. + +1. Rozes are red, violetz are blue. +2. Semantic Kernel (SK) is an open-source SDK that enables developers to build and orchestrate complex AI workflows that involve natural language processing (NLP) and machine learning models. It provies a flexible platform for integrating AI capabilities such as semantic search, text summarization, and dialogue systems into applications. With SK, you can easily combine different AI services and models, define their relationships, and orchestrate interactions between them. +4. Make this two paragraphs +5. thank you +7. @WomensSuffrage.txt +8. It's good, but is it ready for my college professor? + +> [!TIP] +> You can reference any file by providing `@`. To reference the "WomensSuffrage" text from above, download it [here](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/resources/WomensSuffrage.txt) and place it in your current working directory. You can then reference it with `@WomensSuffrage.txt`. + ```python # Copyright (c) Microsoft. All rights reserved. import asyncio import os +from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( KernelFunctionSelectionStrategy, @@ -1128,12 +1155,10 @@ from semantic_kernel.agents.strategies.termination.kernel_function_termination_s KernelFunctionTerminationStrategy, ) from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents import ChatHistoryTruncationReducer from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.history_reducer.chat_history_truncation_reducer import ChatHistoryTruncationReducer from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_decorator import kernel_function from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt -from semantic_kernel.kernel import Kernel ################################################################### # The following sample demonstrates how to create a simple, # @@ -1142,122 +1167,123 @@ from semantic_kernel.kernel import Kernel # complete a user's task. 
# ################################################################### - -class ClipboardAccess: - @kernel_function - def set_clipboard(content: str): - if not content.strip(): - return - - pyperclip.copy(content) - - +# Define agent names REVIEWER_NAME = "Reviewer" -COPYWRITER_NAME = "Writer" +WRITER_NAME = "Writer" -def _create_kernel_with_chat_completion(service_id: str) -> Kernel: +def create_kernel() -> Kernel: + """Creates a Kernel instance with an Azure OpenAI ChatCompletion service.""" kernel = Kernel() - kernel.add_service(AzureChatCompletion(service_id=service_id)) + kernel.add_service(service=AzureChatCompletion()) return kernel async def main(): + # Create a single kernel instance for all agents. + kernel = create_kernel() + + # Create ChatCompletionAgents using the same kernel. agent_reviewer = ChatCompletionAgent( service_id=REVIEWER_NAME, - kernel=_create_kernel_with_chat_completion(REVIEWER_NAME), + kernel=kernel, name=REVIEWER_NAME, instructions=""" - Your responsiblity is to review and identify how to improve user provided content. - If the user has providing input or direction for content already provided, specify how to - address this input. - Never directly perform the correction or provide example. - Once the content has been updated in a subsequent response, you will review the content - again until satisfactory. - Always copy satisfactory content to the clipboard using available tools and inform user. - - RULES: - - Only identify suggestions that are specific and actionable. - - Verify previous suggestions have been addressed. - - Never repeat previous suggestions. - """, +Your responsibility is to review and identify how to improve user provided content. +If the user has provided input or direction for content already provided, specify how to address this input. +Never directly perform the correction or provide an example. +Once the content has been updated in a subsequent response, review it again until it is satisfactory. + +RULES: +- Only identify suggestions that are specific and actionable. +- Verify previous suggestions have been addressed. +- Never repeat previous suggestions. +""", ) agent_writer = ChatCompletionAgent( - service_id=COPYWRITER_NAME, - kernel=_create_kernel_with_chat_completion(COPYWRITER_NAME), - name=COPYWRITER_NAME, + service_id=WRITER_NAME, + kernel=kernel, + name=WRITER_NAME, instructions=""" - Your sole responsiblity is to rewrite content according to review suggestions. - - - Always apply all review direction. - - Always revise the content in its entirety without explanation. - - Never address the user. - """, +Your sole responsibility is to rewrite content according to review suggestions. +- Always apply all review directions. +- Always revise the content in its entirety without explanation. +- Never address the user. +""", ) + # Define a selection function to determine which agent should take the next turn. selection_function = KernelFunctionFromPrompt( function_name="selection", prompt=f""" - Determine which participant takes the next turn in a conversation based on the the most recent participant. - State only the name of the participant to take the next turn. - No participant should take more than one turn in a row. - - Choose only from these participants: - - {REVIEWER_NAME} - - {COPYWRITER_NAME} - - Always follow these rules when selecting the next participant: - - After user input, it is {COPYWRITER_NAME}'s turn. - - After {COPYWRITER_NAME} replies, it is {REVIEWER_NAME}'s turn. 
- - After {REVIEWER_NAME} provides feedback, it is {COPYWRITER_NAME}'s turn. - - History: - {{{{$history}}}} - """, +Examine the provided RESPONSE and choose the next participant. +State only the name of the chosen participant without explanation. +Never choose the participant named in the RESPONSE. + +Choose only from these participants: +- {REVIEWER_NAME} +- {WRITER_NAME} + +Rules: +- If RESPONSE is user input, it is {REVIEWER_NAME}'s turn. +- If RESPONSE is by {REVIEWER_NAME}, it is {WRITER_NAME}'s turn. +- If RESPONSE is by {WRITER_NAME}, it is {REVIEWER_NAME}'s turn. + +RESPONSE: +{{{{$lastmessage}}}} +""", ) - TERMINATION_KEYWORD = "yes" + # Define a termination function where the reviewer signals completion with "yes". + termination_keyword = "yes" termination_function = KernelFunctionFromPrompt( function_name="termination", prompt=f""" - Examine the RESPONSE and determine whether the content has been deemed satisfactory. - If content is satisfactory, respond with a single word without explanation: {TERMINATION_KEYWORD}. - If specific suggestions are being provided, it is not satisfactory. - If no correction is suggested, it is satisfactory. - - RESPONSE: - {{{{$history}}}} - """, +Examine the RESPONSE and determine whether the content has been deemed satisfactory. +If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. +If specific suggestions are being provided, it is not satisfactory. +If no correction is suggested, it is satisfactory. + +RESPONSE: +{{{{$lastmessage}}}} +""", ) - history_reducer = ChatHistoryTruncationReducer(target_count=1) + history_reducer = ChatHistoryTruncationReducer(target_count=5) + # Create the AgentGroupChat with selection and termination strategies. chat = AgentGroupChat( - agents=[agent_writer, agent_reviewer], + agents=[agent_reviewer, agent_writer], selection_strategy=KernelFunctionSelectionStrategy( + initial_agent=agent_reviewer, function=selection_function, - kernel=_create_kernel_with_chat_completion("selection"), - result_parser=lambda result: str(result.value[0]) if result.value is not None else COPYWRITER_NAME, - agent_variable_name="agents", - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: str(result.value[0]).strip() if result.value[0] is not None else WRITER_NAME, + history_variable_name="lastmessage", history_reducer=history_reducer, ), termination_strategy=KernelFunctionTerminationStrategy( agents=[agent_reviewer], function=termination_function, - kernel=_create_kernel_with_chat_completion("termination"), - result_parser=lambda result: TERMINATION_KEYWORD in str(result.value[0]).lower(), - history_variable_name="history", + kernel=kernel, + result_parser=lambda result: termination_keyword in str(result.value[0]).lower(), + history_variable_name="lastmessage", maximum_iterations=10, history_reducer=history_reducer, ), ) - is_complete: bool = False + print( + "Ready! Type your input, or 'exit' to quit, 'reset' to restart the conversation. " + "You may pass in a file path using @." 
+ ) + + is_complete = False while not is_complete: - user_input = input("User:> ") + print() + user_input = input("User > ").strip() if not user_input: continue @@ -1270,31 +1296,42 @@ async def main(): print("[Conversation has been reset]") continue - if user_input.startswith("@") and len(input) > 1: - file_path = input[1:] + # Try to grab files from the script's current directory + if user_input.startswith("@") and len(user_input) > 1: + file_name = user_input[1:] + script_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.join(script_dir, file_name) try: if not os.path.exists(file_path): print(f"Unable to access file: {file_path}") continue - with open(file_path) as file: + with open(file_path, "r", encoding="utf-8") as file: user_input = file.read() except Exception: print(f"Unable to access file: {file_path}") continue + # Add the current user_input to the chat await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) - async for response in chat.invoke(): - print(f"# {response.role} - {response.name or '*'}: '{response.content}'") + try: + async for response in chat.invoke(): + if response is None or not response.name: + continue + print() + print(f"# {response.name.upper()}:\n{response.content}") + except Exception as e: + print(f"Error during chat invocation: {e}") - if chat.is_complete: - is_complete = True - break + # Reset the chat's complete flag for the new conversation round. + chat.is_complete = False if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/agent_collaboration.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 67aa0290..c9b79287 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -92,7 +92,7 @@ from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.kernel import Kernel ``` -Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from [_Semantic Kernel_ `LearnResources` Project](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files in your project folder. +Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from the [_Semantic Kernel_ `learn_resources/resources` directory](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files to your working directory. ::: zone-end ::: zone pivot="programming-language-java" @@ -172,7 +172,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -181,6 +181,9 @@ OPENAI_ORG_ID="" OPENAI_CHAT_MODEL_ID="" ``` +[!TIP] +Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. 
For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](/azure/ai-services/openai/api-version-deprecation). + Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end @@ -235,18 +238,26 @@ OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByC ::: zone-end ::: zone pivot="programming-language-python" + +> [!TIP] +> You may need to adjust the file paths depending upon where your files are located. + ```python # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByAdmin1.csv", ) csv_file_path_2 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByCountry.csv", ) ``` +You may need to modify the path creation code based on the storage location of your CSV files. + ::: zone-end ::: zone pivot="programming-language-java" @@ -257,9 +268,10 @@ csv_file_path_2 = os.path.join( ### Agent Definition +::: zone pivot="programming-language-csharp" + We are now ready to instantiate an _OpenAI Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. -::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Defining agent..."); OpenAIAssistantAgent agent = @@ -283,20 +295,23 @@ OpenAIAssistantAgent agent = ::: zone-end ::: zone pivot="programming-language-python" + +We are now ready to instantiate an _Azure Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. + ```python agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", - instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], - ) + kernel=Kernel(), + service_id="agent", + name="SampleAssistantAgent", + instructions=""" + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. + """, + enable_code_interpreter=True, + code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], +) ``` ::: zone-end @@ -722,7 +737,10 @@ public static class Program ::: zone pivot="programming-language-python" ```python +# Copyright (c) Microsoft. All rights reserved. 
+ import asyncio +import logging import os from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent @@ -731,19 +749,29 @@ from semantic_kernel.contents.streaming_file_reference_content import StreamingF from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.kernel import Kernel +logging.basicConfig(level=logging.ERROR) + +################################################################### +# The following sample demonstrates how to create a simple, # +# OpenAI assistant agent that utilizes the code interpreter # +# to analyze uploaded files. # +################################################################### + # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByAdmin1.csv", ) csv_file_path_2 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", "PopulationByCountry.csv", ) -async def download_file_content(agent, file_id: str): +async def download_file_content(agent: AzureAssistantAgent, file_id: str): try: # Fetch the content of the file using the provided method response_content = await agent.client.files.content(file_id) @@ -766,7 +794,7 @@ async def download_file_content(agent, file_id: str): print(f"An error occurred while downloading file {file_id}: {str(e)}") -async def download_response_image(agent, file_ids: list[str]): +async def download_response_image(agent: AzureAssistantAgent, file_ids: list[str]): if file_ids: # Iterate over file_ids and download each one for file_id in file_ids: @@ -801,30 +829,41 @@ async def main(): if user_input.lower() == "exit": is_complete = True - break await agent.add_chat_message( thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) ) - is_code: bool = False - async for response in agent.invoke_stream(thread_id=thread_id): - if is_code != response.metadata.get("code"): - print() - is_code = not is_code - - print(f"{response.content}", end="", flush=True) + is_code = False + last_role = None + async for response in agent.invoke_stream(thread_id=thread_id): + current_is_code = response.metadata.get("code", False) + + if current_is_code: + if not is_code: + print("\n\n```python") + is_code = True + print(response.content, end="", flush=True) + else: + if is_code: + print("\n```") + is_code = False + last_role = None + if hasattr(response, "role") and response.role is not None and last_role != response.role: + print(f"\n# {response.role}: ", end="", flush=True) + last_role = response.role + print(response.content, end="", flush=True) file_ids.extend([ item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) ]) - - print() + if is_code: + print("```\n") await download_response_image(agent, file_ids) file_ids.clear() finally: - print("Cleaning up resources...") + print("\nCleaning up resources...") if agent is not None: [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] await agent.delete_thread(thread_id) @@ -834,6 +873,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/assistant_code.py), as shown above, in our repo. 
::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index ca3c70b3..f38a28d4 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -174,7 +174,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -183,6 +183,9 @@ OPENAI_ORG_ID="" OPENAI_CHAT_MODEL_ID="" ``` +> [!TIP] +> Azure Assistants require an API version of at least 2024-05-01-preview. As new features are introduced, API versions are updated accordingly. As of this writing, the latest version is 2025-01-01-preview. For the most up-to-date versioning details, refer to the [Azure OpenAI API preview lifecycle](/azure/ai-services/openai/api-version-deprecation). + Once configured, the respective AI service classes will pick up the required variables and use them during instantiation. ::: zone-end @@ -780,6 +783,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/assistant_search.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index 7870d9ed..589526ef 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -177,7 +177,7 @@ Configure the following settings in your `.env` file for either Azure OpenAI or ```python AZURE_OPENAI_API_KEY="..." -AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." AZURE_OPENAI_API_VERSION="..." @@ -343,7 +343,7 @@ agent = ChatCompletionAgent( The current date and time is: {{$now}}. """, arguments=KernelArguments( - settings=AzureAIPromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()), + settings=AzureChatPromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()), repository="microsoft/semantic-kernel", ), ) @@ -669,6 +669,8 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) ``` + +You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/agent_docs/chat_agent.py), as shown above, in our repo. ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/Frameworks/process/process-deployment.md b/semantic-kernel/Frameworks/process/process-deployment.md index 3be467a4..8ba8aa0e 100644 --- a/semantic-kernel/Frameworks/process/process-deployment.md +++ b/semantic-kernel/Frameworks/process/process-deployment.md @@ -18,7 +18,7 @@ The Process Framework provides an in-process runtime that allows developers to r ## Cloud Runtimes -For scenarios requiring scalability and distributed processing, the Process Framework supports cloud runtimes such as [**Orleans**](https://learn.microsoft.com/dotnet/orleans/overview) and [**Dapr**](https://dapr.io/). 
These options empower developers to deploy processes in a distributed manner, facilitating high availability and load balancing across multiple instances. By leveraging these cloud runtimes, organizations can streamline their operations and manage substantial workloads with ease. +For scenarios requiring scalability and distributed processing, the Process Framework supports cloud runtimes such as [**Orleans**](/dotnet/orleans/overview) and [**Dapr**](https://dapr.io/). These options empower developers to deploy processes in a distributed manner, facilitating high availability and load balancing across multiple instances. By leveraging these cloud runtimes, organizations can streamline their operations and manage substantial workloads with ease. - **Orleans Runtime:** This framework provides a programming model for building distributed applications and is particularly well-suited for handling virtual actors in a resilient manner, complementing the Process Framework’s event-driven architecture. diff --git a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md index f11b8df5..f122da5b 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/index.md @@ -207,6 +207,41 @@ kernel.add_plugin(OrderPizzaPlugin(pizza_service, user_context, payment_service) > [!NOTE] > Only functions with the `kernel_function` decorator will be serialized and sent to the model. This allows you to have helper functions that are not exposed to the model. +## Reserved Parameter Names for Auto Function Calling + +When using auto function calling in KernelFunctions, certain parameter names are **reserved** and receive special handling. These reserved names allow you to automatically access key objects required for function execution. + +### Reserved Names + +The following parameter names are reserved: +- `kernel` +- `service` +- `execution_settings` +- `arguments` + +### How They Work + +During function invocation, the method [`gather_function_parameters`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_method.py#L148) inspects each parameter. If the parameter's name matches one of the reserved names, it is populated with specific objects: + +- **`kernel`**: Injected with the kernel object. +- **`service`**: Populated with the AI service selected based on the provided arguments. +- **`execution_settings`**: Contains settings pertinent to the function's execution. +- **`arguments`**: Receives the entire set of kernel arguments passed during invocation. + +This design ensures that these parameters are automatically managed, eliminating the need for manual extraction or assignment. + +### Example Usage + +Consider the following example: + +```python +class SimplePlugin: + @kernel_function(name="GetWeather", description="Get the weather for a location.") + async def get_the_weather(self, location: str, arguments: KernelArguments) -> str: + # The 'arguments' parameter is reserved and automatically populated with KernelArguments. + return f"Received user input: {location}, the weather is nice!" 
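
    # Illustrative addition (an assumption, not part of the original sample): 'kernel'
    # is also a reserved name, so declaring a parameter called 'kernel' is enough for
    # the active Kernel instance to be injected automatically during invocation.
    @kernel_function(name="DescribeKernel", description="Describe the injected kernel.")
    async def describe_kernel(self, kernel) -> str:
        # The caller never passes 'kernel' explicitly; auto function calling fills it in.
        return f"The active kernel is of type {type(kernel).__name__}."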
+``` + ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/ai-services/chat-completion/index.md b/semantic-kernel/concepts/ai-services/chat-completion/index.md index c8218409..88d3eaea 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/index.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/index.md @@ -920,7 +920,7 @@ chat_completion_service = AzureChatCompletion(service_id="my-service-id") ``` > [!NOTE] -> The `AzureChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/en-us/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureChatCompletion` service also supports [Microsoft Entra](/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. # [OpenAI](#tab/python-OpenAI) @@ -967,7 +967,7 @@ chat_completion_service = AzureAIInferenceChatCompletion( ``` > [!NOTE] -> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](https://learn.microsoft.com/en-us/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. +> The `AzureAIInferenceChatCompletion` service also supports [Microsoft Entra](/entra/identity/authentication/overview-authentication) authentication. If you don't provide an API key, the service will attempt to authenticate using the Entra token. # [Anthropic](#tab/python-Anthropic) @@ -1274,7 +1274,7 @@ execution_settings = OnnxGenAIPromptExecutionSettings() --- > [!TIP] -> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel.connectors.ai?view=semantic-kernel-python). +> To see what you can configure in the execution settings, you can check the class definition in the [source code](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai) or check out the [API documentation](/python/api/semantic-kernel/semantic_kernel.connectors.ai). ::: zone-end diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/index.md b/semantic-kernel/concepts/enterprise-readiness/observability/index.md index 41583f59..c88edbe5 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/index.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/index.md @@ -20,8 +20,8 @@ Observability is typically achieved through logging, metrics, and tracing. 
They Useful materials for further reading: - [Observability defined by Cloud Native Computing Foundation](https://glossary.cncf.io/observability/) -- [Distributed tracing](https://learn.microsoft.com/dotnet/core/diagnostics/distributed-tracing) -- [Observability in .Net](https://learn.microsoft.com/dotnet/core/diagnostics/observability-with-otel) +- [Distributed tracing](/dotnet/core/diagnostics/distributed-tracing) +- [Observability in .Net](/dotnet/core/diagnostics/observability-with-otel) - [OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) ## Observability in Semantic Kernel @@ -33,18 +33,18 @@ Specifically, Semantic Kernel provides the following observability features: - **Logging**: Semantic Kernel logs meaningful events and errors from the kernel, kernel plugins and functions, as well as the AI connectors. ![Logs and events](../../../media/telemetry-log-events-overview-app-insights.png) > [!IMPORTANT] - > [Traces in Application Insights](https://learn.microsoft.com/azure/azure-monitor/app/data-model-complete#trace) represent traditional log entries and [OpenTelemetry span events](https://opentelemetry.io/docs/concepts/signals/traces/#span-events). They are not the same as distributed traces. + > [Traces in Application Insights](/azure/azure-monitor/app/data-model-complete#trace) represent traditional log entries and [OpenTelemetry span events](https://opentelemetry.io/docs/concepts/signals/traces/#span-events). They are not the same as distributed traces. - **Metrics**: Semantic Kernel emits metrics from kernel functions and AI connectors. You will be able to monitor metrics such as the kernel function execution time, the token consumption of AI connectors, etc. ![Metrics](../../../media/telemetry-metrics-overview-app-insights.png) - **Tracing**: Semantic Kernel supports distributed tracing. You can track activities across different services and within Semantic Kernel. - ![Complete end-to-end transaction of a request](../../media/telemetry-trace-overview-app-insights.png) + ![Complete end-to-end transaction of a request](../../../media/telemetry-trace-overview-app-insights.png) ::: zone pivot="programming-language-csharp" | Telemetry | Description | |-----------|---------------------------------------| -| Log | Logs are recorded throughout the Kernel. For more information on Logging in .Net, please refer to this [document](https://learn.microsoft.com/dotnet/core/extensions/logging). Sensitive data, such as kernel function arguments and results, are logged at the trace level. Please refer to this [table](https://learn.microsoft.com/dotnet/core/extensions/logging?tabs=command-line#log-level) for more information on log levels. | +| Log | Logs are recorded throughout the Kernel. For more information on Logging in .Net, please refer to this [document](/dotnet/core/extensions/logging). Sensitive data, such as kernel function arguments and results, are logged at the trace level. Please refer to this [table](/dotnet/core/extensions/logging?tabs=command-line#log-level) for more information on log levels. | | Activity | Each kernel function execution and each call to an AI model are recorded as an activity. All activities are generated by an activity source named "Microsoft.SemanticKernel". | | Metric | Semantic Kernel captures the following metrics from kernel functions:
    • `semantic_kernel.function.invocation.duration` (Histogram) - function execution time (in seconds)
    • `semantic_kernel.function.streaming.duration` (Histogram) - function streaming execution time (in seconds)
    • `semantic_kernel.function.invocation.token_usage.prompt` (Histogram) - number of prompt tokens used (only for `KernelFunctionFromPrompt`)
    • `semantic_kernel.function.invocation.token_usage.completion` (Histogram) - number of completion tokens used (only for `KernelFunctionFromPrompt`)
    • | diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md index bb604e6b..2efebe69 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-advanced.md @@ -12,7 +12,7 @@ ms.service: semantic-kernel # More advanced scenarios for telemetry > [!NOTE] -> This article will use [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) for illustration. If you prefer to use other tools, please refer to the documentation of the tool you are using on setup instructions. +> This article will use [Aspire Dashboard](/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) for illustration. If you prefer to use other tools, please refer to the documentation of the tool you are using on setup instructions. ## Auto Function Calling @@ -375,7 +375,7 @@ Please refer to this [article](./telemetry-with-console.md#environment-variables ### Start the Aspire Dashboard -Follow the instructions [here](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. +Follow the instructions [here](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. ### Run diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md index b83abbfb..6bd39dfe 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-aspire-dashboard.md @@ -11,9 +11,9 @@ ms.service: semantic-kernel # Inspection of telemetry data with Aspire Dashboard -[Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) is part of the [.NET Aspire](https://learn.microsoft.com/dotnet/aspire/get-started/aspire-overview) offering. The dashboard allows developers to monitor and inspect their distributed applications. +[Aspire Dashboard](/dotnet/aspire/fundamentals/dashboard/overview?tabs=bash) is part of the [.NET Aspire](/dotnet/aspire/get-started/aspire-overview) offering. The dashboard allows developers to monitor and inspect their distributed applications. -In this example, we will use the [standalone mode](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) and learn how to export telemetry data to Aspire Dashboard, and inspect the data there. +In this example, we will use the [standalone mode](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) and learn how to export telemetry data to Aspire Dashboard, and inspect the data there. ## Exporter @@ -330,7 +330,7 @@ Please refer to this [article](./telemetry-with-console.md#add-telemetry-1) for ## Start the Aspire Dashboard -Follow the instructions [here](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. 
Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. +Follow the instructions [here](/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash#start-the-dashboard) to start the dashboard. Once the dashboard is running, open a browser and navigate to `http://localhost:18888` to access the dashboard. ## Run @@ -366,7 +366,7 @@ python telemetry_aspire_dashboard_quickstart.py After running the application, head over to the dashboard to inspect the telemetry data. > [!TIP] -> Follow this [guide](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/explore) to explore the Aspire Dashboard interface. +> Follow this [guide](/dotnet/aspire/fundamentals/dashboard/explore) to explore the Aspire Dashboard interface. ### Traces @@ -383,7 +383,7 @@ In the trace details, you can see the span that represents the prompt function a ### Logs -Head over to the `Structured` tab to view the logs emitted by the application. Please refer to this [guide](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/explore#structured-logs-page) on how to work with structured logs in the dashboard. +Head over to the `Structured` tab to view the logs emitted by the application. Please refer to this [guide](/dotnet/aspire/fundamentals/dashboard/explore#structured-logs-page) on how to work with structured logs in the dashboard. ## Next steps diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md index 36153162..00ab843c 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-azure-ai-foundry-tracing.md @@ -10,7 +10,7 @@ ms.service: semantic-kernel # Visualize traces on Azure AI Foundry Tracing UI -[Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. +[Azure AI Foundry](/azure/ai-studio/) Tracing UI is a web-based user interface that allows you to visualize traces and logs generated by your applications. This article provides a step-by-step guide on how to visualize traces on Azure AI Foundry Tracing UI. > [!IMPORTANT] > Before you start, make sure you have completed the tutorial on [inspecting telemetry data with Application Insights](./telemetry-with-app-insights.md). @@ -20,8 +20,8 @@ ms.service: semantic-kernel Prerequisites: -- An Azure AI Foundry project. Follow this [guide](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-projects) to create one if you don't have one. -- A serverless inference API. Follow this [guide](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. +- An Azure AI Foundry project. Follow this [guide](/azure/ai-studio/how-to/create-projects) to create one if you don't have one. +- A serverless inference API. Follow this [guide](/azure/ai-studio/how-to/deploy-models-serverless) to create one if you don't have one. - Alternatively, you can attach an Azure OpenAI resource to the project, in which case you don't need to create a serverless API. 
## Attach an Application Insights resource to the project diff --git a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md index 56354c16..f066c170 100644 --- a/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md +++ b/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console.md @@ -478,7 +478,7 @@ Value: 16 Here you can see the name, the description, the unit, the time range, the type, the value of the metric, and the meter that the metric belongs to. > [!NOTE] -> The above metric is a Counter metric. For a full list of metric types, see [here](https://learn.microsoft.com/dotnet/core/diagnostics/metrics-instrumentation#types-of-instruments). Depending on the type of metric, the output may vary. +> The above metric is a Counter metric. For a full list of metric types, see [here](/dotnet/core/diagnostics/metrics-instrumentation#types-of-instruments). Depending on the type of metric, the output may vary. ::: zone-end From 84cf4bd8d220c54412dd443b8d64d767641ca57e Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Tue, 18 Feb 2025 08:28:15 +0900 Subject: [PATCH 016/117] Add Python processes sample code. --- .../Frameworks/process/examples/example-human-in-loop.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md b/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md index b41da7a1..445f9d10 100644 --- a/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md +++ b/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md @@ -45,6 +45,7 @@ public class PublishDocumentationStep : KernelProcessStep ::: zone-end ::: zone pivot="programming-language-python" +> Support for Python Human-in-the-loop Process behavior is coming soon. ::: zone-end ::: zone pivot="programming-language-java" @@ -81,6 +82,7 @@ Now whenever the newly generated documentation is approved by the proofread agen ::: zone-end ::: zone pivot="programming-language-python" +> Support for Python Human-in-the-loop Process behavior is coming soon. ::: zone-end ::: zone pivot="programming-language-java" @@ -132,6 +134,7 @@ return process; ::: zone-end ::: zone pivot="programming-language-python" +> Support for Python Human-in-the-loop Process behavior is coming soon. ::: zone-end ::: zone pivot="programming-language-java" @@ -154,6 +157,7 @@ await process.StartAsync(kernel, new KernelProcessEvent { Id = "HumanApprovalRes ::: zone-end ::: zone pivot="programming-language-python" +> Support for Python Human-in-the-loop Process behavior is coming soon. ::: zone-end ::: zone pivot="programming-language-java" From 03244eded4c85a9d039ddd28722c2e91bbba2121 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 09:44:59 +0100 Subject: [PATCH 017/117] fixed headings --- .../concepts/enterprise-readiness/filters.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 286d3673..fe2a0f16 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -226,8 +226,6 @@ async def prompt_rendering_filter(context: PromptRenderContext, next): ::: zone-end ::: zone pivot="programming-language-java" -## Coming soon - More info coming soon. 
::: zone-end @@ -303,8 +301,6 @@ async def auto_function_invocation_filter(context: AutoFunctionInvocationContext ::: zone-end ::: zone pivot="programming-language-java" -## Coming soon - More info coming soon. ::: zone-end @@ -370,8 +366,12 @@ IChatCompletionService chatCompletionService = kernel.GetRequiredService Date: Tue, 18 Feb 2025 09:50:02 +0100 Subject: [PATCH 018/117] removed heading --- semantic-kernel/concepts/enterprise-readiness/filters.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index fe2a0f16..249d1de4 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -156,8 +156,6 @@ async def streaming_exception_handling( ::: zone-end ::: zone pivot="programming-language-java" -## Coming soon - More info coming soon. ::: zone-end From fa77efcc9e27890efa17ba2a632ae29b17c06b89 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 10:02:09 +0100 Subject: [PATCH 019/117] added notes on ordering --- .../concepts/enterprise-readiness/filters.md | 55 ++++++++++++++++++- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 249d1de4..a47eba80 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -24,7 +24,7 @@ There are three types of filters: - **Function Invocation Filter** - this filter is executed each time a `KernelFunction` is invoked. It allows: - Access to information about the function being executed and its arguments - Handling of exceptions during function execution - - Overriding of the function result + - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) - Retrying of the function in case of failure (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: @@ -37,9 +37,17 @@ Each filter includes a `context` object that contains all relevant information a In a filter, calling the `next` delegate is essential to proceed to the next registered filter or the original operation (whether function invocation or prompt rendering). Without calling `next`, the operation will not be executed. +::: zone pivot="programming-language-csharp" + To use a filter, first define it, then add it to the `Kernel` object either through dependency injection or the appropriate `Kernel` property. When using dependency injection, the order of filters is not guaranteed, so with multiple filters, the execution order may be unpredictable. -For cases where filter order is important, it is recommended to add filters directly to the `Kernel` object using appropriate properties. This approach allows filters to be added, removed, or reordered at runtime. +::: zone-end +::: zone pivot="programming-language-python" + +To use a filter, you can either define a function with the required parameters and add it to the `Kernel` object using the `add_filter` method, or use the `@kernel.filter` decorator to define a filter function and add it to the `Kernel` object. 
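For example, a minimal sketch that registers a logging filter both ways. The import paths and the exact `add_filter` signature are assumptions and may vary slightly by package version; the decorator form mirrors the samples shown later in this article.

```python
from typing import Awaitable, Callable

from semantic_kernel import Kernel
from semantic_kernel.filters import FilterTypes
from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext

kernel = Kernel()


# Option 1: register the filter with the decorator on the Kernel instance.
@kernel.filter(FilterTypes.FUNCTION_INVOCATION)
async def log_before(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
    print(f"Invoking {context.function.plugin_name}.{context.function.name}")
    await next(context)


# Option 2: define the filter separately and register it explicitly.
async def log_after(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
    await next(context)
    print(f"Finished {context.function.plugin_name}.{context.function.name}")


kernel.add_filter(FilterTypes.FUNCTION_INVOCATION, log_after)
```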
+ +::: zone-end + ## Function Invocation Filter @@ -366,6 +374,49 @@ ChatMessageContent result = await chatCompletionService.GetChatMessageContentAsy ::: zone-end +## Ordering + +::: zone pivot="programming-language-csharp" + +When using dependency injection, the order of filters is not guaranteed. If the order of filters is important, it is recommended to add filters directly to the `Kernel` object using appropriate properties. This approach allows filters to be added, removed, or reordered at runtime. + +::: zone-end +::: zone pivot="programming-language-python" + +Filters are executed according to the order in which they are added to the `Kernel` object, which is equivalent between using `add_filter` and the `@kernel.filter` decorator. The order of filters can be important and should be understood well. + +Consider the following example: + +```python +def func(): + print('function') + + +@kernel.filter(FilterTypes.FUNCTION_INVOCATION) +async def filter1(context: FunctionInvocationContext, next): + print('before filter 1') + await next(context) + print('after filter 1') + +@kernel.filter(FilterTypes.FUNCTION_INVOCATION) +async def filter2(context: FunctionInvocationContext, next): + print('before filter 2') + await next(context) + print('after filter 2') +``` + +When executed the function, the output will be: + +```python +before filter 1 +before filter 2 +function +after filter 2 +after filter 1 +``` + +::: zone-end + ## More examples ::: zone pivot="programming-language-csharp" From 26e2b72938515ab2960c34cabc5db7ee8bd321ba Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 10:03:52 +0100 Subject: [PATCH 020/117] try inline zone --- semantic-kernel/concepts/enterprise-readiness/filters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index a47eba80..3549d842 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -25,7 +25,7 @@ There are three types of filters: - Access to information about the function being executed and its arguments - Handling of exceptions during function execution - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) - - Retrying of the function in case of failure (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) + - Retrying of the function in case of failure ::: zone pivot="programming-language-csharp (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) ::: zone-end - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: - Viewing and modifying the prompt that will be sent to the AI (e.g., for RAG or [PII redaction](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/PIIDetection.cs)) From 436e9cc2eef73cc1b560384fd998d1a071df7b12 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 10:04:58 +0100 Subject: [PATCH 021/117] fix bullet --- semantic-kernel/concepts/enterprise-readiness/filters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md 
b/semantic-kernel/concepts/enterprise-readiness/filters.md index 3549d842..ff914220 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -92,7 +92,7 @@ kernel.FunctionInvocationFilters.Add(new LoggingFilter(logger)); ### Code examples * [Function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/FunctionInvocationFiltering.cs) -* + ::: zone-end ::: zone pivot="programming-language-python" From 9c91c5490867f753fa289eeaeee075059e833a2a Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 10:06:28 +0100 Subject: [PATCH 022/117] single line zone --- .../concepts/enterprise-readiness/filters.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index ff914220..55f53937 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -25,7 +25,15 @@ There are three types of filters: - Access to information about the function being executed and its arguments - Handling of exceptions during function execution - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) - - Retrying of the function in case of failure ::: zone pivot="programming-language-csharp (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) ::: zone-end +::: zone pivot="programming-language-csharp" + - Retrying of the function in case of failure (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) +::: zone-end +::: zone pivot="programming-language-python" + - Retrying of the function in case of failure +::: zone-end +::: zone pivot="programming-language-java" + - Retrying of the function in case of failure +::: zone-end - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: - Viewing and modifying the prompt that will be sent to the AI (e.g., for RAG or [PII redaction](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/PIIDetection.cs)) From 9d59fc2788085f8582c3b7d49aa21ba88f3a92f5 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 18 Feb 2025 11:00:09 +0100 Subject: [PATCH 023/117] small text updates --- .../concepts/enterprise-readiness/filters.md | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 55f53937..f7434e14 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -25,15 +25,7 @@ There are three types of filters: - Access to information about the function being executed and its arguments - Handling of exceptions during function execution - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) -::: zone pivot="programming-language-csharp" - Retrying of the function in case of failure (e.g., [switching to an alternative AI 
model](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/RetryWithFilters.cs)) -::: zone-end -::: zone pivot="programming-language-python" - - Retrying of the function in case of failure -::: zone-end -::: zone pivot="programming-language-java" - - Retrying of the function in case of failure -::: zone-end - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: - Viewing and modifying the prompt that will be sent to the AI (e.g., for RAG or [PII redaction](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/PIIDetection.cs)) @@ -140,9 +132,9 @@ async def logger_filter(context: FunctionInvocationContext, next: Callable[[Func ### Streaming invocation -Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns `AsyncGenerator`, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must be of type `AsyncGenerator`, whereas in non-streaming mode, it can simply be of type `T`. +Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns a `AsyncGenerator[T]` object where `T` is a kind of streaming content type, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must also be of type `AsyncGenerator[T]`. -So to build a simple logger filter for streaming, you would use something like this: +So to build a simple logger filter for a streaming function invocation, you would use something like this: ```python @kernel.filter(FilterTypes.FUNCTION_INVOCATION) @@ -391,7 +383,7 @@ When using dependency injection, the order of filters is not guaranteed. If the ::: zone-end ::: zone pivot="programming-language-python" -Filters are executed according to the order in which they are added to the `Kernel` object, which is equivalent between using `add_filter` and the `@kernel.filter` decorator. The order of filters can be important and should be understood well. +Filters are executed in the order they are added to the Kernel object -- whether through `add_filter` or the `@kernel.filter` decorator. Because execution order can affect behavior, it's important to manage filter order carefully. 
Consider the following example: @@ -413,7 +405,7 @@ async def filter2(context: FunctionInvocationContext, next): print('after filter 2') ``` -When executed the function, the output will be: +When executing the function, the output will be: ```python before filter 1 From 40430831c0d9193a5a6b49739c4a3a296c2ea7a8 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 19 Feb 2025 11:49:04 +0100 Subject: [PATCH 024/117] added new sample links --- .../concepts/enterprise-readiness/filters.md | 97 +++++++++++++------ 1 file changed, 65 insertions(+), 32 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index f7434e14..5f541620 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -20,7 +20,7 @@ A good example of filters is provided [here](https://devblogs.microsoft.com/sema ![Semantic Kernel Filters](../../media/WhatAreFilters.png) There are three types of filters: - +::: zone pivot="programming-language-csharp" - **Function Invocation Filter** - this filter is executed each time a `KernelFunction` is invoked. It allows: - Access to information about the function being executed and its arguments - Handling of exceptions during function execution @@ -30,6 +30,29 @@ There are three types of filters: - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: - Viewing and modifying the prompt that will be sent to the AI (e.g., for RAG or [PII redaction](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Filtering/PIIDetection.cs)) - Preventing prompt submission to the AI by overriding the function result (e.g., for [Semantic Caching](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Caching/SemanticCachingWithFilters.cs)) +::: zone-end +::: zone pivot="programming-language-python" +- **Function Invocation Filter** - this filter is executed each time a `KernelFunction` is invoked. It allows: + - Access to information about the function being executed and its arguments + - Handling of exceptions during function execution + - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) + - Retrying of the function in case of failure (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/retry_with_different_model.py)) + +- **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: +- Viewing and modifying the prompt that will be sent to the AI +- Preventing prompt submission to the AI by overriding the function result (e.g., for [Semantic Caching](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/caching/semantic_caching.py)) +::: zone-end +::: zone pivot="programming-language-java" +- **Function Invocation Filter** - this filter is executed each time a `KernelFunction` is invoked. 
It allows: + - Access to information about the function being executed and its arguments + - Handling of exceptions during function execution + - Overriding of the function result, either before (for instance for caching scenario's) or after execution (for instance for responsible AI scenarios) + - Retrying of the function in case of failure + +- **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: +- Viewing and modifying the prompt that will be sent to the AI +- Preventing prompt submission to the AI by overriding the function result +::: zone-end - **Auto Function Invocation Filter** - similar to the function invocation filter, this filter operates within the scope of `automatic function calling`, providing additional context, including chat history, a list of all functions to be executed, and iteration counters. It also allows termination of the auto function calling process (e.g., if a desired result is obtained from the second of three planned functions). @@ -129,37 +152,8 @@ async def logger_filter(context: FunctionInvocationContext, next: Callable[[Func logger.info(f"FunctionInvoked - {context.function.plugin_name}.{context.function.name}") ``` - -### Streaming invocation - -Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns a `AsyncGenerator[T]` object where `T` is a kind of streaming content type, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must also be of type `AsyncGenerator[T]`. - -So to build a simple logger filter for a streaming function invocation, you would use something like this: - -```python -@kernel.filter(FilterTypes.FUNCTION_INVOCATION) -async def streaming_exception_handling( - context: FunctionInvocationContext, - next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]], -): - await next(context) - - async def override_stream(stream): - try: - async for partial in stream: - yield partial - except Exception as e: - yield [ - StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}", choice_index=0) - ] - - stream = context.result.value - context.result = FunctionResult(function=context.result.function, value=override_stream(stream)) -``` - ### Code examples * [Function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/function_invocation_filters.py) -* [Streaming function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/function_invocation_filters_stream.py) ::: zone-end ::: zone pivot="programming-language-java" @@ -310,9 +304,8 @@ async def auto_function_invocation_filter(context: AutoFunctionInvocationContext More info coming soon. ::: zone-end -::: zone pivot="programming-language-csharp" - ## Streaming and non-streaming invocation +::: zone pivot="programming-language-csharp" Functions in Semantic Kernel can be invoked in two ways: streaming and non-streaming. In streaming mode, a function typically returns `IAsyncEnumerable`, while in non-streaming mode, it returns `FunctionResult`. This distinction affects how results can be overridden in the filter: in streaming mode, the new function result value must be of type `IAsyncEnumerable`, whereas in non-streaming mode, it can simply be of type `T`. 
To determine which result type needs to be returned, the `context.IsStreaming` flag is available in the filter context model. @@ -372,6 +365,44 @@ IChatCompletionService chatCompletionService = kernel.GetRequiredService Date: Wed, 19 Feb 2025 11:50:38 +0100 Subject: [PATCH 025/117] fix indentation --- semantic-kernel/concepts/enterprise-readiness/filters.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 5f541620..15b7e90b 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -39,8 +39,8 @@ There are three types of filters: - Retrying of the function in case of failure (e.g., [switching to an alternative AI model](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/retry_with_different_model.py)) - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: -- Viewing and modifying the prompt that will be sent to the AI -- Preventing prompt submission to the AI by overriding the function result (e.g., for [Semantic Caching](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/caching/semantic_caching.py)) + - Viewing and modifying the prompt that will be sent to the AI + - Preventing prompt submission to the AI by overriding the function result (e.g., for [Semantic Caching](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/caching/semantic_caching.py)) ::: zone-end ::: zone pivot="programming-language-java" - **Function Invocation Filter** - this filter is executed each time a `KernelFunction` is invoked. It allows: @@ -50,8 +50,8 @@ There are three types of filters: - Retrying of the function in case of failure - **Prompt Render Filter** - this filter is triggered before the prompt rendering operation, enabling: -- Viewing and modifying the prompt that will be sent to the AI -- Preventing prompt submission to the AI by overriding the function result + - Viewing and modifying the prompt that will be sent to the AI + - Preventing prompt submission to the AI by overriding the function result ::: zone-end - **Auto Function Invocation Filter** - similar to the function invocation filter, this filter operates within the scope of `automatic function calling`, providing additional context, including chat history, a list of all functions to be executed, and iteration counters. It also allows termination of the auto function calling process (e.g., if a desired result is obtained from the second of three planned functions). 
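To make the termination capability described above concrete, the sketch below shows an auto function invocation filter that ends the loop early once a result is available. The attribute names `request_sequence_index` and `terminate`, and the import path for the context class, are assumptions based on the description of the context object and may differ by package version.

```python
from semantic_kernel import Kernel
from semantic_kernel.filters import FilterTypes
from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
    AutoFunctionInvocationContext,
)

kernel = Kernel()


@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
async def stop_early(context: AutoFunctionInvocationContext, next) -> None:
    # Let the planned function run first.
    await next(context)
    # Assumption: request_sequence_index counts completed auto function calling rounds.
    # Setting terminate ends the loop so no further planned functions are invoked.
    if context.request_sequence_index >= 1:
        context.terminate = True
```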
From 7d9f07a4106203f81f7fba21100bbc4b11246309 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 19 Feb 2025 11:56:36 +0100 Subject: [PATCH 026/117] polish --- semantic-kernel/concepts/enterprise-readiness/filters.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/enterprise-readiness/filters.md b/semantic-kernel/concepts/enterprise-readiness/filters.md index 15b7e90b..ae38f166 100644 --- a/semantic-kernel/concepts/enterprise-readiness/filters.md +++ b/semantic-kernel/concepts/enterprise-readiness/filters.md @@ -376,7 +376,7 @@ So to build a simple logger filter for a streaming function invocation, you woul @kernel.filter(FilterTypes.FUNCTION_INVOCATION) async def streaming_exception_handling( context: FunctionInvocationContext, - next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]], + next: Callable[[FunctionInvocationContext], Awaitable[None]], ): await next(context) if not context.is_streaming: @@ -395,7 +395,7 @@ async def streaming_exception_handling( context.result = FunctionResult(function=context.result.function, value=override_stream(stream)) ``` -Code example: +### Code examples * [Streaming function invocation filter examples](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/filtering/function_invocation_filters_stream.py) ::: zone-end From f0d5d11f745b14d0364f45bd55bcf7a70cfe64af Mon Sep 17 00:00:00 2001 From: Ben Thomas Date: Tue, 25 Feb 2025 08:01:41 -0800 Subject: [PATCH 027/117] Update semantic-kernel/Frameworks/process/examples/example-cycles.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/process/examples/example-cycles.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-cycles.md b/semantic-kernel/Frameworks/process/examples/example-cycles.md index d46a2424..f9440026 100644 --- a/semantic-kernel/Frameworks/process/examples/example-cycles.md +++ b/semantic-kernel/Frameworks/process/examples/example-cycles.md @@ -108,10 +108,10 @@ class ProofreadStep(KernelProcessStep): print(f"{ProofreadStep.__name__}\n\t Proofreading product documentation...") system_prompt = """ - Your job is to proofread customer facing documentation for a new product from Contoso. You will be provide with + Your job is to proofread customer facing documentation for a new product from Contoso. You will be provided with proposed documentation for a product and you must do the following things: - 1. Determine if the documentation is passes the following criteria: + 1. Determine if the documentation passes the following criteria: 1. Documentation must use a professional tone. 1. Documentation should be free of spelling or grammar mistakes. 1. Documentation should be free of any offensive or inappropriate language. 
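As a usage sketch for the streaming filter signature updated above, a prompt function can be consumed as a stream through the kernel, with any registered function invocation filters wrapping the chunks as they are yielded. The service id, model id, prompt, and argument names below are illustrative, and OpenAI credentials are assumed to be available in the environment.

```python
import asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion


async def main() -> None:
    kernel = Kernel()
    # Assumes OPENAI_API_KEY is set in the environment; the model id is a placeholder.
    kernel.add_service(OpenAIChatCompletion(service_id="chat", ai_model_id="gpt-4o"))

    # A simple prompt function; registered function invocation filters run around it.
    joke_function = kernel.add_function(
        plugin_name="fun",
        function_name="joke",
        prompt="Tell a short joke about {{$topic}}.",
    )

    # Each streamed item is a list of streaming content chunks; print them as they arrive.
    async for chunks in kernel.invoke_stream(joke_function, topic="filters"):
        print(str(chunks[0]), end="")


asyncio.run(main())
```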
From b754b73e7fb613770e601a26059a3753e874a8cc Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Tue, 25 Feb 2025 09:16:04 -0800 Subject: [PATCH 028/117] Update semantic-kernel/Frameworks/process/examples/example-cycles.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/process/examples/example-cycles.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-cycles.md b/semantic-kernel/Frameworks/process/examples/example-cycles.md index f9440026..f7e4dba9 100644 --- a/semantic-kernel/Frameworks/process/examples/example-cycles.md +++ b/semantic-kernel/Frameworks/process/examples/example-cycles.md @@ -98,8 +98,7 @@ class ProofreadingResponse(BaseModel): meets_expectations: bool = Field(description="Specifies if the proposed docs meets the standards for publishing.") explanation: str = Field(description="An explanation of why the documentation does or does not meet expectations.") - suggestions: list[str] = Field(description="List of suggestions, empty if there no suggestions for improvement.") - + suggestions: list[str] = Field(description="List of suggestions, empty if there are no suggestions for improvement.") # A process step to proofread documentation class ProofreadStep(KernelProcessStep): From 1db60e286b126a2f070144932e72638c827f63ed Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Tue, 25 Feb 2025 09:16:11 -0800 Subject: [PATCH 029/117] Update semantic-kernel/Frameworks/process/examples/example-first-process.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../Frameworks/process/examples/example-first-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-first-process.md b/semantic-kernel/Frameworks/process/examples/example-first-process.md index 447fa929..88d30f2f 100644 --- a/semantic-kernel/Frameworks/process/examples/example-first-process.md +++ b/semantic-kernel/Frameworks/process/examples/example-first-process.md @@ -208,7 +208,7 @@ class GenerateDocumentationStep(KernelProcessStep[GeneratedDocumentationState]): system_prompt: ClassVar[str] = """ Your job is to write high quality and engaging customer facing documentation for a new product from Contoso. You will -be provide with information about the product in the form of internal documentation, specs, and troubleshooting guides +be provided with information about the product in the form of internal documentation, specs, and troubleshooting guides and you must use this information and nothing else to generate the documentation. If suggestions are provided on the documentation you create, take the suggestions into account and rewrite the documentation. Make sure the product sounds amazing. 
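The suggestion loop described in these prompts relies on the documentation step keeping its chat history in step state, so earlier drafts and proofreader suggestions accumulate across iterations of the cycle. A rough sketch of that state-handling pattern is shown below; the class, field, and import names are illustrative assumptions and may not match the sample code exactly.

```python
from typing import ClassVar

from pydantic import Field

from semantic_kernel import Kernel
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import kernel_function
from semantic_kernel.kernel_pydantic import KernelBaseModel
from semantic_kernel.processes.kernel_process.kernel_process_step import KernelProcessStep
from semantic_kernel.processes.kernel_process.kernel_process_step_context import KernelProcessStepContext
from semantic_kernel.processes.kernel_process.kernel_process_step_state import KernelProcessStepState


class DraftState(KernelBaseModel):
    """State persisted for the documentation drafting step."""

    chat_history: ChatHistory | None = None


class DraftDocumentationStep(KernelProcessStep[DraftState]):
    state: DraftState = Field(default_factory=DraftState)
    system_prompt: ClassVar[str] = "Write customer facing documentation for the product."

    async def activate(self, state: KernelProcessStepState[DraftState]) -> None:
        # Restore (or initialize) the step state when the process activates the step.
        self.state = state.state
        if self.state.chat_history is None:
            self.state.chat_history = ChatHistory(system_message=self.system_prompt)

    @kernel_function
    async def apply_suggestions(self, suggestions: str, context: KernelProcessStepContext, kernel: Kernel) -> None:
        # Because the history lives in state, the model sees earlier drafts plus the new suggestions.
        self.state.chat_history.add_user_message(f"Rewrite the documentation using these suggestions: {suggestions}")
        # ...invoke the chat service and emit an event with the revised draft...
```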
From 50864c0c7cf5838ea62acda5be19b86ec967d405 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Tue, 25 Feb 2025 09:16:22 -0800 Subject: [PATCH 030/117] Update semantic-kernel/Frameworks/process/examples/example-cycles.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/process/examples/example-cycles.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-cycles.md b/semantic-kernel/Frameworks/process/examples/example-cycles.md index f7e4dba9..78248e01 100644 --- a/semantic-kernel/Frameworks/process/examples/example-cycles.md +++ b/semantic-kernel/Frameworks/process/examples/example-cycles.md @@ -228,7 +228,7 @@ class GenerateDocumentationStep(KernelProcessStep[GeneratedDocumentationState]): system_prompt: ClassVar[str] = """ Your job is to write high quality and engaging customer facing documentation for a new product from Contoso. You will -be provide with information about the product in the form of internal documentation, specs, and troubleshooting guides +be provided with information about the product in the form of internal documentation, specs, and troubleshooting guides and you must use this information and nothing else to generate the documentation. If suggestions are provided on the documentation you create, take the suggestions into account and rewrite the documentation. Make sure the product sounds amazing. From a97ebdbdb77b0e58b45374adfd2c3397f52e956b Mon Sep 17 00:00:00 2001 From: Eric Urban Date: Wed, 26 Feb 2025 02:27:05 -0800 Subject: [PATCH 031/117] OpenAI not Open AI (#468) --- semantic-kernel/Frameworks/agent/TOC.yml | 2 +- .../Frameworks/agent/agent-architecture.md | 6 ++--- .../Frameworks/agent/agent-chat.md | 6 ++--- .../Frameworks/agent/agent-functions.md | 4 +-- .../Frameworks/agent/agent-streaming.md | 12 ++++----- .../Frameworks/agent/agent-templates.md | 4 +-- .../Frameworks/agent/assistant-agent.md | 26 +++++++++---------- .../Frameworks/agent/chat-completion-agent.md | 4 +-- .../examples/example-agent-collaboration.md | 6 ++--- .../agent/examples/example-assistant-code.md | 18 ++++++------- .../examples/example-assistant-search.md | 16 ++++++------ .../agent/examples/example-chat-agent.md | 8 +++--- semantic-kernel/Frameworks/agent/index.md | 4 +-- .../ai-services/embedding-generation/index.md | 6 ++--- .../embedding-generation.md | 4 +-- .../how-to/vector-store-data-ingestion.md | 2 +- 16 files changed, 64 insertions(+), 64 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/TOC.yml b/semantic-kernel/Frameworks/agent/TOC.yml index 8c88375f..c85787ea 100644 --- a/semantic-kernel/Frameworks/agent/TOC.yml +++ b/semantic-kernel/Frameworks/agent/TOC.yml @@ -4,7 +4,7 @@ href: agent-architecture.md - name: Chat Completion Agent href: chat-completion-agent.md -- name: Open AI Assistant Agent +- name: OpenAI Assistant Agent href: assistant-agent.md - name: Agent Collaboration href: agent-chat.md diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index e44f73d5..c3966fa8 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -28,7 +28,7 @@ The _Agent Framework_ was developed with the following key priorities in mind: ## Agent -The abstract _Agent_ class serves as the core abstraction for all types of agents, providing a 
foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [_Kernel_](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [_Chat Completion Agent_](./chat-completion-agent.md) and the [_Open AI Assistant Agent_](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. +The abstract _Agent_ class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [_Kernel_](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [_Chat Completion Agent_](./chat-completion-agent.md) and the [_OpenAI Assistant Agent_](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. ::: zone pivot="programming-language-csharp" @@ -103,7 +103,7 @@ The _Agent Framework_ is built on the foundational concepts and features that ma At the heart of the _Semantic Kernel_ ecosystem is the [_Kernel_](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The _Kernel_ acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. -The [_Chat Completion Agent_](./chat-completion-agent.md) and [_Open AI Assistant Agent_](./assistant-agent.md) articles provide specific details on how to create each type of agent. +The [_Chat Completion Agent_](./chat-completion-agent.md) and [_OpenAI Assistant Agent_](./assistant-agent.md) articles provide specific details on how to create each type of agent. These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. #### Related API's: @@ -169,7 +169,7 @@ Plugins are a fundamental aspect of the _Semantic Kernel_, enabling developers t Agent messaging, including both input and response, is built upon the core content types of the _Semantic Kernel_, providing a unified structure for communication. This design choice simplifies the process of transitioning from traditional chat-completion patterns to more advanced agent-driven patterns in your application development. By leveraging familiar _Semantic Kernel_ content types, developers can seamlessly integrate agent capabilities into their applications without needing to overhaul existing systems. This streamlining ensures that as you evolve from basic conversational AI to more autonomous, task-oriented agents, the underlying framework remains consistent, making development faster and more efficient. 
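As a small illustration of this shared message model, the sketch below sends a `ChatMessageContent` to an agent and reads back responses of the same type. The service id, model id, agent name, and instructions are placeholders, and OpenAI credentials are assumed to be configured in the environment.

```python
import asyncio

from semantic_kernel import Kernel
from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent


async def main() -> None:
    kernel = Kernel()
    kernel.add_service(OpenAIChatCompletion(service_id="chat", ai_model_id="gpt-4o"))

    agent = ChatCompletionAgent(
        service_id="chat",
        kernel=kernel,
        name="Helper",
        instructions="Answer questions briefly.",
    )

    # The same ChatHistory and ChatMessageContent types used with chat completion
    # services carry the agent's input and output.
    chat = ChatHistory()
    chat.add_message(ChatMessageContent(role=AuthorRole.USER, content="What is Semantic Kernel?"))

    async for response in agent.invoke(chat):
        print(f"{response.role}: {response.content}")


asyncio.run(main())
```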
-> Note: The [_Open AI Assistant Agent_`_](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: +> Note: The [_OpenAI Assistant Agent_`_](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: #### Related API's: diff --git a/semantic-kernel/Frameworks/agent/agent-chat.md b/semantic-kernel/Frameworks/agent/agent-chat.md index 90771da6..bf59e765 100644 --- a/semantic-kernel/Frameworks/agent/agent-chat.md +++ b/semantic-kernel/Frameworks/agent/agent-chat.md @@ -38,7 +38,7 @@ Detailed API documentation related to this discussion is available at: ## What is _Agent Chat_? -_Agent Chat_ provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [_Chat Completion Agent_](./chat-completion-agent.md) and an [_Open AI Assistant Agent_](./assistant-agent.md) to work together within the same conversation. _Agent Chat_ also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. +_Agent Chat_ provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [_Chat Completion Agent_](./chat-completion-agent.md) and an [_OpenAI Assistant Agent_](./assistant-agent.md) to work together within the same conversation. _Agent Chat_ also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. As an abstract class, _Agent Chat_ can be subclassed to support custom scenarios. @@ -280,7 +280,7 @@ history = await chat.get_chat_messages() ::: zone-end -Since different agent types or configurations may maintain their own version of the conversation history, agent specific history is also available by specifing an agent. (For example: [_Open AI Assistant_](./assistant-agent.md) versus [_Chat Completion Agent_](./chat-completion-agent.md).) +Since different agent types or configurations may maintain their own version of the conversation history, agent specific history is also available by specifing an agent. (For example: [_OpenAI Assistant_](./assistant-agent.md) versus [_Chat Completion Agent_](./chat-completion-agent.md).) ::: zone pivot="programming-language-csharp" ```csharp @@ -636,7 +636,7 @@ if chat.is_complete: ### Clear Full Conversation State -When done using an _Agent Chat_ where an [_Open AI Assistant_](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. _Agent Chat_ supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. +When done using an _Agent Chat_ where an [_OpenAI Assistant_](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. _Agent Chat_ supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. A full reset does not remove the _agents_ that had joined the _Agent Chat_ and leaves the _Agent Chat_ in a state where it can be reused. 
This allows for the continuation of interactions with the same agents without needing to reinitialize them, making future conversations more efficient. diff --git a/semantic-kernel/Frameworks/agent/agent-functions.md b/semantic-kernel/Frameworks/agent/agent-functions.md index a367e793..3ada7e4f 100644 --- a/semantic-kernel/Frameworks/agent/agent-functions.md +++ b/semantic-kernel/Frameworks/agent/agent-functions.md @@ -50,7 +50,7 @@ Any [Plugin](../../concepts/plugins/index.md) available to an _Agent_ is managed [Plugins](../../concepts/plugins/index.md) can be added to the _Kernel_ either before or after the _Agent_ is created. The process of initializing [Plugins](../../concepts/plugins/index.md) follows the same patterns used for any _Semantic Kernel_ implementation, allowing for consistency and ease of use in managing AI capabilities. -> Note: For a [_Chat Completion Agent_](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [_Open AI Assistant_](./assistant-agent.md) agent is always based on automatic function calling. +> Note: For a [_Chat Completion Agent_](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [_OpenAI Assistant_](./assistant-agent.md) agent is always based on automatic function calling. ::: zone pivot="programming-language-csharp" ```csharp @@ -198,7 +198,7 @@ agent = ChatCompletionAgent( ## Limitations for Agent Function Calling -When directly invoking a[_Chat Completion Agent_](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. However, when using an [_Open AI Assistant_](./assistant-agent.md) or [_Agent Chat_](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available. +When directly invoking a[_Chat Completion Agent_](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. However, when using an [_OpenAI Assistant_](./assistant-agent.md) or [_Agent Chat_](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available. ## How-To diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index befc0004..eac08b71 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -19,9 +19,9 @@ A streamed response delivers the message content in small, incremental chunks. T #### Streaming References: -- [Open AI Streaming Guide](https://platform.openai.com/docs/api-reference/streaming) -- [Open AI Chat Completion Streaming](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream) -- [Open AI Assistant Streaming](https://platform.openai.com/docs/api-reference/assistants-streaming) +- [OpenAI Streaming Guide](https://platform.openai.com/docs/api-reference/streaming) +- [OpenAI Chat Completion Streaming](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream) +- [OpenAI Assistant Streaming](https://platform.openai.com/docs/api-reference/assistants-streaming) - [Azure OpenAI Service REST API](/azure/ai-services/openai/reference) @@ -54,7 +54,7 @@ A streamed response delivers the message content in small, incremental chunks. 
T ## Streaming Agent Invocation -The _Agent Framework_ supports _streamed_ responses when using [_Agent Chat_](./agent-chat.md) or when directly invoking a [_Chat Completion Agent_](./chat-completion-agent.md) or [_Open AI Assistant Agent_](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow. +The _Agent Framework_ supports _streamed_ responses when using [_Agent Chat_](./agent-chat.md) or when directly invoking a [_Chat Completion Agent_](./chat-completion-agent.md) or [_OpenAI Assistant Agent_](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow. ### Streamed response from _Chat Completion Agent_ @@ -104,9 +104,9 @@ async for response in agent.invoke_stream(chat) ::: zone-end -### Streamed response from _Open AI Assistant Agent_ +### Streamed response from _OpenAI Assistant Agent_ -When invoking a streamed response from an [_Open AI Assistant Agent_](./assistant-agent.md), an optional _Chat History_ can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. +When invoking a streamed response from an [_OpenAI Assistant Agent_](./assistant-agent.md), an optional _Chat History_ can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. ::: zone pivot="programming-language-csharp" ```csharp diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 36d0ad38..e972ea8a 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -93,9 +93,9 @@ agent = ChatCompletionAgent( ::: zone-end -#### Open AI Assistant Agent +#### OpenAI Assistant Agent -Templated instructions are especially powerful when working with an [_Open AI Assistant Agent_](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. +Templated instructions are especially powerful when working with an [_OpenAI Assistant Agent_](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. 
::: zone pivot="programming-language-csharp" ```csharp diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index bd63e69e..e4d36c81 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -1,5 +1,5 @@ --- -title: Exploring the Semantic Kernel Open AI Assistant Agent (Experimental) +title: Exploring the Semantic Kernel OpenAI Assistant Agent (Experimental) description: An exploration of the definition, behaviors, and usage patterns for a `OpenAIAssistantAgent` zone_pivot_groups: programming-languages author: crickman @@ -8,7 +8,7 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring the _Semantic Kernel_ _Open AI Assistant Agent_ +# Exploring the _Semantic Kernel_ _OpenAI Assistant Agent_ > [!WARNING] > The *Semantic Kernel Agent Framework* is in preview and is subject to change. @@ -41,14 +41,14 @@ Detailed API documentation related to this discussion is available at: The _OpenAI Assistant API_ is a specialized interface designed for more advanced and interactive AI capabilities, enabling developers to create personalized and multi-step task-oriented agents. Unlike the Chat Completion API, which focuses on simple conversational exchanges, the Assistant API allows for dynamic, goal-driven interactions with additional features like code-interpreter and file-search. -- [Open AI Assistant Guide](https://platform.openai.com/docs/assistants) -- [Open AI Assistant API](https://platform.openai.com/docs/api-reference/assistants) +- [OpenAI Assistant Guide](https://platform.openai.com/docs/assistants) +- [OpenAI Assistant API](https://platform.openai.com/docs/api-reference/assistants) - [Assistant API in Azure](/azure/ai-services/openai/assistants-quickstart) -## Creating an _Open AI Assistant Agent_ +## Creating an _OpenAI Assistant Agent_ -Creating an _Open AI Assistant_ requires invoking a remote service, which is handled asynchronously. To manage this, the _Open AI Assistant Agent_ is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. +Creating an _OpenAI Assistant_ requires invoking a remote service, which is handled asynchronously. To manage this, the _OpenAI Assistant Agent_ is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. ::: zone pivot="programming-language-csharp" ```csharp @@ -91,9 +91,9 @@ openai_agent = await OpenAIAssistantAgent.create( ::: zone-end -## Retrieving an _Open AI Assistant Agent_ +## Retrieving an _OpenAI Assistant Agent_ -Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an _Open AI Assistant Agent_ from an existing assistant definition. +Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an _OpenAI Assistant Agent_ from an existing assistant definition. 
::: zone pivot="programming-language-csharp" @@ -125,7 +125,7 @@ agent = await AzureAssistantAgent.retrieve(id=agent_id, kernel=kernel) ::: zone-end -## Using an _Open AI Assistant Agent_ +## Using an _OpenAI Assistant Agent_ As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your _OpenAI Assistant Agent_ are tied to this specific thread identifier which must be specified when calling the agent/ @@ -181,7 +181,7 @@ await agent.delete_thread(thread_id) ::: zone-end -## Deleting an _Open AI Assistant Agent_ +## Deleting an _OpenAI Assistant Agent_ Since the assistant's definition is stored remotely, it supports the capability to self-delete. This enables the agent to be removed from the system when it is no longer needed. @@ -217,10 +217,10 @@ is_deleted = agent._is_deleted ## How-To -For an end-to-end example for a _Open AI Assistant Agent_, see: +For an end-to-end example for a _OpenAI Assistant Agent_, see: -- [How-To: _Open AI Assistant Agent_ Code Interpreter](./examples/example-assistant-code.md) -- [How-To: _Open AI Assistant Agent_ File Search](./examples/example-assistant-search.md) +- [How-To: _OpenAI Assistant Agent_ Code Interpreter](./examples/example-assistant-code.md) +- [How-To: _OpenAI Assistant Agent_ File Search](./examples/example-assistant-search.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 6c1c115a..b3e7a40d 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -51,7 +51,7 @@ For .NET, some of AI services that support models with chat-completion include: Model|_Semantic Kernel_ AI Service --|-- -Azure Open AI|[`Microsoft.SemanticKernel.Connectors.AzureOpenAI`](/dotnet/api/microsoft.semantickernel.connectors.azureopenai) +Azure OpenAI|[`Microsoft.SemanticKernel.Connectors.AzureOpenAI`](/dotnet/api/microsoft.semantickernel.connectors.azureopenai) Gemini|[`Microsoft.SemanticKernel.Connectors.Google`](/dotnet/api/microsoft.semantickernel.connectors.google) HuggingFace|[`Microsoft.SemanticKernel.Connectors.HuggingFace`](/dotnet/api/microsoft.semantickernel.connectors.huggingface) Mistral|[`Microsoft.SemanticKernel.Connectors.MistralAI`](/dotnet/api/microsoft.semantickernel.connectors.mistralai) @@ -234,4 +234,4 @@ For an end-to-end example for a _Chat Completion Agent_, see: > [!div class="nextstepaction"] -> [Exploring _Open AI Assistant Agent_](./assistant-agent.md) +> [Exploring _OpenAI Assistant Agent_](./assistant-agent.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 87fe4167..fc11bf27 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -111,16 +111,16 @@ from semantic_kernel.kernel import Kernel ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _Open AI_ or _Azure Open AI_. +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. 
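For the Python pivot, a hedged sketch of the equivalent setup is shown below: the Semantic Kernel OpenAI and Azure OpenAI connectors can read their settings from environment variables (or a `.env` file), so values such as `OPENAI_API_KEY`/`OPENAI_CHAT_MODEL_ID` or `AZURE_OPENAI_API_KEY`/`AZURE_OPENAI_ENDPOINT`/`AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` play the role of the .NET user-secrets entries that follow; the exact variable names are assumptions based on the connector defaults.

```python
# Hedged sketch: both connectors fall back to environment settings when the
# corresponding constructor arguments are omitted.
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion

kernel = Kernel()

# Azure OpenAI: expects AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT and
# AZURE_OPENAI_CHAT_DEPLOYMENT_NAME in the environment (or a .env file).
kernel.add_service(AzureChatCompletion(service_id="azure_agent"))

# OpenAI: expects OPENAI_API_KEY and OPENAI_CHAT_MODEL_ID.
kernel.add_service(OpenAIChatCompletion(service_id="openai_agent"))
```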
::: zone pivot="programming-language-csharp" ```powershell -# Open AI +# OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" dotnet user-secrets set "OpenAISettings:ChatModel" "gpt-4o" -# Azure Open AI +# Azure OpenAI dotnet user-secrets set "AzureOpenAISettings:ApiKey" "" # Not required if using token-credential dotnet user-secrets set "AzureOpenAISettings:Endpoint" "" dotnet user-secrets set "AzureOpenAISettings:ChatModelDeployment" "gpt-4o" diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index c9b79287..f8fab56a 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -1,6 +1,6 @@ --- -title: How-To: _Open AI Assistant Agent_ Code Interpreter (Experimental) -description: A step-by-step walk-through of defining and utilizing the code-interpreter tool of an Open AI Assistant Agent. +title: How-To: _OpenAI Assistant Agent_ Code Interpreter (Experimental) +description: A step-by-step walk-through of defining and utilizing the code-interpreter tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman ms.topic: tutorial @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _Open AI Assistant Agent_ Code Interpreter +# How-To: _OpenAI Assistant Agent_ Code Interpreter > [!WARNING] > The *Semantic Kernel Agent Framework* is in preview and is subject to change. ## Overview -In this sample, we will explore how to use the _code-interpreter_ tool of an [_Open AI Assistant Agent_](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. +In this sample, we will explore how to use the _code-interpreter_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -104,16 +104,16 @@ Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` da ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _Open AI_ or _Azure Open AI_. +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. ::: zone pivot="programming-language-csharp" ```powershell -# Open AI +# OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" dotnet user-secrets set "OpenAISettings:ChatModel" "gpt-4o" -# Azure Open AI +# Azure OpenAI dotnet user-secrets set "AzureOpenAISettings:ApiKey" "" # Not required if using token-credential dotnet user-secrets set "AzureOpenAISettings:Endpoint" "" dotnet user-secrets set "AzureOpenAISettings:ChatModelDeployment" "gpt-4o" @@ -205,7 +205,7 @@ The full example code is provided in the [Final](#final) section. 
Refer to that ### Setup -Prior to creating an _Open AI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. +Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. ::: zone pivot="programming-language-csharp" @@ -885,5 +885,5 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _Open AI Assistant Agent_ Code File Search](./example-assistant-search.md) +> [How-To: _OpenAI Assistant Agent_ Code File Search](./example-assistant-search.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index f38a28d4..e05c069c 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -1,6 +1,6 @@ --- -title: How-To: _Open AI Assistant Agent_ File Search (Experimental) -description: A step-by-step walk-through of defining and utilizing the file-search tool of an Open AI Assistant Agent. +title: How-To: _OpenAI Assistant Agent_ File Search (Experimental) +description: A step-by-step walk-through of defining and utilizing the file-search tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman ms.topic: tutorial @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _Open AI Assistant Agent_ File Search +# How-To: _OpenAI Assistant Agent_ File Search > [!WARNING] > The *Semantic Kernel Agent Framework* is in preview and is subject to change. ## Overview -In this sample, we will explore how to use the _file-search_ tool of an [_Open AI Assistant Agent_](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. As part of the task, the agent will provide document citations within the response. +In this sample, we will explore how to use the _file-search_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. As part of the task, the agent will provide document citations within the response. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -106,16 +106,16 @@ Additionally, copy the `Grimms-The-King-of-the-Golden-Mountain.txt`, `Grimms-The ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _Open AI_ or _Azure Open AI_. +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. ::: zone pivot="programming-language-csharp" ```powershell -# Open AI +# OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" dotnet user-secrets set "OpenAISettings:ChatModel" "gpt-4o" -# Azure Open AI +# Azure OpenAI dotnet user-secrets set "AzureOpenAISettings:ApiKey" "" # Not required if using token-credential dotnet user-secrets set "AzureOpenAISettings:Endpoint" "https://lightspeed-team-shared-openai-eastus.openai.azure.com/" dotnet user-secrets set "AzureOpenAISettings:ChatModelDeployment" "gpt-4o" @@ -208,7 +208,7 @@ The full example code is provided in the [Final](#final) section. 
Refer to that ### Setup -Prior to creating an _Open AI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. +Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. ::: zone pivot="programming-language-csharp" diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index 589526ef..ced3bb01 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -103,18 +103,18 @@ Additionally, copy the GitHub plug-in and models (`github.py`) from [_Semantic K ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _Open AI_ or _Azure Open AI_ and also for _GitHub_. +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_ and also for _GitHub_. > Note: For information on GitHub _Personal Access Tokens_, see: [Managing your personal access tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens). ::: zone pivot="programming-language-csharp" ```powershell -# Open AI +# OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" dotnet user-secrets set "OpenAISettings:ChatModel" "gpt-4o" -# Azure Open AI +# Azure OpenAI dotnet user-secrets set "AzureOpenAISettings:ApiKey" "" # Not required if using token-credential dotnet user-secrets set "AzureOpenAISettings:Endpoint" "" dotnet user-secrets set "AzureOpenAISettings:ChatModelDeployment" "gpt-4o" @@ -682,6 +682,6 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _Open AI Assistant Agent_ Code Interpreter](./example-assistant-code.md) +> [How-To: _OpenAI Assistant Agent_ Code Interpreter](./example-assistant-code.md) diff --git a/semantic-kernel/Frameworks/agent/index.md b/semantic-kernel/Frameworks/agent/index.md index 978b0dce..7d8299ea 100644 --- a/semantic-kernel/Frameworks/agent/index.md +++ b/semantic-kernel/Frameworks/agent/index.md @@ -66,7 +66,7 @@ Package|Description [Microsoft.SemanticKernel](https://www.nuget.org/packages/Microsoft.SemanticKernel)|This contains the core _Semantic Kernel_ libraries for getting started with the _Agent Framework_. This must be explicitly referenced by your application. [Microsoft.SemanticKernel.Agents.Abstractions](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Abstractions)|Defines the core agent abstractions for the _Agent Framework_. Generally not required to be specified as it is included in both the `Microsoft.SemanticKernel.Agents.Core` and `Microsoft.SemanticKernel.Agents.OpenAI` packages. [Microsoft.SemanticKernel.Agents.Core](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Core)|Includes the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes. -[Microsoft.SemanticKernel.Agents.OpenAI](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.OpenAI)|Provides ability to use the [Open AI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent`](./assistant-agent.md). 
+[Microsoft.SemanticKernel.Agents.OpenAI](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.OpenAI)|Provides ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent`](./assistant-agent.md). ::: zone-end @@ -74,7 +74,7 @@ Package|Description Module|Description --|-- -[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the _Agent Framework_. This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [Open AI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). +[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the _Agent Framework_. This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). ::: zone-end diff --git a/semantic-kernel/concepts/ai-services/embedding-generation/index.md b/semantic-kernel/concepts/ai-services/embedding-generation/index.md index f2f4aa7c..773ab713 100644 --- a/semantic-kernel/concepts/ai-services/embedding-generation/index.md +++ b/semantic-kernel/concepts/ai-services/embedding-generation/index.md @@ -154,7 +154,7 @@ using Microsoft.SemanticKernel; IKernelBuilder kernelBuilder = Kernel.CreateBuilder(); kernelBuilder.AddAzureOpenAITextEmbeddingGeneration( deploymentName: "NAME_OF_YOUR_DEPLOYMENT", // Name of deployment, e.g. "text-embedding-ada-002". - endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure Open AI service endpoint, e.g. https://myaiservice.openai.azure.com. + endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure OpenAI service endpoint, e.g. https://myaiservice.openai.azure.com. apiKey: "YOUR_API_KEY", modelId: "MODEL_ID", // Optional name of the underlying model if the deployment name doesn't match the model name, e.g. text-embedding-ada-002. serviceId: "YOUR_SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel. @@ -301,7 +301,7 @@ var builder = Host.CreateApplicationBuilder(args); #pragma warning disable SKEXP0010 builder.Services.AddAzureOpenAITextEmbeddingGeneration( deploymentName: "NAME_OF_YOUR_DEPLOYMENT", // Name of deployment, e.g. "text-embedding-ada-002". - endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure Open AI service endpoint, e.g. https://myaiservice.openai.azure.com. + endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure OpenAI service endpoint, e.g. https://myaiservice.openai.azure.com. apiKey: "YOUR_API_KEY", modelId: "MODEL_ID", // Optional name of the underlying model if the deployment name doesn't match the model name, e.g. text-embedding-ada-002. serviceId: "YOUR_SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel. @@ -468,7 +468,7 @@ using Microsoft.SemanticKernel.Connectors.AzureOpenAI; #pragma warning disable SKEXP0010 AzureOpenAITextEmbeddingGenerationService textEmbeddingGenerationService = new ( deploymentName: "NAME_OF_YOUR_DEPLOYMENT", // Name of deployment, e.g. "text-embedding-ada-002". 
- endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure Open AI service endpoint, e.g. https://myaiservice.openai.azure.com. + endpoint: "YOUR_AZURE_ENDPOINT", // Name of Azure OpenAI service endpoint, e.g. https://myaiservice.openai.azure.com. apiKey: "YOUR_API_KEY", modelId: "MODEL_ID", // Optional name of the underlying model if the deployment name doesn't match the model name, e.g. text-embedding-ada-002. httpClient: new HttpClient(), // Optional; if not provided, the HttpClient from the kernel will be used. diff --git a/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md b/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md index a1765df1..736d5d59 100644 --- a/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md +++ b/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md @@ -26,7 +26,7 @@ You can construct instances of the text embedding services provided by Semantic They all implement the `ITextEmbeddingGenerationService` interface. ```csharp -// Constructing an Azure Open AI embedding generation service directly. +// Constructing an Azure OpenAI embedding generation service directly. ITextEmbeddingGenerationService azureOpenAITES = new AzureOpenAITextEmbeddingGenerationService( "text-embedding-ada-002", "https://{myservice}.openai.azure.com/", @@ -113,7 +113,7 @@ public async Task GenerateEmbeddingsAndSearchAsync( ## Embedding dimensions Vector databases typically require you to specify the number of dimensions that each vector has when creating the collection. -Different embedding models typically support generating vectors with different dimension sizes. E.g. Open AI `text-embedding-ada-002` +Different embedding models typically support generating vectors with different dimension sizes. E.g. OpenAI `text-embedding-ada-002` generates vectors with 1536 dimensions. Some models also allow a developer to choose the number of dimensions they want in the output vector, e.g. Google `text-embedding-004` produces vectors with 768 dimension by default, but allows a developer to choose any number of dimensions between 1 and 768. diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md index be9d84d3..d7f6e62b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md @@ -240,7 +240,7 @@ var deploymentName = "text-embedding-ada-002"; var endpoint = "https://sksample.openai.azure.com/"; var apiKey = "your-api-key"; -// Register Azure Open AI text embedding generation service and Redis vector store. +// Register Azure OpenAI text embedding generation service and Redis vector store. 
var builder = Kernel.CreateBuilder() .AddAzureOpenAITextEmbeddingGeneration(deploymentName, endpoint, apiKey) .AddRedisVectorStore("localhost:6379"); From 20fdca69cd29b62a636e6b490acadf15f0f4ddd6 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Wed, 26 Feb 2025 10:57:59 +0000 Subject: [PATCH 032/117] Fix unsupported distance functions in samples --- semantic-kernel/concepts/vector-store-connectors/index.md | 2 +- .../out-of-the-box-connectors/pinecone-connector.md | 2 +- .../out-of-the-box-connectors/qdrant-connector.md | 2 +- .../out-of-the-box-connectors/redis-connector.md | 4 ++-- .../out-of-the-box-connectors/weaviate-connector.md | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/index.md index 1284ab54..56f362eb 100644 --- a/semantic-kernel/concepts/vector-store-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/index.md @@ -169,7 +169,7 @@ public class Hotel [VectorStoreRecordData(IsFullTextSearchable = true)] public string Description { get; set; } - [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineDistance, IndexKind.Hnsw)] + [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw)] public ReadOnlyMemory? DescriptionEmbedding { get; set; } [VectorStoreRecordData(IsFilterable = true)] diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index ff7fd03a..134a29bc 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -158,7 +158,7 @@ public class Hotel [VectorStoreRecordData(IsFullTextSearchable = true, StoragePropertyName = "hotel_description")] public string Description { get; set; } - [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineDistance, IndexKind.Hnsw)] + [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw)] public ReadOnlyMemory? DescriptionEmbedding { get; set; } } ``` diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md index 1b0934e2..03ee4e4b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md @@ -138,7 +138,7 @@ public class Hotel [VectorStoreRecordData(IsFullTextSearchable = true, StoragePropertyName = "hotel_description")] public string Description { get; set; } - [VectorStoreRecordVector(4, DistanceFunction.CosineDistance, IndexKind.Hnsw, StoragePropertyName = "hotel_description_embedding")] + [VectorStoreRecordVector(4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw, StoragePropertyName = "hotel_description_embedding")] public ReadOnlyMemory? 
DescriptionEmbedding { get; set; } } ``` diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md index fbd9f8f1..72d8ef82 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md @@ -340,7 +340,7 @@ public class Hotel [VectorStoreRecordData(IsFullTextSearchable = true)] public string Description { get; set; } - [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineDistance, IndexKind.Hnsw)] + [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw)] public ReadOnlyMemory? DescriptionEmbedding { get; set; } } ``` @@ -375,7 +375,7 @@ public class Hotel [VectorStoreRecordData(IsFullTextSearchable = true, StoragePropertyName = "hotel_description")] public string Description { get; set; } - [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineDistance, IndexKind.Hnsw, StoragePropertyName = "hotel_description_embedding")] + [VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw, StoragePropertyName = "hotel_description_embedding")] public ReadOnlyMemory? DescriptionEmbedding { get; set; } } ``` diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md index 52ea034a..52a0178a 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md @@ -163,7 +163,7 @@ public class Hotel public string Description { get; set; } [JsonPropertyName("HOTEL_DESCRIPTION_EMBEDDING")] - [VectorStoreRecordVector(4, DistanceFunction.EuclideanDistance, IndexKind.QuantizedFlat)] + [VectorStoreRecordVector(4, DistanceFunction.CosineDistance, IndexKind.QuantizedFlat)] public ReadOnlyMemory? 
DescriptionEmbedding { get; set; } } ``` From d7d91a3c5555b92e2821aaba0d2c8dcb08c0bf96 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Mon, 17 Feb 2025 15:09:44 +0100 Subject: [PATCH 033/117] updated table --- .../out-of-the-box-connectors/index.md | 94 +++++++++---------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 15b30d1d..e95a8f24 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -22,60 +22,60 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma ::: zone pivot="programming-language-csharp" -| Vector Store Connectors | C# | Uses officially supported SDK | Maintainer / Vendor | -|------------------------------------------------------------|:---------------:|:---------------------------------:|:----------------------------------:| -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Couchbase](./couchbase-connector.md) | ✅ | ✅ | Couchbase | -| [Elasticsearch](./elasticsearch-connector.md) | ✅ | ✅ | Elastic | -| Chroma | Planned | | | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| Milvus | Planned | | | -| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | ✅ | ❌ | Microsoft Semantic Kernel Project | -| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Sql Server | Planned | | | -| [SQLite](./sqlite-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | -| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Vector Store Connectors | C# | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------------------------ | :------------------------: | :---------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Couchbase](./couchbase-connector.md) | ✅ | ✅ | Couchbase | +| [Elasticsearch](./elasticsearch-connector.md) | ✅ | ✅ | Elastic | +| Chroma | Planned | | | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| Milvus | Planned | | | +| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | ✅ | ❌ | Microsoft Semantic Kernel Project | +| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| 
[Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Sql Server | Planned | | | +| [SQLite](./sqlite-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | +| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end ::: zone pivot="programming-language-python" -| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | -|------------------------------------------------------------|:---------------:|:----------------------------------:|:----------------------------------:| -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| Chroma | Planned | | | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| Milvus | Planned | | | -| [MongoDB](./mongodb-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Postgres](./postgres-connector.md) | ✅ | | Microsoft Semantic Kernel Project | -| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Sql Server | Planned | | | -| [SQLite](./sqlite-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | -| [Weaviate](./weaviate-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------------------------ | :------------------------: | :---------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | +| Chroma | Planned | | | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| Milvus | Planned | | | +| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | +| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Sql Server | Planned | | | +| [SQLite](./sqlite-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | +| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft 
Semantic Kernel Project | +| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end ::: zone pivot="programming-language-java" -| Vector Store Connectors | Java | Uses officially supported SDK | Maintainer / Vendor | -|------------------------------------------------------------|:--------------:|:----------------------------------:|:----------------------------------:| -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| HSQLDB | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | -| [JDBC](./jdbc-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| MySQL | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | -| Postgres | Use [JDBC](./jdbc-connector.md) | | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| SQLite | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | -| [Volatile (In-Memory)](./volatile-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| Vector Store Connectors | Java | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------- | :-----------------------------: | :---------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| HSQLDB | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | +| [JDBC](./jdbc-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| MySQL | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | +| Postgres | Use [JDBC](./jdbc-connector.md) | | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| SQLite | Use [JDBC](./jdbc-connector.md) | ✅ | Microsoft Semantic Kernel Project | +| [Volatile (In-Memory)](./volatile-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | ::: zone-end From e5bc203287653a1be611c5d55cff0d835ba61067 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Mon, 17 Feb 2025 15:53:24 +0100 Subject: [PATCH 034/117] adding mssing stores --- .../out-of-the-box-connectors/index.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index e95a8f24..958500a3 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -59,9 +59,10 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Sql Server | Planned | | | -| [SQLite](./sqlite-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | +| SQL Server | Planned | ✅ | Microsoft Semantic Kernel Project | +| SQLite | Planned | ✅ | Microsoft Semantic Kernel Project | | [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | +| Usearch | Planned | ✅ | Microsoft Semantic Kernel Project | | 
[Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end From 65270c5cf694a3cf6319ea3b494044235790b20a Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 26 Feb 2025 14:46:28 +0100 Subject: [PATCH 035/117] updated a whole bunch --- .../out-of-the-box-connectors/TOC.yml | 2 + .../azure-ai-search-connector.md | 64 +++++++--- .../azure-cosmosdb-mongodb-connector.md | 97 ++++++++++++--- .../chroma-connector.md | 104 ++++++++++++++++ .../out-of-the-box-connectors/index.md | 36 +++--- .../mongodb-connector.md | 96 ++++++++++++--- .../weaviate-connector.md | 113 ++++++++++++++---- 7 files changed, 423 insertions(+), 89 deletions(-) create mode 100644 semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml index b4c46306..de9c5f57 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml @@ -6,6 +6,8 @@ href: azure-cosmosdb-mongodb-connector.md - name: Azure CosmosDB NoSQL connector href: azure-cosmosdb-nosql-connector.md +- name: Chroma connector + href: chroma-connector.md - name: Couchbase connector href: couchbase-connector.md - name: Elasticsearch connector diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md index 305ce851..9f75aca2 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md @@ -17,27 +17,59 @@ ms.service: semantic-kernel The Azure AI Search Vector Store connector can be used to access and manage data in Azure AI Search. The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | Azure AI Search Index | -| Supported key property types | string | -| Supported data property types |
      • string
      • int
      • long
      • double
      • float
      • bool
      • DateTimeOffset
      • *and enumerables of each of these types*
      | -| Supported vector property types | ReadOnlyMemory\ | -| Supported index types |
      • Hnsw
      • Flat
      | -| Supported distance functions |
      • CosineSimilarity
      • DotProductSimilarity
      • EuclideanDistance
      | -| Supported filter clauses |
      • AnyTagEqualTo
      • EqualTo
      | -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | Yes | -| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +::: zone pivot="programming-language-csharp" + +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure AI Search Index | +| Supported key property types | string | +| Supported data property types |
      • string
      • int
      • long
      • double
      • float
      • bool
      • DateTimeOffset
      • *and enumerables of each of these types*
      | +| Supported vector property types | ReadOnlyMemory\ | +| Supported index types |
      • Hnsw
      • Flat
      | +| Supported distance functions |
      • CosineSimilarity
      • DotProductSimilarity
      • EuclideanDistance
      | +| Supported filter clauses |
      • AnyTagEqualTo
      • EqualTo
      | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +::: zone-end +::: zone pivot="programming-language-python" +| Feature Area | Support | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure AI Search Index | +| Supported key property types | string | +| Supported data property types |
      • string
      • int
      • long
      • double
      • float
      • bool
      • DateTimeOffset
      • *and iterables of each of these types*
      | +| Supported vector property types | list[float], list[int], ndarray | +| Supported index types |
      • Hnsw
      • Flat
      | +| Supported distance functions |
      • CosineSimilarity
      • DotProductSimilarity
      • EuclideanDistance
    • Hamming
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +::: zone-end +::: zone pivot="programming-language-java" +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure AI Search Index | +| Supported key property types | string | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and enumerables of each of these types*
    | +| Supported vector property types | ReadOnlyMemory\ | +| Supported index types |
    • Hnsw
    • Flat
    | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +::: zone-end ## Limitations Notable Azure AI Search connector functionality limitations. -| Feature Area | Workaround | -|--------------------------------------------------------------------------------------| -----------------------------------------------------------------------------------------------| -| Configuring full text search analyzers during collection creation is not supported. | Use the Azure AI Search Client SDK directly for collection creation | +| Feature Area | Workaround | +| ----------------------------------------------------------------------------------- | ------------------------------------------------------------------- | +| Configuring full text search analyzers during collection creation is not supported. | Use the Azure AI Search Client SDK directly for collection creation | ::: zone pivot="programming-language-csharp" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md index 46fbbf96..c50ed06f 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md @@ -13,30 +13,50 @@ ms.service: semantic-kernel > [!WARNING] > The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. -::: zone pivot="programming-language-csharp" - ## Overview The Azure CosmosDB MongoDB Vector Store connector can be used to access and manage data in Azure CosmosDB MongoDB (vCore). The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | Azure Cosmos DB MongoDB (vCore) Collection + Index | -| Supported key property types | string | -| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and enumerables of each of these types*
    | -| Supported vector property types |
    • ReadOnlyMemory\<float\>
    • ReadOnlyMemory\<double\>
    | -| Supported index types |
    • Hnsw
    • IvfFlat
    | -| Supported distance functions |
    • CosineDistance
    • DotProductSimilarity
    • EuclideanDistance
    | -| Supported filter clauses |
    • EqualTo
    | -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | No | -| StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +::: zone pivot="programming-language-csharp" + +| Feature Area | Support | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure Cosmos DB MongoDB (vCore) Collection + Index | +| Supported key property types | string | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and enumerables of each of these types*
    | +| Supported vector property types |
    • ReadOnlyMemory\<float\>
    • ReadOnlyMemory\<double\>
    | +| Supported index types |
    • Hnsw
    • IvfFlat
    | +| Supported distance functions |
    • CosineDistance
    • DotProductSimilarity
    • EuclideanDistance
    | +| Supported filter clauses |
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +| StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +::: zone-end +::: zone pivot="programming-language-python" +| Feature Area | Support | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure Cosmos DB MongoDB (vCore) Collection + Index | +| Supported key property types | string | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and iterables of each of these types*
    | +| Supported vector property types |
    • list[float]
    • list[int]
  • ndarray
| +| Supported index types |
  • Hnsw
  • IvfFlat
| +| Supported distance functions |
  • CosineDistance
  • DotProductSimilarity
  • EuclideanDistance
| +| Supported filter clauses |
  • EqualTo
  • AnyTagsEqualTo
| +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +::: zone-end +::: zone pivot="programming-language-java" +More info coming soon. +::: zone-end ## Limitations This connector is compatible with Azure Cosmos DB MongoDB (vCore) and is *not* designed to be compatible with Azure Cosmos DB MongoDB (RU). +::: zone pivot="programming-language-csharp" + ## Getting started Add the Azure CosmosDB MongoDB Vector Store connector NuGet package to your project. @@ -165,9 +185,52 @@ public class Hotel ::: zone-end ::: zone pivot="programming-language-python" -## Coming soon +## Getting started -More info coming soon. +Add the Azure CosmosDB MongoDB Vector Store dependencies to your environment. Because the Azure CosmosDB MongoDB connector is built on the MongoDB Atlas connector and uses the same client as that one, you need to install with these extras: + +```bash +pip install semantic-kernel[azure, mongo] +``` + +You can then create the vector store. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBforMongoDBStore + +# If the right environment settings are set, namely AZURE_COSMOS_DB_MONGODB_CONNECTION_STRING and optionally AZURE_COSMOS_DB_MONGODB_DATABASE_NAME, this is enough to create the Store: +store = AzureCosmosDBforMongoDBStore() +``` + +Alternatively, you can also pass in your own mongodb client if you want to have more control over the client construction: + +```python +from pymongo import AsyncMongoClient +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBforMongoDBStore + +client = AsyncMongoClient(...) +store = AzureCosmosDBforMongoDBStore(mongo_client=client) +``` + +When a client is passed in, Semantic Kernel will not close the connection for you, so you need to ensure to close it, for instance with a `async with` statement. + +You can also create a collection directly, without the store. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBforMongoDBCollection + +# `hotel` is a class created with the @vectorstoremodel decorator +collection = AzureCosmosDBforMongoDBCollection( + collection_name="my_collection", + data_model_type=hotel +) +``` + +## Serialization + +Since the Azure CosmosDB for MongoDB connector needs a simple dict with the fields corresponding to the index as the input, the serialization is quite easy, it only uses a predetermined key `_id`, so we replace the key of the data model with that if it is not already `_id`. + +For more details on this concept see the [serialization documentation](./../serialization.md). ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md new file mode 100644 index 00000000..f16e802c --- /dev/null +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md @@ -0,0 +1,104 @@ +--- +title: Using the Semantic Kernel Chroma Vector Store connector (Preview) +description: Contains information on how to use a Semantic Kernel Vector store connector to access and manipulate data in ChromaDB. 
+zone_pivot_groups: programming-languages +author: eavanvalkenburg +ms.topic: conceptual +ms.author: eavanvalkenburg +ms.date: 02/26/2025 +ms.service: semantic-kernel +--- + +# Using the Chroma connector (Preview) + +> [!WARNING] +> The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. + +::: zone pivot="programming-language-csharp" + +## Not supported + +Not supported. + +::: zone-end +::: zone pivot="programming-language-python" + +## Overview + +The Chroma Vector Store connector can be used to access and manage data in Chroma. The connector has the +following characteristics. + +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------- | +| Collection maps to | Chroma collection | +| Supported key property types | string | +| Supported data property types | All types that are supported by System.Text.Json (either built-in or by using a custom converter) | +| Supported vector property types |
  • list[float]
  • list[int]
  • ndarray
| +| Supported index types |
  • HNSW
| +| Supported distance functions |
  • CosineSimilarity
  • DotProductSimilarity
  • EuclideanSquaredDistance
| +| Supported filter clauses |
  • AnyTagEqualTo
  • EqualTo
| +| Supports multiple vectors in a record | No | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | + +## Limitations + +Notable Chroma connector functionality limitations. + +| Feature Area | Workaround | +| ------------------ | ------------------------------------------------------------------------------------------------------------------------- | +| Client-server mode | Use the client.HttpClient and pass the result to the `client` parameter, we do not support a AsyncHttpClient at this time | +| Chroma Cloud | Unclear at this time, as Chroma Cloud is still in private preview | + +## Getting Started + +Add the Chroma Vector Store connector dependencies to your project. + +```bash +pip install semantic-kernel[chroma] +``` + +You can then create the vector store. + +```python +from semantic_kernel.connectors.memory.chroma import ChromaStore + +store = ChromaStore() +``` + +Alternatively, you can also pass in your own mongodb client if you want to have more control over the client construction: + +```python +from chromadb import Client +from semantic_kernel.connectors.memory.chroma import ChromaStore + +client = Client(...) +store = ChromaStore(client=client) +``` + +You can also create a collection directly, without the store. + +```python +from semantic_kernel.connectors.memory.chroma import ChromaCollection + +# `hotel` is a class created with the @vectorstoremodel decorator +collection = ChromaCollection( + collection_name="my_collection", + data_model_type=hotel +) +``` + +## Serialization + +The Chroma client returns both `get` and `search` results in tabular form, this means that there are between 3 and 5 lists being returned in a dict, the lists are 'keys', 'documents', 'embeddings', and optionally 'metadatas' and 'distances'. The Semantic Kernel Chroma connector will automatically convert this into a list of `dict` objects, which are then parsed back to your data model. + +It could be very interesting performance wise to do straight serialization from this format into a dataframe-like structure as that saves a lot of rebuilding of the data structure. This is not done for you, even when using container mode, you would have to specify this yourself, for more details on this concept see the [serialization documentation](./../serialization.md). + +::: zone-end +::: zone pivot="programming-language-java" + +## Not supported + +Not supported. 
+ +::: zone-end diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 958500a3..ef0c44cc 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -45,25 +45,23 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma ::: zone-end ::: zone pivot="programming-language-python" -| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | -| ------------------------------------------------------------------ | :------------------------: | :---------------------------: | :-------------------------------: | -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| Chroma | Planned | | | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| Milvus | Planned | | | -| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | In Development | ✅ | Microsoft Semantic Kernel Project | -| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| SQL Server | Planned | ✅ | Microsoft Semantic Kernel Project | -| SQLite | Planned | ✅ | Microsoft Semantic Kernel Project | -| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | -| Usearch | Planned | ✅ | Microsoft Semantic Kernel Project | -| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------------------------ | :-----: | :---------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | +| Faiss | Planned | | | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | Planned | ✅ | Microsoft Semantic Kernel Project | +| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| SQL Server | Planned | ✅ | Microsoft Semantic Kernel Project | +| SQLite | Planned | ✅ | Microsoft Semantic Kernel 
Project | +| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md index 6c406610..e5e79881 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md @@ -13,25 +13,44 @@ ms.service: semantic-kernel > [!WARNING] > The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. -::: zone pivot="programming-language-csharp" - ## Overview The MongoDB Vector Store connector can be used to access and manage data in MongoDB. The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | MongoDB Collection + Index | -| Supported key property types | string | -| Supported data property types |
  • string
  • int
  • long
  • double
  • float
  • decimal
  • bool
  • DateTime
  • *and enumerables of each of these types*
| -| Supported vector property types |
  • ReadOnlyMemory\<float\><br>
  • ReadOnlyMemory\<double\><br>
| -| Supported index types | N/A | -| Supported distance functions |
  • CosineSimilarity
  • DotProductSimilarity
  • EuclideanDistance
| -| Supported filter clauses |
  • EqualTo
| -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | No | -| StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +::: zone pivot="programming-language-csharp" + +| Feature Area | Support | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | MongoDB Collection + Index | +| Supported key property types | string | +| Supported data property types |
  • string
  • int
  • long
  • double
  • float
  • decimal
  • bool
  • DateTime
  • *and enumerables of each of these types*
| +| Supported vector property types |
  • ReadOnlyMemory\<float\><br>
  • ReadOnlyMemory\<double\><br>
| +| Supported index types | N/A | +| Supported distance functions |
  • CosineSimilarity
  • DotProductSimilarity
  • EuclideanDistance
| +| Supported filter clauses |
  • EqualTo
| +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +| StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +::: zone-end +::: zone pivot="programming-language-python" + +| Feature Area | Support | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | MongoDB Collection + Index | +| Supported key property types | string | +| Supported data property types |
  • string
  • int
  • long
  • double
  • float
  • decimal
  • bool
  • DateTime
  • *and iterables of each of these types*
| +| Supported vector property types |
  • list[float]
  • list[int]
  • ndarray
| +| Supported index types |<br>
    • Hnsw
    • IvfFlat
    | +| Supported distance functions |
    • CosineDistance
    • DotProductSimilarity
    • EuclideanDistance
    | +| Supported filter clauses |
    • EqualTo
    • AnyTagsEqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +::: zone-end +::: zone pivot="programming-language-java" +More info coming soon. +::: zone-end ## Getting started @@ -136,9 +155,52 @@ public class Hotel ::: zone-end ::: zone pivot="programming-language-python" -## Coming soon +## Getting started -More info coming soon. +Add the MongoDB Atlas Vector Store dependencies to your environment. It needs the `pymongo` package which is included in the mongo extra: + +```bash +pip install semantic-kernel[mongo] +``` + +You can then create the vector store. + +```python +from semantic_kernel.connectors.memory.mongodb_atlas import MongoDBAtlasStore + +# If the right environment settings are set, namely MONGODB_ATLAS_CONNECTION_STRING and optionally MONGODB_ATLAS_DATABASE_NAME and MONGODB_ATLAS_INDEX_NAME, this is enough to create the Store: +store = MongoDBAtlasStore() +``` + +Alternatively, you can also pass in your own mongodb client if you want to have more control over the client construction: + +```python +from pymongo import AsyncMongoClient +from semantic_kernel.connectors.memory.mongodb_atlas import MongoDBAtlasStore + +client = AsyncMongoClient(...) +store = MongoDBAtlasStore(mongo_client=client) +``` + +When a client is passed in, Semantic Kernel will not close the connection for you, so you need to ensure to close it, for instance with a `async with` statement. + +You can also create a collection directly, without the store. + +```python +from semantic_kernel.connectors.memory.mongodb_atlas import MongoDBAtlasCollection + +# `hotel` is a class created with the @vectorstoremodel decorator +collection = MongoDBAtlasCollection( + collection_name="my_collection", + data_model_type=hotel +) +``` + +## Serialization + +Since the MongoDB Atlas connector needs a simple dict with the fields corresponding to the index as the input, the serialization is quite easy, it only uses a predetermined key `_id`, so we replace the key of the data model with that if it is not already `_id`. + +For more details on this concept see the [serialization documentation](./../serialization.md). ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md index 52ea034a..608135d8 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md @@ -13,33 +13,51 @@ ms.service: semantic-kernel > [!WARNING] > The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. -::: zone pivot="programming-language-csharp" - ## Overview The Weaviate Vector Store connector can be used to access and manage data in Weaviate. The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | Weaviate Collection | -| Supported key property types | Guid | -| Supported data property types |
    • string
    • byte
    • short
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • DateTimeOffset
    • Guid
    • *and enumerables of each of these types*
    | -| Supported vector property types |
    • ReadOnlyMemory\<float\><br>
    • ReadOnlyMemory\<double\><br>
    | -| Supported index types |
    • Hnsw
    • Flat
    • Dynamic
    | -| Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | -| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | Yes | -| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +::: zone pivot="programming-language-csharp" + +| Feature Area | Support | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Weaviate Collection | +| Supported key property types | Guid | +| Supported data property types |
    • string
    • byte
    • short
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • DateTimeOffset
    • Guid
    • *and enumerables of each of these types*
    | +| Supported vector property types |
    • ReadOnlyMemory\<float\><br>
    • ReadOnlyMemory\<double\><br>
    | +| Supported index types |
    • Hnsw
    • Flat
    • Dynamic
    | +| Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +::: zone-end +::: zone pivot="programming-language-python" +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Weaviate Collection | +| Supported key property types | Guid | +| Supported data property types |
    • string
    • byte
    • short
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • *and iterables of each of these types*
    | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray<br>
| +| Supported index types |<br>
    • Hnsw
    • Flat
    • Dynamic
    | +| Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    |
+| Supports multiple vectors in a record | Yes |
+| IsFilterable supported? | Yes |
+| IsFullTextSearchable supported? | Yes |
+::: zone-end
+::: zone pivot="programming-language-java"
+Coming soon.
+::: zone-end
 
 ## Limitations
 
 Notable Weaviate connector functionality limitations.
 
-| Feature Area | Workaround |
-|------------------------------------------------------------------------| -----------------------------------------------------------------------------------------------|
-| Using the 'vector' property for single vector objects is not supported | Use of the 'vectors' property is supported instead. |
+| Feature Area                                                            | Workaround                                           |
+| ----------------------------------------------------------------------- | ---------------------------------------------------- |
+| Using the 'vector' property for single vector objects is not supported  | Use of the 'vectors' property is supported instead.  |
 
 > [!WARNING]
 > Weaviate requires collection names to start with an upper case letter. If you do not provide a collection name with an upper case letter, Weaviate will return an error when you try and create your collection. The error that you will see is `Cannot query field "mycollection" on type "GetObjectsObj". Did you mean "Mycollection"?` where `mycollection` is your collection name. In this example, if you change your collection name to `Mycollection` instead, this will fix the error.
 
@@ -181,9 +199,64 @@ public class Hotel
 
 ::: zone-end
 ::: zone pivot="programming-language-python"
 
-## Coming soon
+## Getting Started
 
-More info coming soon.
+Add the Weaviate Vector Store connector dependencies to your project.
+
+```bash
+pip install semantic-kernel[weaviate]
+```
+
+You can then create the vector store. It uses environment settings to connect:
+
+For using Weaviate Cloud:
+
+- url: WEAVIATE_URL
+- api_key: WEAVIATE_API_KEY
+
+For using Weaviate Local (i.e. Weaviate in a Docker container):
+
+- local_host: WEAVIATE_LOCAL_HOST
+- local_port: WEAVIATE_LOCAL_PORT
+- local_grpc_port: WEAVIATE_LOCAL_GRPC_PORT
+
+If you want to use embedded:
+
+- use_embed: WEAVIATE_USE_EMBED
+
+These settings are mutually exclusive: provide exactly one of the groups above, otherwise an exception is raised.
+
+```python
+from semantic_kernel.connectors.memory.weaviate import WeaviateStore
+
+store = WeaviateStore()
+```
+
+Alternatively, you can also pass in your own Weaviate client if you want to have more control over the client construction:
+
+```python
+import weaviate
+from semantic_kernel.connectors.memory.weaviate import WeaviateStore
+
+client = weaviate.WeaviateAsyncClient(...)
+store = WeaviateStore(async_client=client)
+```
+
+You can also create a collection directly, without the store.
+
+```python
+from semantic_kernel.connectors.memory.weaviate import WeaviateCollection
+
+# `hotel` is a class created with the @vectorstoremodel decorator
+collection = WeaviateCollection(
+    collection_name="my_collection",
+    data_model_type=hotel
+)
+```
+
+## Serialization
+
+The Weaviate client returns its own objects, which are parsed and turned into dicts in the regular flow. For more details on this concept, see the [serialization documentation](./../serialization.md).
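+
+For reference, a data model such as the `hotel` class used in the examples above is defined with the `@vectorstoremodel` decorator. The following is only a rough sketch: the field names, key type, and vector dimensions are assumptions for illustration, so check the data model documentation for the exact annotations your connector supports.
+
+```python
+from dataclasses import dataclass
+from typing import Annotated
+
+from semantic_kernel.data import (
+    VectorStoreRecordDataField,
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+    vectorstoremodel,
+)
+
+
+@vectorstoremodel
+@dataclass
+class hotel:
+    # Weaviate expects a GUID value for the key; other connectors accept plain strings.
+    id: Annotated[str, VectorStoreRecordKeyField()]
+    description: Annotated[str, VectorStoreRecordDataField()]
+    # 1536 is only an example; use the dimensionality of your embedding model.
+    description_vector: Annotated[list[float] | None, VectorStoreRecordVectorField(dimensions=1536)] = None
+```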
::: zone-end ::: zone pivot="programming-language-java" From ed29edc78d76f582961cffe467bf765bcec5c9c7 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 26 Feb 2025 14:51:08 +0100 Subject: [PATCH 036/117] fixes --- .../out-of-the-box-connectors/chroma-connector.md | 2 +- .../out-of-the-box-connectors/mongodb-connector.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md index f16e802c..c02e9fd1 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md @@ -4,7 +4,7 @@ description: Contains information on how to use a Semantic Kernel Vector store c zone_pivot_groups: programming-languages author: eavanvalkenburg ms.topic: conceptual -ms.author: eavanvalkenburg +ms.author: edvan ms.date: 02/26/2025 ms.service: semantic-kernel --- diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md index e5e79881..152d579b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md @@ -51,6 +51,7 @@ The MongoDB Vector Store connector can be used to access and manage data in Mong ::: zone pivot="programming-language-java" More info coming soon. ::: zone-end +::: zone pivot="programming-language-csharp" ## Getting started @@ -157,7 +158,7 @@ public class Hotel ## Getting started -Add the MongoDB Atlas Vector Store dependencies to your environment. It needs the `pymongo` package which is included in the mongo extra: +Add the MongoDB Atlas Vector Store dependencies to your environment. 
It needs the `pymongo` package which is included in the mongo extra: , you need to install with these extras: ```bash pip install semantic-kernel[mongo] From f37d616cd8b0eced2dc07704855e4d8e6a9ac151 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 26 Feb 2025 16:45:16 +0100 Subject: [PATCH 037/117] initial version of realtime docs --- semantic-kernel/concepts/ai-services/TOC.yml | 4 +- semantic-kernel/concepts/ai-services/index.md | 22 ++--- .../concepts/ai-services/realtime.md | 88 +++++++++++++++++++ 3 files changed, 103 insertions(+), 11 deletions(-) create mode 100644 semantic-kernel/concepts/ai-services/realtime.md diff --git a/semantic-kernel/concepts/ai-services/TOC.yml b/semantic-kernel/concepts/ai-services/TOC.yml index 8d24a54b..fd35b379 100644 --- a/semantic-kernel/concepts/ai-services/TOC.yml +++ b/semantic-kernel/concepts/ai-services/TOC.yml @@ -6,4 +6,6 @@ - name: Embedding generation href: embedding-generation/TOC.yml - name: AI Integrations - href: integrations.md \ No newline at end of file + href: integrations.md +- name: Realtime + href: realtime.md \ No newline at end of file diff --git a/semantic-kernel/concepts/ai-services/index.md b/semantic-kernel/concepts/ai-services/index.md index 3e473aec..2a2da7b2 100644 --- a/semantic-kernel/concepts/ai-services/index.md +++ b/semantic-kernel/concepts/ai-services/index.md @@ -14,21 +14,23 @@ One of the main features of Semantic Kernel is its ability to add different AI s Within Semantic Kernel, there are interfaces for the most popular AI tasks. In the table below, you can see the services that are supported by each of the SDKs. -| Services | C# | Python | Java | Notes | -|-----------------------------------|:----:|:------:|:----:|-------| -| [Chat completion](./chat-completion/index.md) | ✅ | ✅ | ✅ | -| Text generation | ✅ | ✅ | ✅ | -| Embedding generation (Experimental) | ✅ | ✅ | ✅ | -| Text-to-image (Experimental) | ✅ | ✅ | ❌ | -| Image-to-text (Experimental) | ✅ | ❌ | ❌ | -| Text-to-audio (Experimental) | ✅ | ✅ | ❌ | -| Audio-to-text (Experimental) | ✅ | ✅ | ❌ | +| Services | C# | Python | Java | Notes | +| --------------------------------------------- | :---: | :----: | :---: | ----- | +| [Chat completion](./chat-completion/index.md) | ✅ | ✅ | ✅ | +| Text generation | ✅ | ✅ | ✅ | +| Embedding generation (Experimental) | ✅ | ✅ | ✅ | +| Text-to-image (Experimental) | ✅ | ✅ | ❌ | +| Image-to-text (Experimental) | ✅ | ❌ | ❌ | +| Text-to-audio (Experimental) | ✅ | ✅ | ❌ | +| Audio-to-text (Experimental) | ✅ | ✅ | ❌ | +| Realtime (Experimental) | ❌ | ✅ | ❌ | > [!TIP] > In most scenarios, you will only need to add chat completion to your kernel, but to support multi-modal AI, you can add any of the above services to your kernel. ## Next steps + To learn more about each of the services, please refer to the specific articles for each service type. In each of the articles we provide sample code for adding the service to the kernel across multiple AI service providers. > [!div class="nextstepaction"] -> [Learn about chat completion](./chat-completion/index.md) \ No newline at end of file +> [Learn about chat completion](./chat-completion/index.md) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md new file mode 100644 index 00000000..4635a42f --- /dev/null +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -0,0 +1,88 @@ +--- +title: Realtime AI Integrations for Semantic Kernel +description: Learn about realtime AI integrations available in Semantic Kernel. 
+author: eavanvalkenburg
+ms.topic: conceptual
+ms.author: edvan
+ms.date: 02/26/2025
+ms.service: semantic-kernel
+---
+
+# Realtime API integrations for Semantic Kernel
+
+The first realtime API integration for Semantic Kernel has been added. It is currently only available in Python and considered experimental, because the underlying services are still being developed and are subject to change.
+
+## Realtime Client abstraction
+
+To support different realtime APIs from different vendors, each using its own protocol, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and to send and receive messages. It is responsible for handling the connection to the service, for sending and receiving messages, and for handling any errors that occur during the connection or the message exchange.
+
+### Realtime API
+
+Any realtime client consists of the following methods:
+
+| Method           | Description                                                                                                  |
+| ---------------- | ------------------------------------------------------------------------------------------------------------ |
+| `create_session` | Creates a new session                                                                                        |
+| `update_session` | Updates an existing session                                                                                  |
+| `delete_session` | Deletes an existing session                                                                                  |
+| `receive`        | An asynchronous generator method that listens for messages from the service and yields them as they arrive.  |
+| `send`           | Sends a message to the service                                                                               |
+
+## Python implementations
+
+The python version of semantic kernel currently supports the following realtime clients:
+
+| Client | Protocol  | Description                                                                                                                                                                                          |
+| ------ | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| OpenAI | Websocket | The OpenAI Realtime API is a websocket based api that allows you to send and receive messages in realtime, this connector uses the OpenAI Python package to connect and receive and send messages.   |
+| OpenAI | WebRTC    | The OpenAI Realtime API is a WebRTC based api that allows you to send and receive messages in realtime, it needs a webRTC compatible audio track at session creation time.                           |
+| Azure  | Websocket | The Azure Realtime API is a websocket based api that allows you to send and receive messages in realtime, this uses the same package as the OpenAI websocket connector.                              |
+
+## Getting started
+
+To get started with the Realtime API, you need to install the `semantic-kernel` package with the `realtime` extra.
+
+```bash
+pip install semantic-kernel[realtime]
+```
+
+You can then create the realtime client and start a session.
+
+```python
+from semantic_kernel.connectors.ai.open_ai import (
+    AzureRealtimeWebsocket,
+    ListenEvents,
+    OpenAIRealtimeExecutionSettings,
+)
+from semantic_kernel.contents import RealtimeAudioEvent, RealtimeTextEvent
+
+# this will use environment variables to get the api key, endpoint, api version and deployment name.
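+# (Assumption: these are typically the Azure OpenAI endpoint, API key, API version and the realtime
+#  model deployment name, for example AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY; check the
+#  AzureRealtimeWebsocket settings class for the exact variable names.)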
+realtime_client = AzureRealtimeWebsocket() +settings = OpenAIRealtimeExecutionSettings() +async with realtime_client(settings=settings, create_response=True): + async for event in realtime_client.receive(): + match event: + # receiving a piece of audio + case RealtimeAudioEvent(): + await audio_player.add_audio(event.audio) + # receiving a piece of audio transcript + case RealtimeTextEvent(): + # the model returns both audio and transcript of the audio, which we will print + print(event.text.text, end="") + case _: + # OpenAI Specific events + if event.service_type == ListenEvents.SESSION_UPDATED: + print("Session updated") + if event.service_type == ListenEvents.RESPONSE_CREATED: + print("\nMosscap (transcript): ", end="") +``` + +There are two important things to note, the first is that the `realtime_client` is an async context manager, this means that you can use it in an async function and use `async with` to create the session. +The second is that the `receive` method is an async generator, this means that you can use it in a for loop to receive messages as they arrive. + +In this simple example, we are passing the audio to a unspecified `audio_player` object, and printing the transcript as it arrives. + +There is also a `audio_output_callback` parameter on the client creation or on the `receive` method, this callback will be called first, and leads to smoother playback compared to the above example. + +See the samples in our repo [link to follow]. From 2891764d605c14bafbc740d451fa962a19a2e9f8 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 26 Feb 2025 16:48:03 +0100 Subject: [PATCH 038/117] extra info in table --- semantic-kernel/concepts/ai-services/realtime.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index 4635a42f..65eddf90 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -33,11 +33,11 @@ Any realtime client consists of the following methods: The python version of semantic kernel currently supports the following realtime clients: -| Client | Protocol | Description | -| ------ | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| OpenAI | Websocket | The OpenAI Realtime API is a websocket based api that allows you to send and receive messages in realtime, this connector uses the OpenAI Python package to connect and receive and send messages. | -| OpenAI | WebRTC | The OpenAI Realtime API is a WebRTC based api that allows you to send and receive messages in realtime, it needs a webRTC compatible audio track at session creation time. | -| Azure | Websocket | The Azure Realtime API is a websocket based api that allows you to send and receive messages in realtime, this uses the same package as the OpenAI websocket connector. 
| +| Client | Protocol | Modalities | Function calling enabled | Description | +| ------ | --------- | ------------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| OpenAI | Websocket | Text & Audio | Yes | The OpenAI Realtime API is a websocket based api that allows you to send and receive messages in realtime, this connector uses the OpenAI Python package to connect and receive and send messages. | +| OpenAI | WebRTC | Text & Audio | Yes | The OpenAI Realtime API is a WebRTC based api that allows you to send and receive messages in realtime, it needs a webRTC compatible audio track at session creation time. | +| Azure | Websocket | Text & Audio | Yes | The Azure Realtime API is a websocket based api that allows you to send and receive messages in realtime, this uses the same package as the OpenAI websocket connector. | ## Getting started From 02978dbcb1c964e492e212be19c8823091f65cab Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Wed, 26 Feb 2025 16:50:58 +0100 Subject: [PATCH 039/117] added link --- semantic-kernel/concepts/ai-services/index.md | 2 +- .../concepts/ai-services/integrations.md | 27 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/index.md b/semantic-kernel/concepts/ai-services/index.md index 2a2da7b2..9ce5d2fd 100644 --- a/semantic-kernel/concepts/ai-services/index.md +++ b/semantic-kernel/concepts/ai-services/index.md @@ -23,7 +23,7 @@ Within Semantic Kernel, there are interfaces for the most popular AI tasks. In t | Image-to-text (Experimental) | ✅ | ❌ | ❌ | | Text-to-audio (Experimental) | ✅ | ✅ | ❌ | | Audio-to-text (Experimental) | ✅ | ✅ | ❌ | -| Realtime (Experimental) | ❌ | ✅ | ❌ | +| [Realtime](./realtime.md) (Experimental) | ❌ | ✅ | ❌ | > [!TIP] > In most scenarios, you will only need to add chat completion to your kernel, but to support multi-modal AI, you can add any of the above services to your kernel. 
diff --git a/semantic-kernel/concepts/ai-services/integrations.md b/semantic-kernel/concepts/ai-services/integrations.md index a8d34ec5..d30a6984 100644 --- a/semantic-kernel/concepts/ai-services/integrations.md +++ b/semantic-kernel/concepts/ai-services/integrations.md @@ -18,21 +18,22 @@ With the available AI connectors, developers can easily build AI agents with swa ### AI Services -| Services | C# | Python | Java | Notes | -|-----------------------------------|:----:|:------:|:----:|-------| -| Text Generation | ✅ | ✅ | ✅ | Example: Text-Davinci-003 | -| Chat Completion | ✅ | ✅ | ✅ | Example: GPT4, Chat-GPT | -| Text Embeddings (Experimental) | ✅ | ✅ | ✅ | Example: Text-Embeddings-Ada-002 | -| Text to Image (Experimental) | ✅ | ✅ | ❌ | Example: Dall-E | -| Image to Text (Experimental) | ✅ | ❌ | ❌ | Example: Pix2Struct | -| Text to Audio (Experimental) | ✅ | ✅ | ❌ | Example: Text-to-speech | -| Audio to Text (Experimental) | ✅ | ✅ | ❌ | Example: Whisper | +| Services | C# | Python | Java | Notes | +| ------------------------------ | :---: | :----: | :---: | -------------------------------- | +| Text Generation | ✅ | ✅ | ✅ | Example: Text-Davinci-003 | +| Chat Completion | ✅ | ✅ | ✅ | Example: GPT4, Chat-GPT | +| Text Embeddings (Experimental) | ✅ | ✅ | ✅ | Example: Text-Embeddings-Ada-002 | +| Text to Image (Experimental) | ✅ | ✅ | ❌ | Example: Dall-E | +| Image to Text (Experimental) | ✅ | ❌ | ❌ | Example: Pix2Struct | +| Text to Audio (Experimental) | ✅ | ✅ | ❌ | Example: Text-to-speech | +| Audio to Text (Experimental) | ✅ | ✅ | ❌ | Example: Whisper | +| Realtime (Experimental) | ❌ | ✅ | ❌ | Example: gpt-4o-realtime-preview | ## Additional plugins If you want to extend the functionality of your AI agent, you can use plugins to integrate with other Microsoft services. Here are some of the plugins that are available for Semantic Kernel: -| Plugin | C# | Python | Java | Description | -| ---------- | :-: | :----: | :--: | ----------- | -| Logic Apps | ✅ | ✅ | ✅ | Build workflows within Logic Apps using its available connectors and import them as plugins in Semantic Kernel. [Learn more](../plugins/adding-logic-apps-as-plugins.md). | -| Azure Container Apps Dynamic Sessions | ✅ | ✅ | ❌ | With dynamic sessions, you can recreate the Code Interpreter experience from the Assistants API by effortlessly spinning up Python containers where AI agents can execute Python code. [Learn more](/azure/container-apps/sessions). | +| Plugin | C# | Python | Java | Description | +| ------------------------------------- | :---: | :----: | :---: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Logic Apps | ✅ | ✅ | ✅ | Build workflows within Logic Apps using its available connectors and import them as plugins in Semantic Kernel. [Learn more](../plugins/adding-logic-apps-as-plugins.md). | +| Azure Container Apps Dynamic Sessions | ✅ | ✅ | ❌ | With dynamic sessions, you can recreate the Code Interpreter experience from the Assistants API by effortlessly spinning up Python containers where AI agents can execute Python code. [Learn more](/azure/container-apps/sessions). | From 26fa90967da73031d994b08aa3f7b181a81ce3d4 Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Fri, 28 Feb 2025 11:10:36 +0900 Subject: [PATCH 040/117] Update Agent Framework related doc and code samples. 
Add migration code for Python (#469) * Update OpenAI assistant related code samples. Add migration code for Python * improve migration guide * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Replace italics with code format. * update bookmarks * Update Python docs * Add dotnet migration guide. * update formatting in migration guide * fix headers * Fix header again * update guide to include rc * Small update to include new method get_response * Update important tags with some experimental (group chat) and some release candidate --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../Frameworks/agent/agent-architecture.md | 34 +- .../Frameworks/agent/agent-chat.md | 58 +- .../Frameworks/agent/agent-functions.md | 89 ++- .../Frameworks/agent/agent-streaming.md | 24 +- .../Frameworks/agent/agent-templates.md | 18 +- .../Frameworks/agent/assistant-agent.md | 95 ++- .../Frameworks/agent/chat-completion-agent.md | 79 ++- .../examples/example-agent-collaboration.md | 118 ++-- .../agent/examples/example-assistant-code.md | 181 +++-- .../examples/example-assistant-search.md | 152 +++-- .../agent/examples/example-chat-agent.md | 38 +- semantic-kernel/Frameworks/agent/index.md | 16 +- .../agent-framework-rc-migration-guide.md | 623 ++++++++++++++++++ semantic-kernel/support/migration/toc.yml | 4 +- 14 files changed, 1180 insertions(+), 349 deletions(-) create mode 100644 semantic-kernel/support/migration/agent-framework-rc-migration-guide.md diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index c3966fa8..b22c48ee 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -1,5 +1,5 @@ --- -title: Semantic Kernel Agent Architecture (Experimental) +title: Semantic Kernel Agent Architecture description: An overview of the architecture of the Semantic Kernel Agent Framework and how it aligns with core Semantic Kernel features. zone_pivot_groups: programming-languages author: crickman @@ -10,15 +10,15 @@ ms.service: semantic-kernel --- # An Overview of the Agent Architecture -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> Single-agent features, such as ChatCompletionAgent and OpenAIAssistantAgent, are in the release candidate stage. These features are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. However, agent chat patterns are still in the experimental stage. These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. This article covers key concepts in the architecture of the Agent Framework, including foundational principles, design objectives, and strategic goals. ## Goals -The _Agent Framework_ was developed with the following key priorities in mind: +The `Agent Framework` was developed with the following key priorities in mind: - The _Semantic Kernel_ framework serves as the core foundation for implementing agent functionalities. - Multiple agents can collaborate within a single conversation, while integrating human input. 
@@ -28,7 +28,7 @@ The _Agent Framework_ was developed with the following key priorities in mind: ## Agent -The abstract _Agent_ class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [_Kernel_](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [_Chat Completion Agent_](./chat-completion-agent.md) and the [_OpenAI Assistant Agent_](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. +The abstract `Agent` class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [`Kernel`](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [`ChatCompletionAgent`](./chat-completion-agent.md) and the [`OpenAIAssistantAgent`](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. ::: zone pivot="programming-language-csharp" @@ -49,7 +49,7 @@ The abstract _Agent_ class serves as the core abstraction for all types of agent ::: zone-end -Agents can either be invoked directly to perform tasks or orchestrated within an [_Agent Chat_](./agent-chat.md), where multiple agents may collaborate or interact dynamically with user inputs. This flexible structure allows agents to adapt to various conversational or task-driven scenarios, providing developers with robust tools for building intelligent, multi-agent systems. +Agents can either be invoked directly to perform tasks or orchestrated within an [`AgentChat`](./agent-chat.md), where multiple agents may collaborate or interact dynamically with user inputs. This flexible structure allows agents to adapt to various conversational or task-driven scenarios, providing developers with robust tools for building intelligent, multi-agent systems. #### Deep Dive: @@ -64,7 +64,7 @@ Agents can either be invoked directly to perform tasks or orchestrated within an ## Agent Chat -The [_Agent Chat_](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [_Agent Group Chat_](./agent-chat.md#creating-an-agent-group-chat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. +The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. 
This structure facilitates more complex, multi-agent scenarios where different agents can work together, share information, and dynamically respond to evolving conversations, making it an ideal solution for advanced use cases such as customer support, multi-faceted task management, or collaborative problem-solving environments. @@ -74,7 +74,7 @@ This structure facilitates more complex, multi-agent scenarios where different a ## Agent Channel -The _Agent Channel_ class enables agents of various types to participate in an [_Agent Chat_](./agent-chat.md). This functionality is completely hidden from users of the _Agent Framework_ and only needs to be considered by developers creating a custom [_Agent_](#agent). +The _Agent Channel_ class enables agents of various types to participate in an [`AgentChat`](./agent-chat.md). This functionality is completely hidden from users of the `Agent Framework` and only needs to be considered by developers creating a custom [`Agent`](#agent). ::: zone pivot="programming-language-csharp" @@ -96,14 +96,14 @@ The _Agent Channel_ class enables agents of various types to participate in an [ ## Agent Alignment with _Semantic Kernel_ Features -The _Agent Framework_ is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework. +The `Agent Framework` is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework. -### The _Kernel_ +### The `Kernel` -At the heart of the _Semantic Kernel_ ecosystem is the [_Kernel_](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The _Kernel_ acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. +At the heart of the Semantic Kernel ecosystem is the [`Kernel`](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The `Kernel` acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. 
-The [_Chat Completion Agent_](./chat-completion-agent.md) and [_OpenAI Assistant Agent_](./assistant-agent.md) articles provide specific details on how to create each type of agent. +The [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. #### Related API's: @@ -136,7 +136,7 @@ Plugins are a fundamental aspect of the _Semantic Kernel_, enabling developers t #### Example: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) #### Related API's: @@ -169,7 +169,7 @@ Plugins are a fundamental aspect of the _Semantic Kernel_, enabling developers t Agent messaging, including both input and response, is built upon the core content types of the _Semantic Kernel_, providing a unified structure for communication. This design choice simplifies the process of transitioning from traditional chat-completion patterns to more advanced agent-driven patterns in your application development. By leveraging familiar _Semantic Kernel_ content types, developers can seamlessly integrate agent capabilities into their applications without needing to overhaul existing systems. This streamlining ensures that as you evolve from basic conversational AI to more autonomous, task-oriented agents, the underlying framework remains consistent, making development faster and more efficient. -> Note: The [_OpenAI Assistant Agent_`_](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: +> Note: The [`OpenAIAssistantAgent`](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: #### Related API's: @@ -205,13 +205,13 @@ Agent messaging, including both input and response, is built upon the core conte ### [Templating](./agent-templates.md) -An agent's role is primarily shaped by the instructions it receives, which dictate its behavior and actions. Similar to invoking a _Kernel_ [prompt](../../concepts/prompts/index.md), an agent's instructions can include templated parameters—both values and functions—that are dynamically substituted during execution. This enables flexible, context-aware responses, allowing the agent to adjust its output based on real-time input. +An agent's role is primarily shaped by the instructions it receives, which dictate its behavior and actions. Similar to invoking a `Kernel` [prompt](../../concepts/prompts/index.md), an agent's instructions can include templated parameters—both values and functions—that are dynamically substituted during execution. This enables flexible, context-aware responses, allowing the agent to adjust its output based on real-time input. Additionally, an agent can be configured directly using a _Prompt Template Configuration_, providing developers with a structured and reusable way to define its behavior. This approach offers a powerful tool for standardizing and customizing agent instructions, ensuring consistency across various use cases while still maintaining dynamic adaptability. 
#### Example: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) #### Related API's: @@ -245,7 +245,7 @@ Additionally, an agent can be configured directly using a _Prompt Template Confi ### [Chat Completion](./chat-completion-agent.md) -The [_Chat Completion Agent_](./chat-completion-agent.md) is designed around any _Semantic Kernel_ [AI service](../../concepts/ai-services/chat-completion/index.md), offering a flexible and convenient persona encapsulation that can be seamlessly integrated into a wide range of applications. This agent allows developers to easily bring conversational AI capabilities into their systems without having to deal with complex implementation details. It mirrors the features and patterns found in the underlying [AI service](../../concepts/ai-services/chat-completion/index.md), ensuring that all functionalities—such as natural language processing, dialogue management, and contextual understanding—are fully supported within the [_Chat Completion Agent_](./chat-completion-agent.md), making it a powerful tool for building conversational interfaces. +The [`ChatCompletionAgent`](./chat-completion-agent.md) is designed around any _Semantic Kernel_ [AI service](../../concepts/ai-services/chat-completion/index.md), offering a flexible and convenient persona encapsulation that can be seamlessly integrated into a wide range of applications. This agent allows developers to easily bring conversational AI capabilities into their systems without having to deal with complex implementation details. It mirrors the features and patterns found in the underlying [AI service](../../concepts/ai-services/chat-completion/index.md), ensuring that all functionalities—such as natural language processing, dialogue management, and contextual understanding—are fully supported within the [`ChatCompletionAgent`](./chat-completion-agent.md), making it a powerful tool for building conversational interfaces. #### Related API's: diff --git a/semantic-kernel/Frameworks/agent/agent-chat.md b/semantic-kernel/Frameworks/agent/agent-chat.md index bf59e765..d0eb6c7c 100644 --- a/semantic-kernel/Frameworks/agent/agent-chat.md +++ b/semantic-kernel/Frameworks/agent/agent-chat.md @@ -8,10 +8,10 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring Agent Collaboration in _Agent Chat_ +# Exploring Agent Collaboration in `AgentChat` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. Detailed API documentation related to this discussion is available at: @@ -36,22 +36,22 @@ Detailed API documentation related to this discussion is available at: ::: zone-end -## What is _Agent Chat_? +## What is `AgentChat`? -_Agent Chat_ provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [_Chat Completion Agent_](./chat-completion-agent.md) and an [_OpenAI Assistant Agent_](./assistant-agent.md) to work together within the same conversation. _Agent Chat_ also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. 
+`AgentChat` provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [`ChatCompletionAgent`](./chat-completion-agent.md) and an [`OpenAIAssistantAgent`](./assistant-agent.md) to work together within the same conversation. `AgentChat` also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. -As an abstract class, _Agent Chat_ can be subclassed to support custom scenarios. +As an abstract class, `AgentChat` can be subclassed to support custom scenarios. -One such subclass, _Agent Group Chat_, offers a concrete implementation of _Agent Chat_, using a strategy-based approach to manage conversation dynamics. +One such subclass, `AgentGroupChat`, offers a concrete implementation of `AgentChat`, using a strategy-based approach to manage conversation dynamics. -## Creating an _Agent Group Chat_ +## Creating an `AgentGroupChat` -To create an _Agent Group Chat_, you may either specify the participating agents or create an empty chat and subsequently add agent participants. Configuring the _Chat-Settings_ and _Strategies_ is also performed during _Agent Group Chat_ initialization. These settings define how the conversation dynamics will function within the group. +To create an `AgentGroupChat`, you may either specify the participating agents or create an empty chat and subsequently add agent participants. Configuring the _Chat-Settings_ and _Strategies_ is also performed during `AgentGroupChat` initialization. These settings define how the conversation dynamics will function within the group. -> Note: The default _Chat-Settings_ result in a conversation that is limited to a single response. See [_Agent Chat_ Behavior](#defining-agent-group-chat-behavior) for details on configuring _Chat-Settings. +> Note: The default _Chat-Settings_ result in a conversation that is limited to a single response. See [`AgentChat` Behavior](#defining-agentgroupchat-behavior) for details on configuring _Chat-Settings. -#### Creating _Agent Group Chat_ with _Agents_: +#### Creating an `AgentGroupChat` with an `Agent`: ::: zone pivot="programming-language-csharp" ```csharp @@ -81,7 +81,7 @@ chat = AgentGroupChat(agents=[agent1, agent2]) ::: zone-end -#### Adding _Agents_ to a _Agent Group Chat_: +#### Adding an `Agent` to an `AgentGroupChat`: ::: zone pivot="programming-language-csharp" ```csharp @@ -120,13 +120,13 @@ chat.add_agent(agent=agent2) ::: zone-end -## Using _Agent Group Chat_ +## Using `AgentGroupChat` -_Agent Chat_ supports two modes of operation: _Single-Turn_ and _Multi-Turn_. In _single-turn_, a specific agent is designated to provide a response. In _multi-turn_, all agents in the conversation take turns responding until a termination criterion is met. In both modes, agents can collaborate by responding to one another to achieve a defined goal. +`AgentChat` supports two modes of operation: `Single-Turn` and `Multi-Turn`. In `single-turn`, a specific agent is designated to provide a response. In `multi-turn`, all agents in the conversation take turns responding until a termination criterion is met. In both modes, agents can collaborate by responding to one another to achieve a defined goal. ### Providing Input -Adding an input message to an _Agent Chat_ follows the same pattern as whit a _Chat History_ object. +Adding an input message to an `AgentChat` follows the same pattern as whit a `ChatHistory` object. 
::: zone pivot="programming-language-csharp" ```csharp @@ -140,7 +140,7 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, "") ```python chat = AgentGroupChat() -await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="")) +await chat.add_chat_message(message="") ``` ::: zone-end @@ -154,7 +154,7 @@ await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=" Note: The most recent message is provided first (descending order: newest to oldest). @@ -280,7 +280,7 @@ history = await chat.get_chat_messages() ::: zone-end -Since different agent types or configurations may maintain their own version of the conversation history, agent specific history is also available by specifing an agent. (For example: [_OpenAI Assistant_](./assistant-agent.md) versus [_Chat Completion Agent_](./chat-completion-agent.md).) +Since different agent types or configurations may maintain their own version of the conversation history, agent specific history is also available by specifing an agent. (For example: [`OpenAIAssistant`](./assistant-agent.md) versus [`ChatCompletionAgent`](./chat-completion-agent.md).) ::: zone pivot="programming-language-csharp" ```csharp @@ -319,7 +319,7 @@ history2 = await chat.get_chat_messages(agent=agent2) ::: zone-end -## Defining _Agent Group Chat_ Behavior +## Defining `AgentGroupChat` Behavior Collaboration among agents to solve complex tasks is a core agentic pattern. To use this pattern effectively, a system must be in place that not only determines which agent should respond during each turn but also assesses when the conversation has achieved its intended goal. This requires managing agent selection and establishing clear criteria for conversation termination, ensuring seamless cooperation between agents toward a solution. Both of these aspects are governed by the _Execution Settings_ property. @@ -329,7 +329,7 @@ The following sections, [Agent Selection](#agent-selection) and [Chat Terminatio In multi-turn invocation, agent selection is guided by a _Selection Strategy_. This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. For convenience, two predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for handling agent selection during conversations. -If known, an initial agent may be specified to always take the first turn. A history reducer may also be employed to limit token usage when using a strategy based on a _Kernel Function_. +If known, an initial agent may be specified to always take the first turn. A history reducer may also be employed to limit token usage when using a strategy based on a `KernelFunction`. ::: zone pivot="programming-language-csharp" @@ -414,14 +414,12 @@ REVIEWER_NAME = "Reviewer" WRITER_NAME = "Writer" agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions="", ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions="", @@ -472,7 +470,7 @@ chat = AgentGroupChat( In _multi-turn_ invocation, the _Termination Strategy_ dictates when the final turn takes place. This strategy ensures the conversation ends at the appropriate point. -This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. 
For convenience, serveral predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for defining termination criteria for an _Agent Chat_ conversations. +This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. For convenience, serveral predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for defining termination criteria for an `AgentChat` conversations. ::: zone pivot="programming-language-csharp" @@ -549,14 +547,12 @@ REVIEWER_NAME = "Reviewer" WRITER_NAME = "Writer" agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions="", ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions="", @@ -595,7 +591,7 @@ chat = AgentGroupChat( ### Resetting Chat Completion State -Regardless of whether _Agent Group Chat_ is invoked using the single-turn or multi-turn approach, the state of the _Agent Group Chat_ is updated to indicate it is _completed_ once the termination criteria is met. This ensures that the system recognizes when a conversation has fully concluded. To continue using an _Agent Group Chat_ instance after it has reached the _Completed_ state, this state must be reset to allow further interactions. Without resetting, additional interactions or agent responses will not be possible. +Regardless of whether `AgentGroupChat` is invoked using the single-turn or multi-turn approach, the state of the `AgentGroupChat` is updated to indicate it is _completed_ once the termination criteria is met. This ensures that the system recognizes when a conversation has fully concluded. To continue using an `AgentGroupChat` instance after it has reached the _Completed_ state, this state must be reset to allow further interactions. Without resetting, additional interactions or agent responses will not be possible. In the case of a multi-turn invocation that reaches the maximum turn limit, the system will cease agent invocation but will not mark the instance as _completed_. This allows for the possibility of extending the conversation without needing to reset the _Completion_ state. @@ -636,9 +632,9 @@ if chat.is_complete: ### Clear Full Conversation State -When done using an _Agent Chat_ where an [_OpenAI Assistant_](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. _Agent Chat_ supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. +When done using an `AgentChat` where an [`OpenAIAssistant`](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. `AgentChat` supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. -A full reset does not remove the _agents_ that had joined the _Agent Chat_ and leaves the _Agent Chat_ in a state where it can be reused. This allows for the continuation of interactions with the same agents without needing to reinitialize them, making future conversations more efficient. 
+A full reset does not remove the _agents_ that had joined the `AgentChat` and leaves the `AgentChat` in a state where it can be reused. This allows for the continuation of interactions with the same agents without needing to reinitialize them, making future conversations more efficient. ::: zone pivot="programming-language-csharp" ```csharp @@ -669,9 +665,9 @@ await chat.reset() ## How-To -For an end-to-end example for using _Agent Group Chat_ for _Agent_ collaboration, see: +For an end-to-end example for using `AgentGroupChat` for `Agent` collaboration, see: -- [How to Coordinate Agent Collaboration using _Agent Group Chat_](./examples/example-agent-collaboration.md) +- [How to Coordinate Agent Collaboration using `AgentGroupChat`](./examples/example-agent-collaboration.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/agent-functions.md b/semantic-kernel/Frameworks/agent/agent-functions.md index 3ada7e4f..879703b6 100644 --- a/semantic-kernel/Frameworks/agent/agent-functions.md +++ b/semantic-kernel/Frameworks/agent/agent-functions.md @@ -1,5 +1,5 @@ --- -title: Configuring Agents with Semantic Kernel Plugins. (Experimental) +title: Configuring Agents with Semantic Kernel Plugins. description: Describes how to use Semantic Kernal plugins and function calling with agents. zone_pivot_groups: programming-languages author: crickman @@ -10,14 +10,14 @@ ms.service: semantic-kernel --- # Configuring Agents with Semantic Kernel Plugins -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Functions and Plugins in Semantic Kernel -Function calling is a powerful tool that allows developers to add custom functionalities and expand the capabilities of AI applications. The _Semantic Kernel_ [Plugin](../../concepts/plugins/index.md) architecture offers a flexible framework to support [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md). For an _Agent_, integrating [Plugins](../../concepts/plugins/index.md) and [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is built on this foundational _Semantic Kernel_ feature. +Function calling is a powerful tool that allows developers to add custom functionalities and expand the capabilities of AI applications. The _Semantic Kernel_ [Plugin](../../concepts/plugins/index.md) architecture offers a flexible framework to support [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md). For an `Agent`, integrating [Plugins](../../concepts/plugins/index.md) and [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is built on this foundational _Semantic Kernel_ feature. -Once configured, an agent will choose when and how to call an available function, as it would in any usage outside of the _Agent Framework_. +Once configured, an agent will choose when and how to call an available function, as it would in any usage outside of the `Agent Framework`. 
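The Python snippets later on this page register a `SamplePlugin` with the agent or kernel without defining it. As a point of reference, a plugin is simply a class whose methods are decorated with `@kernel_function`. The following is only a hypothetical sketch of what such a plugin might look like; the class name, function names, and return values are illustrative and not part of the Semantic Kernel API:

```python
from typing import Annotated

from semantic_kernel.functions import kernel_function


class SamplePlugin:
    """A hypothetical plugin exposing functions the agent may choose to call."""

    @kernel_function(description="Provides a list of specials from the menu.")
    def get_specials(self) -> Annotated[str, "The specials from the menu."]:
        # Static data for illustration; a real plugin might call a service or database.
        return "Special Soup: Clam Chowder\nSpecial Salad: Cobb Salad\nSpecial Drink: Chai Tea"

    @kernel_function(description="Provides the price of the requested menu item.")
    def get_item_price(
        self, menu_item: Annotated[str, "The name of the menu item."]
    ) -> Annotated[str, "The price of the menu item."]:
        return "$9.99"
```

Once a class like this is registered, either directly via the `plugins` argument or through the `Kernel` as shown below, the model can request either function and, with automatic function calling enabled, the result is folded back into the conversation.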
::: zone pivot="programming-language-csharp" @@ -46,11 +46,11 @@ Once configured, an agent will choose when and how to call an available function ## Adding Plugins to an Agent -Any [Plugin](../../concepts/plugins/index.md) available to an _Agent_ is managed within its respective _Kernel_ instance. This setup enables each _Agent_ to access distinct functionalities based on its specific role. +Any [Plugin](../../concepts/plugins/index.md) available to an `Agent` is managed within its respective `Kernel` instance. This setup enables each `Agent` to access distinct functionalities based on its specific role. -[Plugins](../../concepts/plugins/index.md) can be added to the _Kernel_ either before or after the _Agent_ is created. The process of initializing [Plugins](../../concepts/plugins/index.md) follows the same patterns used for any _Semantic Kernel_ implementation, allowing for consistency and ease of use in managing AI capabilities. +[Plugins](../../concepts/plugins/index.md) can be added to the `Kernel` either before or after the `Agent` is created. The process of initializing [Plugins](../../concepts/plugins/index.md) follows the same patterns used for any _Semantic Kernel_ implementation, allowing for consistency and ease of use in managing AI capabilities. -> Note: For a [_Chat Completion Agent_](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [_OpenAI Assistant_](./assistant-agent.md) agent is always based on automatic function calling. +> Note: For a [`ChatCompletionAgent`](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [`OpenAIAssistant`](./assistant-agent.md) agent is always based on automatic function calling. ::: zone pivot="programming-language-csharp" ```csharp @@ -85,18 +85,46 @@ ChatCompletionAgent CreateSpecificAgent(Kernel kernel, string credentials) ::: zone-end ::: zone pivot="programming-language-python" + +There are two ways to create a `ChatCompletionAgent` with plugins. + +#### Method 1: Specify Plugins via the Constructor + +You can directly pass a list of plugins to the constructor: + ```python +from semantic_kernel.agents import ChatCompletionAgent + +# Create the Chat Completion Agent instance by specifying a list of plugins +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + instructions="", + plugins=[SamplePlugin()] +) +``` + +> [!TIP] +> By default, auto-function calling is enabled. To disable it, set the `function_choice_behavior` argument to `function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=False)` in the constructor. With this setting, plugins are still broadcast to the model, but they are not automatically invoked. If execution settings specify the same `service_id` or `ai_model_id` as the AI service configuration, the function calling behavior defined in the execution settings (via `KernelArguments`) will take precedence over the function choice behavior set in the constructor. + +#### Method 2: Configure the Kernel Manually + +If no kernel is provided via the constructor, one is automatically created during model validation. Any plugins passed in take precedence and are added to the kernel. 
For more fine-grained control over the kernel's state, follow these steps: + +```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings +from semantic_kernel.functions import KernelFunctionFromPrompt +from semantic_kernel.kernel import Kernel + # Create the instance of the Kernel kernel = Kernel() -# Define the service ID -service_id = "" - # Add the chat completion service to the Kernel -kernel.add_service(AzureChatCompletion(service_id=service_id)) +kernel.add_service(AzureChatCompletion()) -# Get the AI Service settings for the specified service_id -settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id) +# Get the AI Service settings +settings = kernel.get_prompt_execution_settings_from_service_id() # Configure the function choice behavior to auto invoke kernel functions settings.function_choice_behavior = FunctionChoiceBehavior.Auto() @@ -106,13 +134,16 @@ kernel.add_plugin(SamplePlugin(), plugin_name="") # Create the agent agent = ChatCompletionAgent( - service_id=service_id, kernel=kernel, name=, instructions=, arguments=KernelArguments(settings=settings), ) ``` + +> [!TIP] +> If a `service_id` is not specified when adding a service to the kernel, it defaults to `default`. When configuring multiple AI services on the kernel, it’s recommended to differentiate them using the `service_id` argument. This allows you to retrieve execution settings for a specific `service_id` and tie those settings to the desired service. + ::: zone-end ::: zone pivot="programming-language-java" @@ -160,27 +191,35 @@ ChatCompletionAgent CreateSpecificAgent(Kernel kernel) ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings +from semantic_kernel.functions import KernelFunctionFromPrompt +from semantic_kernel.kernel import Kernel + # Create the instance of the Kernel kernel = Kernel() -# Define the service ID -service_id = "" - # Add the chat completion service to the Kernel -kernel.add_service(AzureChatCompletion(service_id=service_id)) +kernel.add_service(AzureChatCompletion()) -# Get the AI Service settings for the specified service_id -settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id) +# Create the AI Service settings +settings = AzureChatPromptExecutionSettings() # Configure the function choice behavior to auto invoke kernel functions settings.function_choice_behavior = FunctionChoiceBehavior.Auto() # Add the Plugin to the Kernel -kernel.add_plugin(SamplePlugin(), plugin_name="") +kernel.add_function( + plugin_name="", + function=KernelFunctionFromPrompt( + function_name="", + prompt="", + ) +) # Create the agent agent = ChatCompletionAgent( - service_id=service_id, kernel=kernel, name=, instructions=, @@ -198,14 +237,14 @@ agent = ChatCompletionAgent( ## Limitations for Agent Function Calling -When directly invoking a[_Chat Completion Agent_](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. 
However, when using an [_OpenAI Assistant_](./assistant-agent.md) or [_Agent Chat_](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available. +When directly invoking a[`ChatCompletionAgent`](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. However, when using an [`OpenAIAssistant`](./assistant-agent.md) or [`AgentChat`](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available. ## How-To For an end-to-end example for using function calling, see: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index eac08b71..0c0c340b 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -1,5 +1,5 @@ --- -title: How to Stream Agent Responses. (Experimental) +title: How to Stream Agent Responses. description: Describes how to utilize streamed responses for agents and agent chat. zone_pivot_groups: programming-languages author: crickman @@ -10,8 +10,8 @@ ms.service: semantic-kernel --- # How to Stream Agent Responses -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## What is a Streamed Response? @@ -54,11 +54,11 @@ A streamed response delivers the message content in small, incremental chunks. T ## Streaming Agent Invocation -The _Agent Framework_ supports _streamed_ responses when using [_Agent Chat_](./agent-chat.md) or when directly invoking a [_Chat Completion Agent_](./chat-completion-agent.md) or [_OpenAI Assistant Agent_](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow. +The `Agent Framework` supports _streamed_ responses when using [`AgentChat`](./agent-chat.md) or when directly invoking a [`ChatCompletionAgent`](./chat-completion-agent.md) or [`OpenAIAssistantAgent`](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow. -### Streamed response from _Chat Completion Agent_ +### Streamed response from `ChatCompletionAgent` -When invoking a streamed response from a [_Chat Completion Agent_](./chat-completion-agent.md), the _Chat History_ is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the _Chat History_ reflects fully formed responses for consistency. 
+When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. ::: zone pivot="programming-language-csharp" ```csharp @@ -104,9 +104,9 @@ async for response in agent.invoke_stream(chat) ::: zone-end -### Streamed response from _OpenAI Assistant Agent_ +### Streamed response from `OpenAIAssistantAgent` -When invoking a streamed response from an [_OpenAI Assistant Agent_](./assistant-agent.md), an optional _Chat History_ can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. +When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), an optional `ChatHistory` can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. ::: zone pivot="programming-language-csharp" ```csharp @@ -139,7 +139,7 @@ agent = OpenAIAssistantAgent(...) thread_id = await agent.create_thread() # Add user message to the conversation -await agent.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="")) +await agent.add_chat_message(message="") # Generate the streamed agent response(s) async for response in agent.invoke_stream(thread_id=thread_id): @@ -154,9 +154,9 @@ async for response in agent.invoke_stream(thread_id=thread_id): ::: zone-end -## Streaming with _Agent Chat_ +## Streaming with `AgentChat` -When using [_Agent Chat_](./agent-chat.md), the full conversation history is always preserved and can be accessed directly through the [_Agent Chat_](./agent-chat.md) instance. Therefore, the key difference between streamed and non-streamed invocations lies in the delivery method and the resulting content type. In both cases, users can still access the complete history, but streamed responses provide real-time updates as the conversation progresses. This allows for greater flexibility in handling interactions, depending on the application's needs. +When using [`AgentChat`](./agent-chat.md), the full conversation history is always preserved and can be accessed directly through the [`AgentChat`](./agent-chat.md) instance. Therefore, the key difference between streamed and non-streamed invocations lies in the delivery method and the resulting content type. In both cases, users can still access the complete history, but streamed responses provide real-time updates as the conversation progresses. This allows for greater flexibility in handling interactions, depending on the application's needs. 
::: zone pivot="programming-language-csharp" ```csharp @@ -202,6 +202,8 @@ chat = AgentGroupChat( termination_strategy=DefaultTerminationStrategy(maximum_iterations=10), ) +await chat.add_chat_message("") + # Invoke agents last_agent = None async for response in chat.invoke_stream(): diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index e972ea8a..35c851d4 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -1,5 +1,5 @@ --- -title: Create an Agent from a Semantic Kernel Template (Experimental) +title: Create an Agent from a Semantic Kernel Template description: Describes how to use a Semantic Kernel template to define an agent. zone_pivot_groups: programming-languages author: crickman @@ -10,8 +10,8 @@ ms.service: semantic-kernel --- # Create an Agent from a Semantic Kernel Template -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Prompt Templates in Semantic Kernel @@ -95,7 +95,7 @@ agent = ChatCompletionAgent( #### OpenAI Assistant Agent -Templated instructions are especially powerful when working with an [_OpenAI Assistant Agent_](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. +Templated instructions are especially powerful when working with an [`OpenAIAssistantAgent`](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. ::: zone pivot="programming-language-csharp" ```csharp @@ -208,7 +208,7 @@ agent = ChatCompletionAgent( ### Overriding Template Values for Direct Invocation -When invoking an agent directly, without using [_Agent Chat_](./agent-chat.md), the agent's parameters can be overridden as needed. This allows for greater control and customization of the agent's behavior during specific tasks, enabling you to modify its instructions or settings on the fly to suit particular requirements. +When invoking an agent directly, without using [`AgentChat`](./agent-chat.md), the agent's parameters can be overridden as needed. This allows for greater control and customization of the agent's behavior during specific tasks, enabling you to modify its instructions or settings on the fly to suit particular requirements. 
::: zone pivot="programming-language-csharp" ```csharp @@ -263,6 +263,12 @@ chat = ChatHistory() override_arguments = KernelArguments(topic="Cat", length="3") +# Two ways to get a response from the agent + +# Get the response which returns a ChatMessageContent directly +response = await agent.get_response(chat, arguments=override_arguments) + +# or use the invoke method to return an AsyncIterable of ChatMessageContent async for response in agent.invoke(chat, arguments=override_arguments): # process agent response(s)... ``` @@ -280,7 +286,7 @@ async for response in agent.invoke(chat, arguments=override_arguments): For an end-to-end example for creating an agent from a _pmompt-template_, see: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index e4d36c81..17ee3199 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -1,5 +1,5 @@ --- -title: Exploring the Semantic Kernel OpenAI Assistant Agent (Experimental) +title: Exploring the Semantic Kernel OpenAI Assistant Agent description: An exploration of the definition, behaviors, and usage patterns for a `OpenAIAssistantAgent` zone_pivot_groups: programming-languages author: crickman @@ -8,10 +8,10 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring the _Semantic Kernel_ _OpenAI Assistant Agent_ +# Exploring the _Semantic Kernel_ `OpenAIAssistantAgent` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. Detailed API documentation related to this discussion is available at: @@ -24,7 +24,6 @@ Detailed API documentation related to this discussion is available at: ::: zone pivot="programming-language-python" -- [`open_ai_assistant_base`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_base) - [`azure_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent) - [`open_ai_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent) @@ -46,9 +45,9 @@ The _OpenAI Assistant API_ is a specialized interface designed for more advanced - [Assistant API in Azure](/azure/ai-services/openai/assistants-quickstart) -## Creating an _OpenAI Assistant Agent_ +## Creating an `OpenAIAssistantAgent` -Creating an _OpenAI Assistant_ requires invoking a remote service, which is handled asynchronously. To manage this, the _OpenAI Assistant Agent_ is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. +Creating an `OpenAIAssistant` requires invoking a remote service, which is handled asynchronously. To manage this, the `OpenAIAssistantAgent` is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. 
This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. ::: zone pivot="programming-language-csharp" ```csharp @@ -66,20 +65,40 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -azure_agent = await AzureAssistantAgent.create( - kernel=kernel, - service_id=service_id, - name="", - instructions="" +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent + +# Set up the client and model using Azure OpenAI Resources +client, model = AzureAssistantAgent.setup_resources() + +# Define the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) # or -openai_agent = await OpenAIAssistantAgent.create( - kernel=kernel, - service_id=service_id, - name="", - instructions="" +# Set up the client and model using OpenAI Resources +client, model = OpenAIAssistantAgent.setup_resources() + +# Define the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the OpenAIAssistantAgent instance using the client and the assistant definition +agent = OpenAIAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -91,9 +110,9 @@ openai_agent = await OpenAIAssistantAgent.create( ::: zone-end -## Retrieving an _OpenAI Assistant Agent_ +## Retrieving an `OpenAIAssistantAgent` -Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an _OpenAI Assistant Agent_ from an existing assistant definition. +Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an `OpenAIAssistantAgent` from an existing assistant definition. ::: zone pivot="programming-language-csharp" @@ -110,11 +129,29 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -agent = await OpenAIAssistantAgent.retrieve(id=agent_id, kernel=kernel) +# Using Azure OpenAI Resources -# or +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions="", +) -agent = await AzureAssistantAgent.retrieve(id=agent_id, kernel=kernel) +# Store the assistant ID +assistant_id = definition.id + +# Retrieve the assistant definition from the server based on the assistant ID +new_asst_definition = await client.beta.assistants.retrieve(assistant_id) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=new_asst_definition, +) ``` ::: zone-end @@ -125,9 +162,9 @@ agent = await AzureAssistantAgent.retrieve(id=agent_id, kernel=kernel) ::: zone-end -## Using an _OpenAI Assistant Agent_ +## Using an `OpenAIAssistantAgent` -As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. 
Interactions with your _OpenAI Assistant Agent_ are tied to this specific thread identifier which must be specified when calling the agent/ +As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier which must be specified when calling the agent/ ::: zone pivot="programming-language-csharp" ```csharp @@ -181,7 +218,7 @@ await agent.delete_thread(thread_id) ::: zone-end -## Deleting an _OpenAI Assistant Agent_ +## Deleting an `OpenAIAssistantAgent` Since the assistant's definition is stored remotely, it supports the capability to self-delete. This enables the agent to be removed from the system when it is no longer needed. @@ -217,12 +254,12 @@ is_deleted = agent._is_deleted ## How-To -For an end-to-end example for a _OpenAI Assistant Agent_, see: +For an end-to-end example for a `OpenAIAssistantAgent`, see: -- [How-To: _OpenAI Assistant Agent_ Code Interpreter](./examples/example-assistant-code.md) -- [How-To: _OpenAI Assistant Agent_ File Search](./examples/example-assistant-search.md) +- [How-To: `OpenAIAssistantAgent` Code Interpreter](./examples/example-assistant-code.md) +- [How-To: `OpenAIAssistantAgent` File Search](./examples/example-assistant-search.md) > [!div class="nextstepaction"] -> [Agent Collaboration in _Agent Chat_](./agent-chat.md) +> [Agent Collaboration in `AgentChat`](./agent-chat.md) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index b3e7a40d..d1044239 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -1,5 +1,5 @@ --- -title: Exploring the Semantic Kernel Chat Completion Agent (Experimental) +title: Exploring the Semantic Kernel Chat Completion Agent description: An exploration of the definition, behaviors, and usage patterns for a Chat Completion Agent zone_pivot_groups: programming-languages author: crickman @@ -10,8 +10,8 @@ ms.service: semantic-kernel --- # Exploring the _Semantic Kernel_ Chat Completion Agent -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. Detailed API documentation related to this discussion is available at: @@ -76,7 +76,7 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic ## Creating a Chat Completion Agent -A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [_Kernel_](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [_Kernel_](../../concepts/kernel.md) instance. +A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. 
::: zone pivot="programming-language-csharp" ```csharp @@ -104,11 +104,10 @@ ChatCompletionAgent agent = kernel = Kernel() # Add the AzureChatCompletion AI Service to the Kernel -kernel.add_service(AzureChatCompletion(service_id="")) +kernel.add_service(AzureChatCompletion()) # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="", instructions="", @@ -125,9 +124,9 @@ agent = ChatCompletionAgent( ## AI Service Selection -No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a _chat completion agent_ support the specification of a _service-selector_. A _service-selector_ indentifies which [AI service](../../concepts/ai-services/index.md) to target when the [_Kernel_](../../concepts/kernel.md) contains more than one. +No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a _service-selector_. A _service-selector_ indentifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. -> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the _Agent Framework_ +> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` ::: zone pivot="programming-language-csharp" ```csharp @@ -157,18 +156,26 @@ ChatCompletionAgent agent = ::: zone pivot="programming-language-python" ```python +from semantic_kernel.connectors.ai.open_ai import ( + AzureChatCompletion, + AzureChatPromptExecutionSettings, +) + # Define the Kernel kernel = Kernel() # Add the AzureChatCompletion AI Service to the Kernel -kernel.add_service(AzureChatCompletion(service_id="")) +kernel.add_service(AzureChatCompletion(service_id="service1")) +kernel.add_service(AzureChatCompletion(service_id="service2")) + +settings = AzureChatPromptExecutionSettings(service_id="service2") # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="", instructions="", + arguments=KernelArguments(settings=settings) ) ``` ::: zone-end @@ -179,11 +186,12 @@ agent = ChatCompletionAgent( ::: zone-end -## Conversing with _Chat Completion Agent_ - -Conversing with your _Chat Completion Agent_ is based on a _Chat History_ instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). +## Conversing with `ChatCompletionAgent` ::: zone pivot="programming-language-csharp" + +Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). + ```csharp // Define agent ChatCompletionAgent agent = ...; @@ -203,6 +211,11 @@ await foreach (ChatMessageContent response in agent.InvokeAsync(chat)) ::: zone-end ::: zone pivot="programming-language-python" + +There are multiple ways to converse with a `ChatCompletionAgent`. + +The easiest is to call and await `get_response`: + ```python # Define agent agent = ChatCompletionAgent(...) 
@@ -213,10 +226,44 @@ chat = ChatHistory() # Add the user message chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +# Generate the agent response +response = await agent.get_response(chat) +# response is a `ChatMessageContent` object +``` +Otherwise, calling the `invoke` method returns an `AsyncIterable` of `ChatMessageContent`. + +```python +# Define agent +agent = ChatCompletionAgent(...) + +# Define the chat history +chat = ChatHistory() + +# Add the user message +chat.add_user_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + # Generate the agent response(s) async for response in agent.invoke(chat): # process agent response(s) ``` + +The `ChatCompletionAgent` also supports streaming in which the `invoke_stream` method returns an `AsyncIterable` of `StreamingChatMessageContent`: + +```python +# Define agent +agent = ChatCompletionAgent(...) + +# Define the chat history +chat = ChatHistory() + +# Add the user message +chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + +# Generate the agent response(s) +async for response in agent.invoke_stream(chat): + # process agent response(s) +``` + ::: zone-end ::: zone pivot="programming-language-java" @@ -228,10 +275,10 @@ async for response in agent.invoke(chat): #### How-To: -For an end-to-end example for a _Chat Completion Agent_, see: +For an end-to-end example for a `ChatCompletionAgent`, see: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) > [!div class="nextstepaction"] -> [Exploring _OpenAI Assistant Agent_](./assistant-agent.md) +> [Exploring `OpenAIAssistantAgent`](./assistant-agent.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index fc11bf27..d557c7fc 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -10,12 +10,12 @@ ms.service: semantic-kernel --- # How-To: Coordinate Agent Collaboration using Agent Group Chat -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. ## Overview -In this sample, we will explore how to use _Agent Group Chat_ to coordinate collboration of two different agents working to review and rewrite user provided content. Each agent is assigned a distinct role: +In this sample, we will explore how to use `AgentGroupChat` to coordinate collboration of two different agents working to review and rewrite user provided content. Each agent is assigned a distinct role: - **Reviewer**: Reviews and provides direction to _Writer_. - **Writer**: Updates user content based on _Reviewer_ input. @@ -61,7 +61,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -81,24 +81,21 @@ Start by installing the Semantic Kernel Python package. 
pip install semantic-kernel ``` +Next add the required imports. + ```python import asyncio import os -import copy +from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent -from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( +from semantic_kernel.agents.strategies import ( KernelFunctionSelectionStrategy, -) -from semantic_kernel.agents.strategies.termination.kernel_function_termination_strategy import ( KernelFunctionTerminationStrategy, ) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_decorator import kernel_function -from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt -from semantic_kernel.kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistoryTruncationReducer +from semantic_kernel.functions import KernelFunctionFromPrompt ``` ::: zone-end @@ -108,13 +105,12 @@ from semantic_kernel.kernel import Kernel ::: zone-end - ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. - ::: zone pivot="programming-language-csharp" +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. + ```powershell # OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" @@ -173,7 +169,7 @@ public class Settings ::: zone-end ::: zone pivot="programming-language-python" -The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). +The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). The sample requires that you have Azure OpenAI or OpenAI resources available. Configure the following settings in your `.env` file for either Azure OpenAI or OpenAI: @@ -197,21 +193,20 @@ Once configured, the respective AI service classes will pick up the required var ::: zone-end - ## Coding The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [_Agent_ Definition](#agent-definition) - Create the two _Chat Completion Agent_ instances (_Reviewer_ and _Writer_). -3. [_Chat_ Definition](#chat-definition) - Create the _Agent Group Chat_ and associated strategies. +2. [`Agent` Definition](#agent-definition) - Create the two `ChatCompletionAgent` instances (_Reviewer_ and _Writer_). +3. [_Chat_ Definition](#chat-definition) - Create the `AgentGroupChat` and associated strategies. 4. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating any _Chat Completion Agent_, the configuration settings, plugins, and _Kernel_ must be initialized. +Prior to creating any `ChatCompletionAgent`, the configuration settings, plugins, and `Kernel` must be initialized. 
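For the Python pivot, this setup step is small because the AI service classes read the values from the `.env` file described in the Configuration section. The following is a minimal sketch of the initialization used for both agents, mirroring the service configuration pattern used elsewhere in this guide; the full listing at the end of the page may structure it slightly differently:

```python
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion

# Create a single kernel instance shared by both agents. AzureChatCompletion reads
# its endpoint, API key, and deployment name from the environment / .env settings
# described above.
kernel = Kernel()
kernel.add_service(AzureChatCompletion())
```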
::: zone pivot="programming-language-csharp" @@ -252,14 +247,12 @@ kernel = Kernel() ::: zone-end ::: zone pivot="programming-language-java" - > Agents are currently unavailable in Java. - ::: zone-end -Let's also create a second _Kernel_ instance via _cloning_ and add a plug-in that will allow the reivew to place updated content on the clip-board. - ::: zone pivot="programming-language-csharp" +Let's also create a second `Kernel` instance via _cloning_ and add a plug-in that will allow the reivew to place updated content on the clip-board. + ```csharp Kernel toolKernel = kernel.Clone(); toolKernel.Plugins.AddFromType(); @@ -313,7 +306,7 @@ private sealed class ClipboardAccess ### Agent Definition ::: zone pivot="programming-language-csharp" -Let's declare the agent names as `const` so they might be referenced in _Agent Group Chat_ strategies: +Let's declare the agent names as `const` so they might be referenced in `AgentGroupChat` strategies: ```csharp const string ReviewerName = "Reviewer"; @@ -374,7 +367,6 @@ ChatCompletionAgent agentReviewer = ::: zone pivot="programming-language-python" ```python agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions=""" @@ -425,7 +417,6 @@ ChatCompletionAgent agentWriter = The _Writer_ agent is similiar. It is given a single-purpose task, follow direction and rewrite the content. ```python agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions=""" @@ -446,9 +437,9 @@ Your sole responsibility is to rewrite content according to review suggestions. ### Chat Definition -Defining the _Agent Group Chat_ requires considering the strategies for selecting the _Agent_ turn and determining when to exit the _Chat_ loop. For both of these considerations, we will define a _Kernel Prompt Function_. +Defining the `AgentGroupChat` requires considering the strategies for selecting the `Agent` turn and determining when to exit the _Chat_ loop. For both of these considerations, we will define a _Kernel Prompt Function_. -The first to reason over _Agent_ selection: +The first to reason over `Agent` selection: ::: zone pivot="programming-language-csharp" @@ -481,8 +472,8 @@ KernelFunction selectionFunction = ::: zone pivot="programming-language-python" ```python selection_function = KernelFunctionFromPrompt( - function_name="selection", - prompt=f""" + function_name="selection", + prompt=f""" Examine the provided RESPONSE and choose the next participant. State only the name of the chosen participant without explanation. Never choose the participant named in the RESPONSE. @@ -499,7 +490,7 @@ Rules: RESPONSE: {{{{$lastmessage}}}} """ - ) +) ``` ::: zone-end @@ -532,11 +523,11 @@ KernelFunction terminationFunction = ::: zone pivot="programming-language-python" ```python - termination_keyword = "yes" +termination_keyword = "yes" - termination_function = KernelFunctionFromPrompt( - function_name="termination", - prompt=f""" +termination_function = KernelFunctionFromPrompt( + function_name="termination", + prompt=f""" Examine the RESPONSE and determine whether the content has been deemed satisfactory. If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. If specific suggestions are being provided, it is not satisfactory. @@ -545,7 +536,7 @@ If no correction is suggested, it is satisfactory. 
RESPONSE: {{{{$lastmessage}}}} """ - ) +) ``` ::: zone-end @@ -575,7 +566,7 @@ history_reducer = ChatHistoryTruncationReducer(target_count=1) ::: zone-end -Finally we are ready to bring everything together in our _Agent Group Chat_ definition. +Finally we are ready to bring everything together in our `AgentGroupChat` definition. ::: zone pivot="programming-language-csharp" @@ -668,9 +659,9 @@ The `lastmessage` `history_variable_name` corresponds with the `KernelFunctionSe ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent Group Chat_. Start by creating creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `AgentGroupChat`. Start by creating creating an empty loop. -> Note: Unlike the other examples, no external history or _thread_ is managed. _Agent Group Chat_ manages the conversation history internally. +> Note: Unlike the other examples, no external history or _thread_ is managed. `AgentGroupChat` manages the conversation history internally. ::: zone pivot="programming-language-csharp" ```csharp @@ -700,9 +691,9 @@ while not is_complete: Now let's capture user input within the previous loop. In this case: - Empty input will be ignored - The term `EXIT` will signal that the conversation is completed -- The term `RESET` will clear the _Agent Group Chat_ history +- The term `RESET` will clear the `AgentGroupChat` history - Any term starting with `@` will be treated as a file-path whose content will be provided as input -- Valid input will be added to the _Agent Group Chat_ as a _User_ message. +- Valid input will be added to the `AgentGroupChat` as a _User_ message. ```csharp Console.WriteLine(); @@ -753,9 +744,9 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); Now let's capture user input within the previous loop. In this case: - Empty input will be ignored. - The term `exit` will signal that the conversation is complete. -- The term `reset` will clear the _Agent Group Chat_ history. +- The term `reset` will clear the `AgentGroupChat` history. - Any term starting with `@` will be treated as a file-path whose content will be provided as input. -- Valid input will be added to the _Agent Group Chat_ as a _User_ message. +- Valid input will be added to the `AgentGroupChat` as a _User_ message. The operation logic inside the while loop looks like: @@ -790,7 +781,7 @@ if user_input.startswith("@") and len(user_input) > 1: continue # Add the current user_input to the chat -await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) +await chat.add_chat_message(message=user_input) ``` ::: zone-end @@ -800,7 +791,7 @@ await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=use ::: zone-end -To initate the _Agent_ collaboration in response to user input and display the _Agent_ responses, invoke the _Agent Group Chat_; however, first be sure to reset the _Completion_ state from any prior invocation. +To initate the `Agent` collaboration in response to user input and display the `Agent` responses, invoke the `AgentGroupChat`; however, first be sure to reset the _Completion_ state from any prior invocation. > Note: Service failures are being caught and displayed to avoid crashing the conversation loop. 
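For the Python pivot, the guard around the invocation looks roughly like the following sketch. Variable names follow the rest of this sample, the printed output format is illustrative, and the complete loop appears in the final listing below:

```python
# Reset the completion state from any prior invocation, then stream the agent
# responses while catching service failures so the conversation loop keeps running.
chat.is_complete = False

try:
    async for response in chat.invoke():
        if response is None or not response.name:
            continue
        print(f"# {response.name.upper()}:\n{response.content}\n")
except Exception as e:
    print(f"Error during chat invocation: {e}")
```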
@@ -853,7 +844,6 @@ chat.is_complete = False ::: zone-end - ## Final ::: zone pivot="programming-language-csharp" @@ -1148,24 +1138,20 @@ import os from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent -from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( +from semantic_kernel.agents.strategies import ( KernelFunctionSelectionStrategy, -) -from semantic_kernel.agents.strategies.termination.kernel_function_termination_strategy import ( KernelFunctionTerminationStrategy, ) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.history_reducer.chat_history_truncation_reducer import ChatHistoryTruncationReducer -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt - -################################################################### -# The following sample demonstrates how to create a simple, # -# agent group chat that utilizes a Reviewer Chat Completion # -# Agent along with a Writer Chat Completion Agent to # -# complete a user's task. # -################################################################### +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistoryTruncationReducer +from semantic_kernel.functions import KernelFunctionFromPrompt + +""" +The following sample demonstrates how to create a simple, +agent group chat that utilizes a Reviewer Chat Completion +Agent along with a Writer Chat Completion Agent to +complete a user's task. +""" # Define agent names REVIEWER_NAME = "Reviewer" @@ -1185,7 +1171,6 @@ async def main(): # Create ChatCompletionAgents using the same kernel. agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions=""" @@ -1202,7 +1187,6 @@ RULES: ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions=""" @@ -1312,7 +1296,7 @@ RESPONSE: continue # Add the current user_input to the chat - await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) + await chat.add_chat_message(message=user_input) try: async for response in chat.invoke(): diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index f8fab56a..be6fdc4d 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -1,5 +1,5 @@ --- -title: How-To: _OpenAI Assistant Agent_ Code Interpreter (Experimental) +title: How-To: `OpenAIAssistantAgent` Code Interpreter description: A step-by-step walk-through of defining and utilizing the code-interpreter tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _OpenAI Assistant Agent_ Code Interpreter +# How-To: `OpenAIAssistantAgent` Code Interpreter -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. 
Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Overview -In this sample, we will explore how to use the _code-interpreter_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. +In this sample, we will explore how to use the _code-interpreter_ tool of an [`OpenAIAssistantAgent`](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -56,7 +56,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -79,29 +79,27 @@ Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` da ::: zone-end ::: zone pivot="programming-language-python" + Start by creating a folder that will hold your script (`.py` file) and the sample resources. Include the following imports at the top of your `.py` file: ```python import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_file_reference_content import StreamingFileReferenceContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingFileReferenceContent ``` Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from the [_Semantic Kernel_ `learn_resources/resources` directory](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files to your working directory. ::: zone-end + ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. ::: zone-end - ## Configuration This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. @@ -164,6 +162,7 @@ public class Settings } ``` ::: zone-end + ::: zone pivot="programming-language-python" The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). @@ -198,17 +197,17 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. 
[Agent Definition](#agent-definition) - Create the _OpenAI_Assistant_Agent_ with templatized instructions and plug-in. +2. [Agent Definition](#agent-definition) - Create the _OpenAI_Assistant`Agent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. - ::: zone pivot="programming-language-csharp" +Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. + Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `OpenAIClientProvider` that will be used for the [Agent Definition](#agent-definition) as well as file-upload. ```csharp @@ -239,11 +238,13 @@ OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByC ::: zone pivot="programming-language-python" +Prior to creating an `AzureAssistantAgent` or an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. + > [!TIP] > You may need to adjust the file paths depending upon where your files are located. ```python -# Let's form the file paths that we will later pass to the assistant +# Let's form the file paths that we will use as part of file upload csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", @@ -256,7 +257,39 @@ csv_file_path_2 = os.path.join( "PopulationByCountry.csv", ) ``` -You may need to modify the path creation code based on the storage location of your CSV files. + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Upload the files to the client +file_ids: list[str] = [] +for path in [csv_file_path_1, csv_file_path_2]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + +# Get the code interpreter tool and resources +code_interpreter_tools, code_interpreter_tool_resources = AzureAssistantAgent.configure_code_interpreter_tool( + file_ids=file_ids +) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions=""" + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. + """, + name="SampleAssistantAgent", + tools=code_interpreter_tools, + tool_resources=code_interpreter_tool_resources, +) +``` + +We first set up the Azure OpenAI resources to obtain the client and model. Next, we upload the CSV files from the specified paths using the client's Files API. We then configure the `code_interpreter_tool` using the uploaded file IDs, which are linked to the assistant upon creation along with the model, instructions, and name. ::: zone-end @@ -270,7 +303,7 @@ You may need to modify the path creation code based on the storage location of y ::: zone pivot="programming-language-csharp" -We are now ready to instantiate an _OpenAI Assistant Agent_. 
The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. +We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. ```csharp Console.WriteLine("Defining agent..."); @@ -296,21 +329,13 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" -We are now ready to instantiate an _Azure Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. +We are now ready to instantiate an `AzureAssistantAgent`. The agent is configured with the client and the assistant definition. ```python -agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", - instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -323,7 +348,7 @@ agent = await AzureAssistantAgent.create( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. @@ -369,11 +394,10 @@ try: while not is_complete: # agent interaction logic here finally: - print("Cleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + print("\nCleaning up resources...") + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) ``` ::: zone-end @@ -426,7 +450,7 @@ await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(rol ::: zone-end -Before invoking the _Agent_ response, let's add some helper methods to download any files that may be produced by the _Agent_. +Before invoking the `Agent` response, let's add some helper methods to download any files that may be produced by the `Agent`. ::: zone pivot="programming-language-csharp" Here we're place file content in the system defined temporary directory and then launching the system defined viewer application. @@ -513,7 +537,7 @@ async def download_response_image(agent, file_ids: list[str]): ::: zone-end -To generate an _Agent_ response to user input, invoke the agent by specifying the _Assistant Thread_. 
In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. +To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. ::: zone pivot="programming-language-csharp" ```csharp @@ -743,19 +767,16 @@ import asyncio import logging import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_file_reference_content import StreamingFileReferenceContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingFileReferenceContent logging.basicConfig(level=logging.ERROR) -################################################################### -# The following sample demonstrates how to create a simple, # -# OpenAI assistant agent that utilizes the code interpreter # -# to analyze uploaded files. # -################################################################### +""" +The following sample demonstrates how to create a simple, +OpenAI assistant agent that utilizes the code interpreter +to analyze uploaded files. +""" # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( @@ -802,22 +823,43 @@ async def download_response_image(agent: AzureAssistantAgent, file_ids: list[str async def main(): - agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", + # Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # Upload the files to the client + file_ids: list[str] = [] + for path in [csv_file_path_1, csv_file_path_2]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + + # Get the code interpreter tool and resources + code_interpreter_tools, code_interpreter_tool_resources = AzureAssistantAgent.configure_code_interpreter_tool( + file_ids=file_ids + ) + + # Create the assistant definition + definition = await client.beta.assistants.create( + model=model, instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. 
+ """, + name="SampleAssistantAgent", + tools=code_interpreter_tools, + tool_resources=code_interpreter_tool_resources, + ) + + # Create the agent using the client and the assistant definition + agent = AzureAssistantAgent( + client=client, + definition=definition, ) print("Creating thread...") - thread_id = await agent.create_thread() + thread = await client.beta.threads.create() try: is_complete: bool = False @@ -829,14 +871,13 @@ async def main(): if user_input.lower() == "exit": is_complete = True + break - await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) - ) + await agent.add_chat_message(thread_id=thread.id, message=user_input) is_code = False last_role = None - async for response in agent.invoke_stream(thread_id=thread_id): + async for response in agent.invoke_stream(thread_id=thread.id): current_is_code = response.metadata.get("code", False) if current_is_code: @@ -858,16 +899,16 @@ async def main(): ]) if is_code: print("```\n") + print() await download_response_image(agent, file_ids) file_ids.clear() finally: print("\nCleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) if __name__ == "__main__": @@ -885,5 +926,5 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _OpenAI Assistant Agent_ Code File Search](./example-assistant-search.md) +> [How-To: `OpenAIAssistantAgent` Code File Search](./example-assistant-search.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index e05c069c..1aad1696 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -1,5 +1,5 @@ --- -title: How-To: _OpenAI Assistant Agent_ File Search (Experimental) +title: How-To: `OpenAIAssistantAgent` File Search description: A step-by-step walk-through of defining and utilizing the file-search tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _OpenAI Assistant Agent_ File Search +# How-To: `OpenAIAssistantAgent` File Search -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Overview -In this sample, we will explore how to use the _file-search_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. As part of the task, the agent will provide document citations within the response. +In this sample, we will explore how to use the _file-search_ tool of an [`OpenAIAssistantAgent`](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. 
As part of the task, the agent will provide document citations within the response. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -54,7 +54,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -86,11 +86,8 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_annotation_content import StreamingAnnotationContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingAnnotationContent ``` Additionally, copy the `Grimms-The-King-of-the-Golden-Mountain.txt`, `Grimms-The-Water-of-Life.txt` and `Grimms-The-White-Snake.txt` public domain content from [_Semantic Kernel_ `LearnResources` Project](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files in your project folder. @@ -201,14 +198,14 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [Agent Definition](#agent-definition) - Create the _Chat_Completion_Agent_ with templatized instructions and plug-in. +2. [Agent Definition](#agent-definition) - Create the _Chat_Completion`Agent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. +Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. ::: zone pivot="programming-language-csharp" @@ -225,6 +222,16 @@ OpenAIClientProvider clientProvider = ``` ::: zone-end + +::: zone pivot="programming-language-python" +The class method `setup_resources()` on the Assistant Agent handles creating the client and returning it and the model to use based on the desired configuration. Pydantic settings are used to load environment variables first from environment variables or from the `.env` file. One may pass in the `api_key`, `api_version`, `deployment_name` or `endpoint`, which will take precedence over any environment variables configured. + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() +``` +::: zone-end + ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. 
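Returning to the Python configuration above: if you prefer explicit values over environment variables, `setup_resources()` also accepts keyword overrides. The following is a minimal sketch; every value shown is a hypothetical placeholder, and any argument supplied this way takes precedence over the corresponding environment variable or `.env` entry.

```python
# A minimal sketch: explicit keyword arguments override values loaded from
# environment variables or a .env file. All values below are hypothetical placeholders.
client, model = AzureAssistantAgent.setup_resources(
    endpoint="https://<your-resource>.openai.azure.com",
    api_key="<your-azure-openai-api-key>",
    deployment_name="<your-gpt-deployment>",
    api_version="<api-version-supported-by-your-resource>",
)
```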
@@ -248,8 +255,21 @@ string storeId = operation.VectorStoreId; ::: zone pivot="programming-language-python" ```python def get_filepath_for_filename(filename: str) -> str: - base_directory = os.path.dirname(os.path.realpath(__file__)) + base_directory = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) return os.path.join(base_directory, filename) + +# Upload the files to the client +file_ids: list[str] = [] +for path in [get_filepath_for_filename(filename) for filename in filenames]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + +# Get the file search tool and resources +file_search_tools, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool(file_ids=file_ids) ``` ::: zone-end @@ -315,7 +335,7 @@ foreach (string fileName in _fileNames) ### Agent Definition -We are now ready to instantiate an _OpenAI Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _File Search_ tool enabled. Additionally, we explicitly associate the _Vector Store_ with the _File Search_ tool. +We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configured with its target model, _Instructions_, and the _File Search_ tool enabled. Additionally, we explicitly associate the _Vector Store_ with the _File Search_ tool. ::: zone pivot="programming-language-csharp" @@ -345,18 +365,24 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, instructions=""" The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. """, - enable_file_search=True, - vector_store_filenames=[get_filepath_for_filename(filename) for filename in filenames], + name="SampleAssistantAgent", + tools=file_search_tools, + tool_resources=file_search_tool_resources, +) + +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -369,7 +395,7 @@ agent = await AzureAssistantAgent.create( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. @@ -458,6 +484,7 @@ if not user_input: if user_input.lower() == "exit": is_complete = True + break await agent.add_chat_message( thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) @@ -471,7 +498,7 @@ await agent.add_chat_message( ::: zone-end -Before invoking the _Agent_ response, let's add a helper method to reformat the unicode annotation brackets to ANSI brackets. +Before invoking the `Agent` response, let's add a helper method to reformat the unicode annotation brackets to ANSI brackets. 
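The file-search tool embeds citation markers in the response text using unicode brackets (for example, `【0:1†source】`), which render poorly in a console. The C# sample below adds a small extension method for this purpose; for reference, an equivalent helper in Python might look like the following sketch. The exact bracket characters are an assumption based on the Assistants API annotation format.

```python
# A sketch of the same idea in Python, assuming annotations use the unicode
# brackets 【 and 】 as emitted by the Assistants API citation format.
def replace_unicode_brackets(content: str | None) -> str | None:
    return content.replace("【", "[").replace("】", "]") if content else content
```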
::: zone pivot="programming-language-csharp" ```csharp @@ -492,7 +519,7 @@ private static string ReplaceUnicodeBrackets(this string content) => ::: zone-end -To generate an _Agent_ response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. +To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. ::: zone pivot="programming-language-csharp" ```csharp @@ -702,18 +729,26 @@ public static class Program ::: zone pivot="programming-language-python" ```python +# Copyright (c) Microsoft. All rights reserved. + import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_annotation_content import StreamingAnnotationContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingAnnotationContent + +""" +The following sample demonstrates how to create a simple, +OpenAI assistant agent that utilizes the vector store +to answer questions based on the uploaded documents. +""" def get_filepath_for_filename(filename: str) -> str: - base_directory = os.path.dirname(os.path.realpath(__file__)) + base_directory = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) return os.path.join(base_directory, filename) @@ -725,22 +760,48 @@ filenames = [ async def main(): - agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", + # Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # Upload the files to the client + file_ids: list[str] = [] + for path in [get_filepath_for_filename(filename) for filename in filenames]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + + vector_store = await client.beta.vector_stores.create( + name="assistant_search", + file_ids=file_ids, + ) + + # Get the file search tool and resources + file_search_tools, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool( + vector_store_ids=vector_store.id + ) + + # Create the assistant definition + definition = await client.beta.assistants.create( + model=model, instructions=""" The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. 
""", - enable_file_search=True, - vector_store_filenames=[get_filepath_for_filename(filename) for filename in filenames], + name="SampleAssistantAgent", + tools=file_search_tools, + tool_resources=file_search_tool_resources, + ) + + # Create the agent using the client and the assistant definition + agent = AzureAssistantAgent( + client=client, + definition=definition, ) print("Creating thread...") - thread_id = await agent.create_thread() + thread = await client.beta.threads.create() try: is_complete: bool = False @@ -753,12 +814,10 @@ async def main(): is_complete = True break - await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) - ) + await agent.add_chat_message(thread_id=thread.id, message=user_input) footnotes: list[StreamingAnnotationContent] = [] - async for response in agent.invoke_stream(thread_id=thread_id): + async for response in agent.invoke_stream(thread_id=thread.id): footnotes.extend([item for item in response.items if isinstance(item, StreamingAnnotationContent)]) print(f"{response.content}", end="", flush=True) @@ -773,11 +832,10 @@ async def main(): ) finally: - print("Cleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.file_search_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + print("\nCleaning up resources...") + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) if __name__ == "__main__": @@ -795,5 +853,5 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How to Coordinate Agent Collaboration using _Agent Group Chat_](./example-agent-collaboration.md) +> [How to Coordinate Agent Collaboration using `AgentGroupChat`](./example-agent-collaboration.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index ced3bb01..d24a85f8 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -1,5 +1,5 @@ --- -title: How-To: _Chat Completion Agent_ (Experimental) +title: How-To: `ChatCompletionAgent` description: A step-by-step walk-through of defining and utilizing the features of a Chat Completion Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _Chat Completion Agent_ +# How-To: `ChatCompletionAgent` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. ## Overview -In this sample, we will explore configuring a plugin to access _GitHub_ API and provide templatized instructions to a [_Chat Completion Agent_](../chat-completion-agent.md) to answer questions about a _GitHub_ repository. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will provide document citations within the response. 
+In this sample, we will explore configuring a plugin to access _GitHub_ API and provide templatized instructions to a [`ChatCompletionAgent`](../chat-completion-agent.md) to answer questions about a _GitHub_ repository. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will provide document citations within the response. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -55,7 +55,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -76,13 +76,11 @@ import sys from datetime import datetime from semantic_kernel.agents import ChatCompletionAgent -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent +from semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel -from semantic_kernel.functions.kernel_arguments import KernelArguments # Adjust the sys.path so we can use the GitHubPlugin and GitHubSettings classes # This is so we can run the code from the samples/learn_resources/agent_docs directory @@ -201,14 +199,14 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [_Agent_ Definition](#agent-definition) - Create the _Chat Completion Agent_ with templatized instructions and plug-in. +2. [`Agent` Definition](#agent-definition) - Create the `ChatCompletionAgent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating a _Chat Completion Agent_, the configuration settings, plugins, and _Kernel_ must be initialized. +Prior to creating a `ChatCompletionAgent`, the configuration settings, plugins, and `Kernel` must be initialized. ::: zone pivot="programming-language-csharp" @@ -293,7 +291,7 @@ settings.function_choice_behavior = FunctionChoiceBehavior.Auto() ### Agent Definition -Finally we are ready to instantiate a _Chat Completion Agent_ with its _Instructions_, associated _Kernel_, and the default _Arguments_ and _Execution Settings_. In this case, we desire to have the any plugin functions automatically executed. +Finally we are ready to instantiate a `ChatCompletionAgent` with its _Instructions_, associated `Kernel`, and the default _Arguments_ and _Execution Settings_. In this case, we desire to have the any plugin functions automatically executed. 
::: zone pivot="programming-language-csharp" ```csharp @@ -328,7 +326,6 @@ Console.WriteLine("Ready!"); ::: zone pivot="programming-language-python" ```python agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="SampleAssistantAgent", instructions=f""" @@ -358,7 +355,7 @@ agent = ChatCompletionAgent( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating a _Chat History_ object to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistory` object to maintain the conversation state and creating an empty loop. ::: zone pivot="programming-language-csharp" ```csharp @@ -386,7 +383,7 @@ while not is_complete: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the _Chat History_ as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the `ChatHistory` as a _User_ message. ::: zone pivot="programming-language-csharp" ```csharp @@ -429,9 +426,9 @@ history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input) ::: zone-end -To generate a _Agent_ response to user input, invoke the agent using _Arguments_ to provide the final template parameter that specifies the current date and time. +To generate a `Agent` response to user input, invoke the agent using _Arguments_ to provide the final template parameter that specifies the current date and time. -The _Agent_ response is then then displayed to the user. +The `Agent` response is then then displayed to the user. ::: zone pivot="programming-language-csharp" ```csharp @@ -628,7 +625,6 @@ async def main(): # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="SampleAssistantAgent", instructions=f""" @@ -682,6 +678,6 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _OpenAI Assistant Agent_ Code Interpreter](./example-assistant-code.md) +> [How-To: `OpenAIAssistantAgent` Code Interpreter](./example-assistant-code.md) diff --git a/semantic-kernel/Frameworks/agent/index.md b/semantic-kernel/Frameworks/agent/index.md index 7d8299ea..67a76125 100644 --- a/semantic-kernel/Frameworks/agent/index.md +++ b/semantic-kernel/Frameworks/agent/index.md @@ -1,5 +1,5 @@ --- -title: Semantic Kernel Agent Framework (Experimental) +title: Semantic Kernel Agent Framework description: Introducing the Semantic Kernel Agent Framework zone_pivot_groups: programming-languages author: crickman @@ -10,10 +10,10 @@ ms.service: semantic-kernel --- # Semantic Kernel Agent Framework -> [!WARNING] -> The _Semantic Kernel Agent Framework_ is in preview and is subject to change. +> [!IMPORTANT] +> Single-agent features, such as ChatCompletionAgent and OpenAIAssistantAgent, are in the release candidate stage. These features are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. However, agent chat patterns are still in the experimental stage. 
These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. -The _Semantic Kernel Agent Framework_ provides a platform within the Semantic Kernel eco-system that allow for the creation of AI **agents** and the ability to incorporate **agentic patterns** into any application based on the same patterns and features that exist in the core _Semantic Kernel_ framework. +The Semantic Kernel Agent Framework provides a platform within the Semantic Kernel eco-system that allow for the creation of AI **agents** and the ability to incorporate **agentic patterns** into any application based on the same patterns and features that exist in the core Semantic Kernel framework. ## What is an AI agent? @@ -21,7 +21,7 @@ An **AI agent** is a software entity designed to perform tasks autonomously or s Agents can send and receive messages, generating responses using a combination of models, tools, human inputs, or other customizable components. -Agents are designed to work collaboratively, enabling complex workflows by interacting with each other. The _Agent Framework_ allows for the creation of both simple and sophisticated agents, enhancing modularity and ease of maintenance +Agents are designed to work collaboratively, enabling complex workflows by interacting with each other. The `Agent Framework` allows for the creation of both simple and sophisticated agents, enhancing modularity and ease of maintenance ## What problems do AI agents solve? @@ -63,8 +63,8 @@ For .NET SDK, serveral NuGet packages are available. Package|Description --|-- -[Microsoft.SemanticKernel](https://www.nuget.org/packages/Microsoft.SemanticKernel)|This contains the core _Semantic Kernel_ libraries for getting started with the _Agent Framework_. This must be explicitly referenced by your application. -[Microsoft.SemanticKernel.Agents.Abstractions](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Abstractions)|Defines the core agent abstractions for the _Agent Framework_. Generally not required to be specified as it is included in both the `Microsoft.SemanticKernel.Agents.Core` and `Microsoft.SemanticKernel.Agents.OpenAI` packages. +[Microsoft.SemanticKernel](https://www.nuget.org/packages/Microsoft.SemanticKernel)|This contains the core _Semantic Kernel_ libraries for getting started with the `Agent Framework`. This must be explicitly referenced by your application. +[Microsoft.SemanticKernel.Agents.Abstractions](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Abstractions)|Defines the core agent abstractions for the `Agent Framework`. Generally not required to be specified as it is included in both the `Microsoft.SemanticKernel.Agents.Core` and `Microsoft.SemanticKernel.Agents.OpenAI` packages. [Microsoft.SemanticKernel.Agents.Core](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Core)|Includes the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes. [Microsoft.SemanticKernel.Agents.OpenAI](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.OpenAI)|Provides ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent`](./assistant-agent.md). @@ -74,7 +74,7 @@ Package|Description Module|Description --|-- -[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the _Agent Framework_. 
This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). +[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the `Agent Framework`. This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). ::: zone-end diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md new file mode 100644 index 00000000..5dbc6bba --- /dev/null +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -0,0 +1,623 @@ +--- +title: Agent Framework Release Candidate Migration Guide +description: Describes the steps for developers to update their Agent Framework code to the latest abstractions. +zone_pivot_groups: programming-languages +author: moonbox3 +ms.topic: conceptual +ms.author: evmattso +ms.date: 02/26/2025 +ms.service: semantic-kernel +--- + +# Migration Guide for Updating from Old Code to New Code + +As we transition some agents from the experimental stage to the release candidate stage, we have updated the APIs to simplify and streamline their use. Refer to the specific scenario guide to learn how to update your existing code to work with the latest available APIs. + +::: zone pivot="programming-language-csharp" + +## OpenAIAssistantAgent C# Migration Guide + +We recently applied a significant shift around the [`OpenAIAssistantAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs) in the _Semantic Kernel Agent Framework_. + +These changes were applied in: + +- [PR #10583](https://github.com/microsoft/semantic-kernel/pull/10583) +- [PR #10616](https://github.com/microsoft/semantic-kernel/pull/10616) +- [PR #10633](https://github.com/microsoft/semantic-kernel/pull/10633) + +These changes are intended to: + +- Align with the pattern for using for our [`AzureAIAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/AzureAI/AzureAIAgent.cs). +- Fix bugs around static initialization pattern. +- Avoid limiting features based on our abstraction of the underlying SDK. + +This guide provides step-by-step instructions for migrating your C# code from the old implementation to the new one. Changes include updates for creating assistants, managing the assistant lifecycle, handling threads, files, and vector stores. + +## 1. Client Instantiation + +Previously, `OpenAIClientProvider` was required for creating any `OpenAIAssistantAgent`. This dependency has been simplified. + +#### **New Way** +```csharp +OpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(endpointUrl)); +AssistantClient assistantClient = client.GetAssistantClient(); +``` + +#### **Old Way (Deprecated)** +```csharp +var clientProvider = new OpenAIClientProvider(...); +``` + +## 2. 
Assistant Lifecycle + +### **Creating an Assistant** +You may now directly instantiate an `OpenAIAssistantAgent` using an existing or new Assistant definition from `AssistantClient`. + +##### **New Way** +```csharp +Assistant definition = await assistantClient.GetAssistantAsync(assistantId); +OpenAIAssistantAgent agent = new(definition, client); +``` + +Plugins can be directly included during initialization: +```csharp +KernelPlugin plugin = KernelPluginFactory.CreateFromType(); +Assistant definition = await assistantClient.GetAssistantAsync(assistantId); +OpenAIAssistantAgent agent = new(definition, client, [plugin]); +``` + +Creating a new assistant definition using an extension method: +```csharp +Assistant assistant = await assistantClient.CreateAssistantAsync( + model, + name, + instructions: instructions, + enableCodeInterpreter: true); +``` + +##### **Old Way (Deprecated)** +Previously, assistant definitions were managed indirectly. + +## 3. Invoking the Agent + +You may specify `RunCreationOptions` directly, enabling full access to underlying SDK capabilities. + +#### **New Way** +```csharp +RunCreationOptions options = new(); // configure as needed +var result = await agent.InvokeAsync(options); +``` + +#### **Old Way (Deprecated)** +```csharp +var options = new OpenAIAssistantInvocationOptions(); +``` + +## 4. Assistant Deletion + +You can directly manage assistant deletion with `AssistantClient`. + +```csharp +await assistantClient.DeleteAssistantAsync(agent.Id); +``` + +## 5. Thread Lifecycle + +### **Creating a Thread** +Threads are now created directly using `AssistantClient`. + +##### **New Way** +```csharp +AssistantThread thread = await assistantClient.CreateThreadAsync(); +``` + +Using a convenience extension method: +```csharp +string threadId = await assistantClient.CreateThreadAsync(messages: [new ChatMessageContent(AuthorRole.User, "")]); +``` + +##### **Old Way (Deprecated)** +Previously, thread management was indirect or agent-bound. + +### **Thread Deletion** +```csharp +await assistantClient.DeleteThreadAsync(thread.Id); +``` + +## 6. File Lifecycle + +File creation and deletion now utilize `OpenAIFileClient`. + +### **File Upload** +```csharp +string fileId = await client.UploadAssistantFileAsync(stream, ""); +``` + +### **File Deletion** +```csharp +await client.DeleteFileAsync(fileId); +``` + +## 7. Vector Store Lifecycle + +Vector stores are managed directly via `VectorStoreClient` with convenient extension methods. + +### **Vector Store Creation** +```csharp +string vectorStoreId = await client.CreateVectorStoreAsync([fileId1, fileId2], waitUntilCompleted: true); +``` + +### **Vector Store Deletion** +```csharp +await client.DeleteVectorStoreAsync(vectorStoreId); +``` + +## Backwards Compatibility + +Deprecated patterns are marked with `[Obsolete]`. To suppress obsolete warnings (`CS0618`), update your project file as follows: + +```xml + + $(NoWarn);CS0618 + +``` + +This migration guide helps you transition smoothly to the new implementation, simplifying client initialization, resource management, and integration with the **Semantic Kernel .NET SDK**. + +::: zone-end +::: zone pivot="programming-language-python" + +For developers upgrading to Semantic Kernel Python 1.22.0 or later, the ChatCompletionAgent and OpenAI Assistant abstractions have been updated. 
+ +These changes were applied in: + +- [PR #10666](https://github.com/microsoft/semantic-kernel/pull/10666) +- [PR #10667](https://github.com/microsoft/semantic-kernel/pull/10667) +- [PR #10701](https://github.com/microsoft/semantic-kernel/pull/10701) +- [PR #10707](https://github.com/microsoft/semantic-kernel/pull/10707) + +This guide provides step-by-step instructions for migrating your Python code from the old implementation to the new implementation. + +## `ChatCompletionAgent` + +The `ChatCompletionAgent` has been updated to simplify service configuration, plugin handling, and function calling behaviors. Below are the key changes you should consider when migrating. + +### 1. Specifying the Service + +You can now specify the service directly as part of the agent constructor: + +#### New Way + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", +) +``` + +Note: If both a kernel and a service are provided, the service will take precedence if it shares the same service_id or ai_model_id. Otherwise, if they are separate, the first AI service registered on the kernel will be used. + +#### Old Way (Still Valid) + +Previously, you would first add a service to a kernel and then pass the kernel to the agent: + +```python +kernel = Kernel() +kernel.add_service(AzureChatCompletion()) + +agent = ChatCompletionAgent( + kernel=kernel, + name="", + instructions="", +) +``` + +### 2. Adding Plugins + +Plugins can now be supplied directly through the constructor: + +#### New Way + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", + plugins=[SamplePlugin()], +) +``` + +#### Old Way (Still Valid) + +Plugins previously had to be added to the kernel separately: + +```python +kernel = Kernel() +kernel.add_plugin(SamplePlugin()) + +agent = ChatCompletionAgent( + kernel=kernel, + name="", + instructions="", +) +``` + +Note: Both approaches are valid, but directly specifying plugins simplifies initialization. + +### 3. Invoking the Agent + +You now have two ways to invoke the agent. The new method directly retrieves a single response, while the old method supports streaming. + +#### New Way (Single Response) + +```python +chat_history = ChatHistory() +chat_history.add_user_message("") +response = await agent.get_response(chat_history) +# response is of type ChatMessageContent +``` + +#### Old Way (Still Valid) + +```python +chat_history = ChatHistory() +chat_history.add_user_message("") +async for response in agent.invoke(chat_history): + # handle response +``` + +### 4. Controlling Function Calling + +Function calling behavior can now be controlled directly when specifying the service within the agent constructor: + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", + plugins=[MenuPlugin()], + function_choice_behavior=FunctionChoiceBehavior.Auto( + filters={"included_functions": ["get_specials", "get_item_price"]} + ), +) +``` + +Note: Previously, function calling configuration required separate setup on the kernel or service object. If execution settings specify the same `service_id` or `ai_model_id` as the AI service configuration, the function calling behavior defined in the execution settings (via `KernelArguments`) will take precedence over the function choice behavior set in the constructor. + +These updates enhance simplicity and configurability, making the ChatCompletionAgent easier to integrate and maintain. 
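To see how these options fit together, below is a minimal, self-contained sketch that combines direct service configuration, constructor-supplied plugins, function-choice filtering, and the new `get_response` method. The `MenuPlugin` implementation, agent name, and instructions are illustrative assumptions rather than part of the migration guide, and the sketch assumes Azure OpenAI settings are available via environment variables or a `.env` file.

```python
import asyncio

from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import kernel_function


class MenuPlugin:
    """A hypothetical plugin used only to illustrate constructor-supplied plugins."""

    @kernel_function(description="Returns the daily specials.")
    def get_specials(self) -> str:
        return "Clam chowder, Cobb salad, Chai tea"

    @kernel_function(description="Returns the price of a menu item.")
    def get_item_price(self, item: str) -> str:
        return "$9.99"


async def main():
    agent = ChatCompletionAgent(
        service=AzureChatCompletion(),
        name="MenuAgent",  # hypothetical name
        instructions="Answer questions about the menu.",  # hypothetical instructions
        plugins=[MenuPlugin()],
        function_choice_behavior=FunctionChoiceBehavior.Auto(
            filters={"included_functions": ["get_specials", "get_item_price"]}
        ),
    )

    chat_history = ChatHistory()
    chat_history.add_user_message("What is the special soup and how much does it cost?")

    # get_response returns a single ChatMessageContent
    response = await agent.get_response(chat_history)
    print(response.content)


if __name__ == "__main__":
    asyncio.run(main())
```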
+ +## `OpenAIAssistantAgent` + +The `AzureAssistantAgent` and `OpenAIAssistantAgent` changes include updates for creating assistants, creating threads, handling plugins, using the code interpreter tool, working with the file search tool, and adding chat messages to a thread. + +## Setting up Resources + +### Old Way + +The `AsyncAzureOpenAI` client was created as part of creating the Agent object. + +```python +agent = await AzureAssistantAgent.create( + deployment_name="optional-deployment-name", + api_key="optional-api-key", + endpoint="optional-endpoint", + ad_token="optional-ad-token", + ad_token_provider=optional_callable, + default_headers={"optional_header": "optional-header-value"}, + env_file_path="optional-env-file-path", + env_file_encoding="optional-env-file-encoding", + ..., +) +``` + +### New Way + +The agent provides a static method to create the required client for the specified resources, where method-level keyword arguments take precedence over environment variables and values in an existing `.env` file. + +```python +client, model = AzureAssistantAgent.setup_resources( + ad_token="optional-ad-token", + ad_token_provider=optional_callable, + api_key="optional-api-key", + api_version="optional-api-version", + base_url="optional-base-url", + default_headers="optional-default-headers", + deployment_name="optional-deployment-name", + endpoint="optional-endpoint", + env_file_path="optional-env-file-path", + env_file_encoding="optional-env-file-encoding", + token_scope="optional-token-scope", +) +``` + +## 1. Creating an Assistant + +### Old Way +```python +agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, +) +``` +or +```python +agent = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=, + instructions=, + enable_code_interpreter=True, +) +``` + +### New Way +```python +# Azure AssistantAgent + +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` +or +```python +# OpenAI Assistant Agent + +# Create the client using OpenAI resources and configuration +client, model = OpenAIAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent using the client and the assistant definition +agent = OpenAIAssistantAgent( + client=client, + definition=definition, +) +``` + +## 2. Creating a Thread + +### Old Way +```python +thread_id = await agent.create_thread() +``` + +### New Way +```python +thread = await agent.client.beta.threads.create() +# Use thread.id for the thread_id string +``` + +## 3. Handling Plugins + +### Old Way +```python +# Create the instance of the Kernel +kernel = Kernel() + +# Add the sample plugin to the kernel +kernel.add_plugin(plugin=MenuPlugin(), plugin_name="menu") + +agent = await AzureAssistantAgent.create( + kernel=kernel, + name="", + instructions="" +) +``` +*Note: It is still possible to manage plugins via the kernel. 
If you do not supply a kernel, a kernel is automatically created at agent creation time and the plugins will be added to that instance.* + +### New Way +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent with plugins passed in as a list +agent = AzureAssistantAgent( + client=client, + definition=definition, + plugins=[MenuPlugin()], +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/getting_started_with_agents/openai_assistant/step2_plugins.py) for full details. + +## 4. Using the Code Interpreter Tool + +### Old Way +```python +csv_file_path = ... + +agent = await AzureAssistantAgent.create( + kernel=kernel, + name="", + instructions="", + enable_code_interpreter=True, + code_interpreter_filenames=[csv_file_path], +) +``` + +### New Way +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +csv_file_path = ... + +# Load the CSV file as a FileObject +with open(csv_file_path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + +# Get the code interpreter tool and resources +code_interpreter_tool, code_interpreter_tool_resource = AzureAssistantAgent.configure_code_interpreter_tool(file.id) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions=".", + tools=code_interpreter_tool, + tool_resources=code_interpreter_tool_resource, +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/agents/openai_assistant/openai_assistant_file_manipulation.py) for full details. + +## 5. Working with the File Search Tool + +### Old Way +```python +pdf_file_path = ... + +agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + vector_store_filenames=[pdf_file_path], +) +``` + +### New Way + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +pdf_file_path = ... 
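+# NOTE: "..." above is a placeholder; set pdf_file_path to the PDF you want indexed (a hypothetical "employees.pdf", for example).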
+ +# Load the employees PDF file as a FileObject +with open(pdf_file_path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + +# Create a vector store specifying the file ID to be used for file search +vector_store = await client.beta.vector_stores.create( + name="step4_assistant_file_search", + file_ids=[file.id], +) + +file_search_tool, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool(vector_store.id) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="Find answers to the user's questions in the provided file.", + name="FileSearch", + tools=file_search_tool, + tool_resources=file_search_tool_resources, +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/getting_started_with_agents/openai_assistant/step4_assistant_tool_file_search.py) for full details. + +## 6. Adding Chat Messages to a Thread + +### Old Way +```python +await agent.add_chat_message( + thread_id=thread_id, + message=ChatMessageContent(role=AuthorRole.USER, content=user_input) +) +``` + +### New Way +*Note: The old method still works if you pass in a `ChatMessageContent`, but you can now also pass a simple string.* +```python +await agent.add_chat_message( + thread_id=thread_id, + message=user_input, +) +``` + +## 7. Cleaning Up Resources + +### Old Way +```python +await agent.delete_file(file_id) +await agent.delete_thread(thread_id) +await agent.delete() +``` + +### New Way +```python +await client.files.delete(file_id) +await client.beta.threads.delete(thread.id) +await client.beta.assistants.delete(agent.id) +``` + +## Handling Structured Outputs + +### Old Way +*Unavailable in the old way* + +### New Way +```python +# Define a Pydantic model that represents the structured output from the OpenAI service +class ResponseModel(BaseModel): + response: str + items: list[str] + +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions="", + response_format=AzureAssistantAgent.configure_response_format(ResponseModel), +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/agents/openai_assistant/openai_assistant_structured_outputs.py) for full details. + +This migration guide should help you update your code to the new implementation, leveraging client-based configuration and enhanced features. + +::: zone-end +::: zone pivot="programming-language-java" +> Agents are unavailable in Java. 
+::: zone-end diff --git a/semantic-kernel/support/migration/toc.yml b/semantic-kernel/support/migration/toc.yml index 91c21c5f..1718b55c 100644 --- a/semantic-kernel/support/migration/toc.yml +++ b/semantic-kernel/support/migration/toc.yml @@ -9,4 +9,6 @@ - name: Memory Store to Vector Store Migration href: memory-store-migration.md - name: Kernel Events and Filters Migration - href: kernel-events-and-filters-migration.md \ No newline at end of file + href: kernel-events-and-filters-migration.md +- name: Agent Framework Release Candidate Migration Guide + href: agent-framework-rc-migration-guide.md \ No newline at end of file From 24962d0ecddb051e2e1cc30e57e780ca3e018ac5 Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Fri, 28 Feb 2025 11:15:09 +0900 Subject: [PATCH 041/117] Update Agent Framework related doc and code samples. Add migration code for Python (#469) (#473) * Update OpenAI assistant related code samples. Add migration code for Python * improve migration guide * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md * Replace italics with code format. * update bookmarks * Update Python docs * Add dotnet migration guide. * update formatting in migration guide * fix headers * Fix header again * update guide to include rc * Small update to include new method get_response * Update important tags with some experimental (group chat) and some release candidate --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../Frameworks/agent/agent-architecture.md | 34 +- .../Frameworks/agent/agent-chat.md | 58 +- .../Frameworks/agent/agent-functions.md | 89 ++- .../Frameworks/agent/agent-streaming.md | 24 +- .../Frameworks/agent/agent-templates.md | 18 +- .../Frameworks/agent/assistant-agent.md | 95 ++- .../Frameworks/agent/chat-completion-agent.md | 79 ++- .../examples/example-agent-collaboration.md | 118 ++-- .../agent/examples/example-assistant-code.md | 181 +++-- .../examples/example-assistant-search.md | 152 +++-- .../agent/examples/example-chat-agent.md | 38 +- semantic-kernel/Frameworks/agent/index.md | 16 +- .../agent-framework-rc-migration-guide.md | 623 ++++++++++++++++++ semantic-kernel/support/migration/toc.yml | 4 +- 14 files changed, 1180 insertions(+), 349 deletions(-) create mode 100644 semantic-kernel/support/migration/agent-framework-rc-migration-guide.md diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index c3966fa8..b22c48ee 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -1,5 +1,5 @@ --- -title: Semantic Kernel Agent Architecture (Experimental) +title: Semantic Kernel Agent Architecture description: An overview of the architecture of the Semantic Kernel Agent Framework and how it aligns with core Semantic Kernel features. zone_pivot_groups: programming-languages author: crickman @@ -10,15 +10,15 @@ ms.service: semantic-kernel --- # An Overview of the Agent Architecture -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> Single-agent features, such as ChatCompletionAgent and OpenAIAssistantAgent, are in the release candidate stage. 
These features are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. However, agent chat patterns are still in the experimental stage. These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. This article covers key concepts in the architecture of the Agent Framework, including foundational principles, design objectives, and strategic goals. ## Goals -The _Agent Framework_ was developed with the following key priorities in mind: +The `Agent Framework` was developed with the following key priorities in mind: - The _Semantic Kernel_ framework serves as the core foundation for implementing agent functionalities. - Multiple agents can collaborate within a single conversation, while integrating human input. @@ -28,7 +28,7 @@ The _Agent Framework_ was developed with the following key priorities in mind: ## Agent -The abstract _Agent_ class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [_Kernel_](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [_Chat Completion Agent_](./chat-completion-agent.md) and the [_OpenAI Assistant Agent_](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. +The abstract `Agent` class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [`Kernel`](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [`ChatCompletionAgent`](./chat-completion-agent.md) and the [`OpenAIAssistantAgent`](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. ::: zone pivot="programming-language-csharp" @@ -49,7 +49,7 @@ The abstract _Agent_ class serves as the core abstraction for all types of agent ::: zone-end -Agents can either be invoked directly to perform tasks or orchestrated within an [_Agent Chat_](./agent-chat.md), where multiple agents may collaborate or interact dynamically with user inputs. This flexible structure allows agents to adapt to various conversational or task-driven scenarios, providing developers with robust tools for building intelligent, multi-agent systems. +Agents can either be invoked directly to perform tasks or orchestrated within an [`AgentChat`](./agent-chat.md), where multiple agents may collaborate or interact dynamically with user inputs. This flexible structure allows agents to adapt to various conversational or task-driven scenarios, providing developers with robust tools for building intelligent, multi-agent systems. #### Deep Dive: @@ -64,7 +64,7 @@ Agents can either be invoked directly to perform tasks or orchestrated within an ## Agent Chat -The [_Agent Chat_](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. 
Building on this, the [_Agent Group Chat_](./agent-chat.md#creating-an-agent-group-chat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation.
+The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a strategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation.
 
 This structure facilitates more complex, multi-agent scenarios where different agents can work together, share information, and dynamically respond to evolving conversations, making it an ideal solution for advanced use cases such as customer support, multi-faceted task management, or collaborative problem-solving environments.
 
@@ -74,7 +74,7 @@ This structure facilitates more complex, multi-agent scenarios where different a
 
 ## Agent Channel
 
-The _Agent Channel_ class enables agents of various types to participate in an [_Agent Chat_](./agent-chat.md). This functionality is completely hidden from users of the _Agent Framework_ and only needs to be considered by developers creating a custom [_Agent_](#agent).
+The _Agent Channel_ class enables agents of various types to participate in an [`AgentChat`](./agent-chat.md). This functionality is completely hidden from users of the `Agent Framework` and only needs to be considered by developers creating a custom [`Agent`](#agent).
 
 ::: zone pivot="programming-language-csharp"
 
@@ -96,14 +96,14 @@ The _Agent Channel_ class enables agents of various types to participate in an [
 
 ## Agent Alignment with _Semantic Kernel_ Features
 
-The _Agent Framework_ is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework.
+The `Agent Framework` is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework.
 
-### The _Kernel_
+### The `Kernel`
 
-At the heart of the _Semantic Kernel_ ecosystem is the [_Kernel_](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. 
To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The _Kernel_ acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. +At the heart of the Semantic Kernel ecosystem is the [`Kernel`](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The `Kernel` acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. -The [_Chat Completion Agent_](./chat-completion-agent.md) and [_OpenAI Assistant Agent_](./assistant-agent.md) articles provide specific details on how to create each type of agent. +The [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. #### Related API's: @@ -136,7 +136,7 @@ Plugins are a fundamental aspect of the _Semantic Kernel_, enabling developers t #### Example: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) #### Related API's: @@ -169,7 +169,7 @@ Plugins are a fundamental aspect of the _Semantic Kernel_, enabling developers t Agent messaging, including both input and response, is built upon the core content types of the _Semantic Kernel_, providing a unified structure for communication. This design choice simplifies the process of transitioning from traditional chat-completion patterns to more advanced agent-driven patterns in your application development. By leveraging familiar _Semantic Kernel_ content types, developers can seamlessly integrate agent capabilities into their applications without needing to overhaul existing systems. This streamlining ensures that as you evolve from basic conversational AI to more autonomous, task-oriented agents, the underlying framework remains consistent, making development faster and more efficient. -> Note: The [_OpenAI Assistant Agent_`_](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: +> Note: The [`OpenAIAssistantAgent`](./assistant-agent.md) introduced content types specific to its usage for _File References_ and _Content Annotation_: #### Related API's: @@ -205,13 +205,13 @@ Agent messaging, including both input and response, is built upon the core conte ### [Templating](./agent-templates.md) -An agent's role is primarily shaped by the instructions it receives, which dictate its behavior and actions. Similar to invoking a _Kernel_ [prompt](../../concepts/prompts/index.md), an agent's instructions can include templated parameters—both values and functions—that are dynamically substituted during execution. This enables flexible, context-aware responses, allowing the agent to adjust its output based on real-time input. 
+An agent's role is primarily shaped by the instructions it receives, which dictate its behavior and actions. Similar to invoking a `Kernel` [prompt](../../concepts/prompts/index.md), an agent's instructions can include templated parameters—both values and functions—that are dynamically substituted during execution. This enables flexible, context-aware responses, allowing the agent to adjust its output based on real-time input. Additionally, an agent can be configured directly using a _Prompt Template Configuration_, providing developers with a structured and reusable way to define its behavior. This approach offers a powerful tool for standardizing and customizing agent instructions, ensuring consistency across various use cases while still maintaining dynamic adaptability. #### Example: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) #### Related API's: @@ -245,7 +245,7 @@ Additionally, an agent can be configured directly using a _Prompt Template Confi ### [Chat Completion](./chat-completion-agent.md) -The [_Chat Completion Agent_](./chat-completion-agent.md) is designed around any _Semantic Kernel_ [AI service](../../concepts/ai-services/chat-completion/index.md), offering a flexible and convenient persona encapsulation that can be seamlessly integrated into a wide range of applications. This agent allows developers to easily bring conversational AI capabilities into their systems without having to deal with complex implementation details. It mirrors the features and patterns found in the underlying [AI service](../../concepts/ai-services/chat-completion/index.md), ensuring that all functionalities—such as natural language processing, dialogue management, and contextual understanding—are fully supported within the [_Chat Completion Agent_](./chat-completion-agent.md), making it a powerful tool for building conversational interfaces. +The [`ChatCompletionAgent`](./chat-completion-agent.md) is designed around any _Semantic Kernel_ [AI service](../../concepts/ai-services/chat-completion/index.md), offering a flexible and convenient persona encapsulation that can be seamlessly integrated into a wide range of applications. This agent allows developers to easily bring conversational AI capabilities into their systems without having to deal with complex implementation details. It mirrors the features and patterns found in the underlying [AI service](../../concepts/ai-services/chat-completion/index.md), ensuring that all functionalities—such as natural language processing, dialogue management, and contextual understanding—are fully supported within the [`ChatCompletionAgent`](./chat-completion-agent.md), making it a powerful tool for building conversational interfaces. #### Related API's: diff --git a/semantic-kernel/Frameworks/agent/agent-chat.md b/semantic-kernel/Frameworks/agent/agent-chat.md index bf59e765..d0eb6c7c 100644 --- a/semantic-kernel/Frameworks/agent/agent-chat.md +++ b/semantic-kernel/Frameworks/agent/agent-chat.md @@ -8,10 +8,10 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring Agent Collaboration in _Agent Chat_ +# Exploring Agent Collaboration in `AgentChat` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. 
Detailed API documentation related to this discussion is available at: @@ -36,22 +36,22 @@ Detailed API documentation related to this discussion is available at: ::: zone-end -## What is _Agent Chat_? +## What is `AgentChat`? -_Agent Chat_ provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [_Chat Completion Agent_](./chat-completion-agent.md) and an [_OpenAI Assistant Agent_](./assistant-agent.md) to work together within the same conversation. _Agent Chat_ also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. +`AgentChat` provides a framework that enables interaction between multiple agents, even if they are of different types. This makes it possible for a [`ChatCompletionAgent`](./chat-completion-agent.md) and an [`OpenAIAssistantAgent`](./assistant-agent.md) to work together within the same conversation. `AgentChat` also defines entry points for initiating collaboration between agents, whether through multiple responses or a single agent response. -As an abstract class, _Agent Chat_ can be subclassed to support custom scenarios. +As an abstract class, `AgentChat` can be subclassed to support custom scenarios. -One such subclass, _Agent Group Chat_, offers a concrete implementation of _Agent Chat_, using a strategy-based approach to manage conversation dynamics. +One such subclass, `AgentGroupChat`, offers a concrete implementation of `AgentChat`, using a strategy-based approach to manage conversation dynamics. -## Creating an _Agent Group Chat_ +## Creating an `AgentGroupChat` -To create an _Agent Group Chat_, you may either specify the participating agents or create an empty chat and subsequently add agent participants. Configuring the _Chat-Settings_ and _Strategies_ is also performed during _Agent Group Chat_ initialization. These settings define how the conversation dynamics will function within the group. +To create an `AgentGroupChat`, you may either specify the participating agents or create an empty chat and subsequently add agent participants. Configuring the _Chat-Settings_ and _Strategies_ is also performed during `AgentGroupChat` initialization. These settings define how the conversation dynamics will function within the group. -> Note: The default _Chat-Settings_ result in a conversation that is limited to a single response. See [_Agent Chat_ Behavior](#defining-agent-group-chat-behavior) for details on configuring _Chat-Settings. +> Note: The default _Chat-Settings_ result in a conversation that is limited to a single response. See [`AgentChat` Behavior](#defining-agentgroupchat-behavior) for details on configuring _Chat-Settings. -#### Creating _Agent Group Chat_ with _Agents_: +#### Creating an `AgentGroupChat` with an `Agent`: ::: zone pivot="programming-language-csharp" ```csharp @@ -81,7 +81,7 @@ chat = AgentGroupChat(agents=[agent1, agent2]) ::: zone-end -#### Adding _Agents_ to a _Agent Group Chat_: +#### Adding an `Agent` to an `AgentGroupChat`: ::: zone pivot="programming-language-csharp" ```csharp @@ -120,13 +120,13 @@ chat.add_agent(agent=agent2) ::: zone-end -## Using _Agent Group Chat_ +## Using `AgentGroupChat` -_Agent Chat_ supports two modes of operation: _Single-Turn_ and _Multi-Turn_. In _single-turn_, a specific agent is designated to provide a response. In _multi-turn_, all agents in the conversation take turns responding until a termination criterion is met. 
In both modes, agents can collaborate by responding to one another to achieve a defined goal.
+`AgentChat` supports two modes of operation: `Single-Turn` and `Multi-Turn`. In `single-turn`, a specific agent is designated to provide a response. In `multi-turn`, all agents in the conversation take turns responding until a termination criterion is met. In both modes, agents can collaborate by responding to one another to achieve a defined goal.
 
 ### Providing Input
 
-Adding an input message to an _Agent Chat_ follows the same pattern as whit a _Chat History_ object.
+Adding an input message to an `AgentChat` follows the same pattern as with a `ChatHistory` object.
 
 ::: zone pivot="programming-language-csharp"
 ```csharp
@@ -140,7 +140,7 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, "")
 ```python
 chat = AgentGroupChat()
 
-await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=""))
+await chat.add_chat_message(message="")
 ```
 
 ::: zone-end
@@ -154,7 +154,7 @@ await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="<
 > Note: The most recent message is provided first (descending order: newest to oldest).
@@ -280,7 +280,7 @@ history = await chat.get_chat_messages()
 ::: zone-end
 
-Since different agent types or configurations may maintain their own version of the conversation history, agent specific history is also available by specifing an agent. (For example: [_OpenAI Assistant_](./assistant-agent.md) versus [_Chat Completion Agent_](./chat-completion-agent.md).)
+Since different agent types or configurations may maintain their own version of the conversation history, agent-specific history is also available by specifying an agent. (For example: [`OpenAIAssistant`](./assistant-agent.md) versus [`ChatCompletionAgent`](./chat-completion-agent.md).)
 
 ::: zone pivot="programming-language-csharp"
 ```csharp
@@ -319,7 +319,7 @@ history2 = await chat.get_chat_messages(agent=agent2)
 ::: zone-end
 
-## Defining _Agent Group Chat_ Behavior
+## Defining `AgentGroupChat` Behavior
 
 Collaboration among agents to solve complex tasks is a core agentic pattern. To use this pattern effectively, a system must be in place that not only determines which agent should respond during each turn but also assesses when the conversation has achieved its intended goal. This requires managing agent selection and establishing clear criteria for conversation termination, ensuring seamless cooperation between agents toward a solution. Both of these aspects are governed by the _Execution Settings_ property.
 
@@ -329,7 +329,7 @@ The following sections, [Agent Selection](#agent-selection) and [Chat Terminatio
 
 In multi-turn invocation, agent selection is guided by a _Selection Strategy_. This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. For convenience, two predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for handling agent selection during conversations.
 
-If known, an initial agent may be specified to always take the first turn. A history reducer may also be employed to limit token usage when using a strategy based on a _Kernel Function_.
+If known, an initial agent may be specified to always take the first turn. A history reducer may also be employed to limit token usage when using a strategy based on a `KernelFunction`. 
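+
+As a condensed Python sketch of how these pieces can fit together (the prompt text, agent names, and the `history_reducer` argument below are illustrative assumptions rather than the exact sample that follows in the examples), a `KernelFunction`-based selection strategy pairs a prompt function with an optional history reducer:
+
+```python
+from semantic_kernel import Kernel
+from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent
+from semantic_kernel.agents.strategies import KernelFunctionSelectionStrategy
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.contents import ChatHistoryTruncationReducer
+from semantic_kernel.functions import KernelFunctionFromPrompt
+
+# Kernel used to execute the selection prompt
+kernel = Kernel()
+kernel.add_service(AzureChatCompletion())
+
+# Two agents that will take turns
+reviewer = ChatCompletionAgent(service=AzureChatCompletion(), name="Reviewer", instructions="Review the content.")
+writer = ChatCompletionAgent(service=AzureChatCompletion(), name="Writer", instructions="Revise the content.")
+
+# A prompt-based function that names which agent takes the next turn
+selection_function = KernelFunctionFromPrompt(
+    function_name="selection",
+    prompt="""
+    State only the name of the participant to take the next turn.
+    Choose only from: Reviewer, Writer.
+
+    History:
+    {{$history}}
+    """,
+)
+
+chat = AgentGroupChat(
+    agents=[reviewer, writer],
+    selection_strategy=KernelFunctionSelectionStrategy(
+        function=selection_function,
+        kernel=kernel,
+        history_variable_name="history",
+        # Assumption: a history reducer limits how much history the selection prompt sees
+        history_reducer=ChatHistoryTruncationReducer(target_count=1),
+        # Normalize the function result down to a bare agent name
+        result_parser=lambda result: str(result.value[0]).strip() if result.value else "Writer",
+    ),
+)
+```
+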
::: zone pivot="programming-language-csharp" @@ -414,14 +414,12 @@ REVIEWER_NAME = "Reviewer" WRITER_NAME = "Writer" agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions="", ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions="", @@ -472,7 +470,7 @@ chat = AgentGroupChat( In _multi-turn_ invocation, the _Termination Strategy_ dictates when the final turn takes place. This strategy ensures the conversation ends at the appropriate point. -This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. For convenience, serveral predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for defining termination criteria for an _Agent Chat_ conversations. +This strategy is defined by a base class that can be extended to implement custom behaviors tailored to specific needs. For convenience, serveral predefined concrete _Selection Strategies_ are also available, offering ready-to-use approaches for defining termination criteria for an `AgentChat` conversations. ::: zone pivot="programming-language-csharp" @@ -549,14 +547,12 @@ REVIEWER_NAME = "Reviewer" WRITER_NAME = "Writer" agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions="", ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions="", @@ -595,7 +591,7 @@ chat = AgentGroupChat( ### Resetting Chat Completion State -Regardless of whether _Agent Group Chat_ is invoked using the single-turn or multi-turn approach, the state of the _Agent Group Chat_ is updated to indicate it is _completed_ once the termination criteria is met. This ensures that the system recognizes when a conversation has fully concluded. To continue using an _Agent Group Chat_ instance after it has reached the _Completed_ state, this state must be reset to allow further interactions. Without resetting, additional interactions or agent responses will not be possible. +Regardless of whether `AgentGroupChat` is invoked using the single-turn or multi-turn approach, the state of the `AgentGroupChat` is updated to indicate it is _completed_ once the termination criteria is met. This ensures that the system recognizes when a conversation has fully concluded. To continue using an `AgentGroupChat` instance after it has reached the _Completed_ state, this state must be reset to allow further interactions. Without resetting, additional interactions or agent responses will not be possible. In the case of a multi-turn invocation that reaches the maximum turn limit, the system will cease agent invocation but will not mark the instance as _completed_. This allows for the possibility of extending the conversation without needing to reset the _Completion_ state. @@ -636,9 +632,9 @@ if chat.is_complete: ### Clear Full Conversation State -When done using an _Agent Chat_ where an [_OpenAI Assistant_](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. _Agent Chat_ supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. 
+When done using an `AgentChat` where an [`OpenAIAssistant`](./assistant-agent.md) participated, it may be necessary to delete the remote _thread_ associated with the _assistant_. `AgentChat` supports resetting or clearing the entire conversation state, which includes deleting any remote _thread_ definition. This ensures that no residual conversation data remains linked to the assistant once the chat concludes. -A full reset does not remove the _agents_ that had joined the _Agent Chat_ and leaves the _Agent Chat_ in a state where it can be reused. This allows for the continuation of interactions with the same agents without needing to reinitialize them, making future conversations more efficient. +A full reset does not remove the _agents_ that had joined the `AgentChat` and leaves the `AgentChat` in a state where it can be reused. This allows for the continuation of interactions with the same agents without needing to reinitialize them, making future conversations more efficient. ::: zone pivot="programming-language-csharp" ```csharp @@ -669,9 +665,9 @@ await chat.reset() ## How-To -For an end-to-end example for using _Agent Group Chat_ for _Agent_ collaboration, see: +For an end-to-end example for using `AgentGroupChat` for `Agent` collaboration, see: -- [How to Coordinate Agent Collaboration using _Agent Group Chat_](./examples/example-agent-collaboration.md) +- [How to Coordinate Agent Collaboration using `AgentGroupChat`](./examples/example-agent-collaboration.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/agent-functions.md b/semantic-kernel/Frameworks/agent/agent-functions.md index 3ada7e4f..879703b6 100644 --- a/semantic-kernel/Frameworks/agent/agent-functions.md +++ b/semantic-kernel/Frameworks/agent/agent-functions.md @@ -1,5 +1,5 @@ --- -title: Configuring Agents with Semantic Kernel Plugins. (Experimental) +title: Configuring Agents with Semantic Kernel Plugins. description: Describes how to use Semantic Kernal plugins and function calling with agents. zone_pivot_groups: programming-languages author: crickman @@ -10,14 +10,14 @@ ms.service: semantic-kernel --- # Configuring Agents with Semantic Kernel Plugins -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Functions and Plugins in Semantic Kernel -Function calling is a powerful tool that allows developers to add custom functionalities and expand the capabilities of AI applications. The _Semantic Kernel_ [Plugin](../../concepts/plugins/index.md) architecture offers a flexible framework to support [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md). For an _Agent_, integrating [Plugins](../../concepts/plugins/index.md) and [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is built on this foundational _Semantic Kernel_ feature. +Function calling is a powerful tool that allows developers to add custom functionalities and expand the capabilities of AI applications. The _Semantic Kernel_ [Plugin](../../concepts/plugins/index.md) architecture offers a flexible framework to support [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md). 
For an `Agent`, integrating [Plugins](../../concepts/plugins/index.md) and [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is built on this foundational _Semantic Kernel_ feature. -Once configured, an agent will choose when and how to call an available function, as it would in any usage outside of the _Agent Framework_. +Once configured, an agent will choose when and how to call an available function, as it would in any usage outside of the `Agent Framework`. ::: zone pivot="programming-language-csharp" @@ -46,11 +46,11 @@ Once configured, an agent will choose when and how to call an available function ## Adding Plugins to an Agent -Any [Plugin](../../concepts/plugins/index.md) available to an _Agent_ is managed within its respective _Kernel_ instance. This setup enables each _Agent_ to access distinct functionalities based on its specific role. +Any [Plugin](../../concepts/plugins/index.md) available to an `Agent` is managed within its respective `Kernel` instance. This setup enables each `Agent` to access distinct functionalities based on its specific role. -[Plugins](../../concepts/plugins/index.md) can be added to the _Kernel_ either before or after the _Agent_ is created. The process of initializing [Plugins](../../concepts/plugins/index.md) follows the same patterns used for any _Semantic Kernel_ implementation, allowing for consistency and ease of use in managing AI capabilities. +[Plugins](../../concepts/plugins/index.md) can be added to the `Kernel` either before or after the `Agent` is created. The process of initializing [Plugins](../../concepts/plugins/index.md) follows the same patterns used for any _Semantic Kernel_ implementation, allowing for consistency and ease of use in managing AI capabilities. -> Note: For a [_Chat Completion Agent_](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [_OpenAI Assistant_](./assistant-agent.md) agent is always based on automatic function calling. +> Note: For a [`ChatCompletionAgent`](./chat-completion-agent.md), the function calling mode must be explicitly enabled. [`OpenAIAssistant`](./assistant-agent.md) agent is always based on automatic function calling. ::: zone pivot="programming-language-csharp" ```csharp @@ -85,18 +85,46 @@ ChatCompletionAgent CreateSpecificAgent(Kernel kernel, string credentials) ::: zone-end ::: zone pivot="programming-language-python" + +There are two ways to create a `ChatCompletionAgent` with plugins. + +#### Method 1: Specify Plugins via the Constructor + +You can directly pass a list of plugins to the constructor: + ```python +from semantic_kernel.agents import ChatCompletionAgent + +# Create the Chat Completion Agent instance by specifying a list of plugins +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + instructions="", + plugins=[SamplePlugin()] +) +``` + +> [!TIP] +> By default, auto-function calling is enabled. To disable it, set the `function_choice_behavior` argument to `function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=False)` in the constructor. With this setting, plugins are still broadcast to the model, but they are not automatically invoked. If execution settings specify the same `service_id` or `ai_model_id` as the AI service configuration, the function calling behavior defined in the execution settings (via `KernelArguments`) will take precedence over the function choice behavior set in the constructor. 
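+
+For reference, the `SamplePlugin` used in the constructor example above is only a placeholder. A minimal sketch of what such a plugin might look like (the function names and return values here are illustrative, not part of the Semantic Kernel API) is simply a class whose methods are decorated with `@kernel_function`:
+
+```python
+from typing import Annotated
+
+from semantic_kernel.functions import kernel_function
+
+
+class SamplePlugin:
+    """An illustrative plugin exposing two functions to the agent."""
+
+    @kernel_function(description="Provides a list of specials from the menu.")
+    def get_specials(self) -> Annotated[str, "The specials from the menu."]:
+        return """
+        Special Soup: Clam Chowder
+        Special Salad: Cobb Salad
+        Special Drink: Chai Tea
+        """
+
+    @kernel_function(description="Provides the price of the requested menu item.")
+    def get_item_price(
+        self, menu_item: Annotated[str, "The name of the menu item."]
+    ) -> Annotated[str, "The price of the menu item."]:
+        return "$9.99"
+```
+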
+ +#### Method 2: Configure the Kernel Manually + +If no kernel is provided via the constructor, one is automatically created during model validation. Any plugins passed in take precedence and are added to the kernel. For more fine-grained control over the kernel's state, follow these steps: + +```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings +from semantic_kernel.functions import KernelFunctionFromPrompt +from semantic_kernel.kernel import Kernel + # Create the instance of the Kernel kernel = Kernel() -# Define the service ID -service_id = "" - # Add the chat completion service to the Kernel -kernel.add_service(AzureChatCompletion(service_id=service_id)) +kernel.add_service(AzureChatCompletion()) -# Get the AI Service settings for the specified service_id -settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id) +# Get the AI Service settings +settings = kernel.get_prompt_execution_settings_from_service_id() # Configure the function choice behavior to auto invoke kernel functions settings.function_choice_behavior = FunctionChoiceBehavior.Auto() @@ -106,13 +134,16 @@ kernel.add_plugin(SamplePlugin(), plugin_name="") # Create the agent agent = ChatCompletionAgent( - service_id=service_id, kernel=kernel, name=, instructions=, arguments=KernelArguments(settings=settings), ) ``` + +> [!TIP] +> If a `service_id` is not specified when adding a service to the kernel, it defaults to `default`. When configuring multiple AI services on the kernel, it’s recommended to differentiate them using the `service_id` argument. This allows you to retrieve execution settings for a specific `service_id` and tie those settings to the desired service. + ::: zone-end ::: zone pivot="programming-language-java" @@ -160,27 +191,35 @@ ChatCompletionAgent CreateSpecificAgent(Kernel kernel) ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings +from semantic_kernel.functions import KernelFunctionFromPrompt +from semantic_kernel.kernel import Kernel + # Create the instance of the Kernel kernel = Kernel() -# Define the service ID -service_id = "" - # Add the chat completion service to the Kernel -kernel.add_service(AzureChatCompletion(service_id=service_id)) +kernel.add_service(AzureChatCompletion()) -# Get the AI Service settings for the specified service_id -settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id) +# Create the AI Service settings +settings = AzureChatPromptExecutionSettings() # Configure the function choice behavior to auto invoke kernel functions settings.function_choice_behavior = FunctionChoiceBehavior.Auto() # Add the Plugin to the Kernel -kernel.add_plugin(SamplePlugin(), plugin_name="") +kernel.add_function( + plugin_name="", + function=KernelFunctionFromPrompt( + function_name="", + prompt="", + ) +) # Create the agent agent = ChatCompletionAgent( - service_id=service_id, kernel=kernel, name=, instructions=, @@ -198,14 +237,14 @@ agent = ChatCompletionAgent( ## Limitations for Agent Function Calling -When directly invoking a[_Chat Completion Agent_](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. 
However, when using an [_OpenAI Assistant_](./assistant-agent.md) or [_Agent Chat_](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available.
+When directly invoking a [`ChatCompletionAgent`](./chat-completion-agent.md), all _Function Choice Behaviors_ are supported. However, when using an [`OpenAIAssistant`](./assistant-agent.md) or [`AgentChat`](./agent-chat.md), only _Automatic_ [Function Calling](../../concepts/ai-services/chat-completion/function-calling/index.md) is currently available.
 
 
 ## How-To
 
 For an end-to-end example for using function calling, see:
 
-- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md)
+- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md)
 
 
 > [!div class="nextstepaction"]
diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md
index eac08b71..0c0c340b 100644
--- a/semantic-kernel/Frameworks/agent/agent-streaming.md
+++ b/semantic-kernel/Frameworks/agent/agent-streaming.md
@@ -1,5 +1,5 @@
 ---
-title: How to Stream Agent Responses. (Experimental)
+title: How to Stream Agent Responses.
 description: Describes how to utilize streamed responses for agents and agent chat.
 zone_pivot_groups: programming-languages
 author: crickman
@@ -10,8 +10,8 @@ ms.service: semantic-kernel
 ---
 # How to Stream Agent Responses
 
-> [!WARNING]
-> The *Semantic Kernel Agent Framework* is in preview and is subject to change.
+> [!IMPORTANT]
+> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability.
 
 ## What is a Streamed Response?
 
@@ -54,11 +54,11 @@ A streamed response delivers the message content in small, incremental chunks. T
 
 ## Streaming Agent Invocation
 
-The _Agent Framework_ supports _streamed_ responses when using [_Agent Chat_](./agent-chat.md) or when directly invoking a [_Chat Completion Agent_](./chat-completion-agent.md) or [_OpenAI Assistant Agent_](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow.
+The `Agent Framework` supports _streamed_ responses when using [`AgentChat`](./agent-chat.md) or when directly invoking a [`ChatCompletionAgent`](./chat-completion-agent.md) or [`OpenAIAssistantAgent`](./assistant-agent.md). In either mode, the framework delivers responses asynchronously as they are streamed. Alongside the streamed response, a consistent, non-streamed history is maintained to track the conversation. This ensures both real-time interaction and a reliable record of the conversation's flow.
 
-### Streamed response from _Chat Completion Agent_
+### Streamed response from `ChatCompletionAgent`
 
-When invoking a streamed response from a [_Chat Completion Agent_](./chat-completion-agent.md), the _Chat History_ is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the _Chat History_ reflects fully formed responses for consistency. 
+When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. ::: zone pivot="programming-language-csharp" ```csharp @@ -104,9 +104,9 @@ async for response in agent.invoke_stream(chat) ::: zone-end -### Streamed response from _OpenAI Assistant Agent_ +### Streamed response from `OpenAIAssistantAgent` -When invoking a streamed response from an [_OpenAI Assistant Agent_](./assistant-agent.md), an optional _Chat History_ can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. +When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), an optional `ChatHistory` can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. ::: zone pivot="programming-language-csharp" ```csharp @@ -139,7 +139,7 @@ agent = OpenAIAssistantAgent(...) thread_id = await agent.create_thread() # Add user message to the conversation -await agent.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="")) +await agent.add_chat_message(message="") # Generate the streamed agent response(s) async for response in agent.invoke_stream(thread_id=thread_id): @@ -154,9 +154,9 @@ async for response in agent.invoke_stream(thread_id=thread_id): ::: zone-end -## Streaming with _Agent Chat_ +## Streaming with `AgentChat` -When using [_Agent Chat_](./agent-chat.md), the full conversation history is always preserved and can be accessed directly through the [_Agent Chat_](./agent-chat.md) instance. Therefore, the key difference between streamed and non-streamed invocations lies in the delivery method and the resulting content type. In both cases, users can still access the complete history, but streamed responses provide real-time updates as the conversation progresses. This allows for greater flexibility in handling interactions, depending on the application's needs. +When using [`AgentChat`](./agent-chat.md), the full conversation history is always preserved and can be accessed directly through the [`AgentChat`](./agent-chat.md) instance. Therefore, the key difference between streamed and non-streamed invocations lies in the delivery method and the resulting content type. In both cases, users can still access the complete history, but streamed responses provide real-time updates as the conversation progresses. This allows for greater flexibility in handling interactions, depending on the application's needs. 
::: zone pivot="programming-language-csharp" ```csharp @@ -202,6 +202,8 @@ chat = AgentGroupChat( termination_strategy=DefaultTerminationStrategy(maximum_iterations=10), ) +await chat.add_chat_message("") + # Invoke agents last_agent = None async for response in chat.invoke_stream(): diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index e972ea8a..35c851d4 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -1,5 +1,5 @@ --- -title: Create an Agent from a Semantic Kernel Template (Experimental) +title: Create an Agent from a Semantic Kernel Template description: Describes how to use a Semantic Kernel template to define an agent. zone_pivot_groups: programming-languages author: crickman @@ -10,8 +10,8 @@ ms.service: semantic-kernel --- # Create an Agent from a Semantic Kernel Template -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Prompt Templates in Semantic Kernel @@ -95,7 +95,7 @@ agent = ChatCompletionAgent( #### OpenAI Assistant Agent -Templated instructions are especially powerful when working with an [_OpenAI Assistant Agent_](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. +Templated instructions are especially powerful when working with an [`OpenAIAssistantAgent`](./assistant-agent.md). With this approach, a single assistant definition can be created and reused multiple times, each time with different parameter values tailored to specific tasks or contexts. This enables a more efficient setup, allowing the same assistant framework to handle a wide range of scenarios while maintaining consistency in its core behavior. ::: zone pivot="programming-language-csharp" ```csharp @@ -208,7 +208,7 @@ agent = ChatCompletionAgent( ### Overriding Template Values for Direct Invocation -When invoking an agent directly, without using [_Agent Chat_](./agent-chat.md), the agent's parameters can be overridden as needed. This allows for greater control and customization of the agent's behavior during specific tasks, enabling you to modify its instructions or settings on the fly to suit particular requirements. +When invoking an agent directly, without using [`AgentChat`](./agent-chat.md), the agent's parameters can be overridden as needed. This allows for greater control and customization of the agent's behavior during specific tasks, enabling you to modify its instructions or settings on the fly to suit particular requirements. 
::: zone pivot="programming-language-csharp" ```csharp @@ -263,6 +263,12 @@ chat = ChatHistory() override_arguments = KernelArguments(topic="Cat", length="3") +# Two ways to get a response from the agent + +# Get the response which returns a ChatMessageContent directly +response = await agent.get_response(chat, arguments=override_arguments) + +# or use the invoke method to return an AsyncIterable of ChatMessageContent async for response in agent.invoke(chat, arguments=override_arguments): # process agent response(s)... ``` @@ -280,7 +286,7 @@ async for response in agent.invoke(chat, arguments=override_arguments): For an end-to-end example for creating an agent from a _pmompt-template_, see: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) > [!div class="nextstepaction"] diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index e4d36c81..17ee3199 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -1,5 +1,5 @@ --- -title: Exploring the Semantic Kernel OpenAI Assistant Agent (Experimental) +title: Exploring the Semantic Kernel OpenAI Assistant Agent description: An exploration of the definition, behaviors, and usage patterns for a `OpenAIAssistantAgent` zone_pivot_groups: programming-languages author: crickman @@ -8,10 +8,10 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring the _Semantic Kernel_ _OpenAI Assistant Agent_ +# Exploring the _Semantic Kernel_ `OpenAIAssistantAgent` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. Detailed API documentation related to this discussion is available at: @@ -24,7 +24,6 @@ Detailed API documentation related to this discussion is available at: ::: zone pivot="programming-language-python" -- [`open_ai_assistant_base`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_base) - [`azure_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent) - [`open_ai_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent) @@ -46,9 +45,9 @@ The _OpenAI Assistant API_ is a specialized interface designed for more advanced - [Assistant API in Azure](/azure/ai-services/openai/assistants-quickstart) -## Creating an _OpenAI Assistant Agent_ +## Creating an `OpenAIAssistantAgent` -Creating an _OpenAI Assistant_ requires invoking a remote service, which is handled asynchronously. To manage this, the _OpenAI Assistant Agent_ is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. +Creating an `OpenAIAssistant` requires invoking a remote service, which is handled asynchronously. To manage this, the `OpenAIAssistantAgent` is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. 
This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use. ::: zone pivot="programming-language-csharp" ```csharp @@ -66,20 +65,40 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -azure_agent = await AzureAssistantAgent.create( - kernel=kernel, - service_id=service_id, - name="", - instructions="" +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent + +# Set up the client and model using Azure OpenAI Resources +client, model = AzureAssistantAgent.setup_resources() + +# Define the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) # or -openai_agent = await OpenAIAssistantAgent.create( - kernel=kernel, - service_id=service_id, - name="", - instructions="" +# Set up the client and model using OpenAI Resources +client, model = OpenAIAssistantAgent.setup_resources() + +# Define the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the OpenAIAssistantAgent instance using the client and the assistant definition +agent = OpenAIAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -91,9 +110,9 @@ openai_agent = await OpenAIAssistantAgent.create( ::: zone-end -## Retrieving an _OpenAI Assistant Agent_ +## Retrieving an `OpenAIAssistantAgent` -Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an _OpenAI Assistant Agent_ from an existing assistant definition. +Once created, the identifier of the assistant may be access via its identifier. This identifier may be used to create an `OpenAIAssistantAgent` from an existing assistant definition. ::: zone pivot="programming-language-csharp" @@ -110,11 +129,29 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -agent = await OpenAIAssistantAgent.retrieve(id=agent_id, kernel=kernel) +# Using Azure OpenAI Resources -# or +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions="", +) -agent = await AzureAssistantAgent.retrieve(id=agent_id, kernel=kernel) +# Store the assistant ID +assistant_id = definition.id + +# Retrieve the assistant definition from the server based on the assistant ID +new_asst_definition = await client.beta.assistants.retrieve(assistant_id) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=new_asst_definition, +) ``` ::: zone-end @@ -125,9 +162,9 @@ agent = await AzureAssistantAgent.retrieve(id=agent_id, kernel=kernel) ::: zone-end -## Using an _OpenAI Assistant Agent_ +## Using an `OpenAIAssistantAgent` -As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. 
Interactions with your _OpenAI Assistant Agent_ are tied to this specific thread identifier which must be specified when calling the agent/
+As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier, which must be specified when calling the agent.
 
 ::: zone pivot="programming-language-csharp"
 ```csharp
@@ -181,7 +218,7 @@ await agent.delete_thread(thread_id)
 
 ::: zone-end
 
-## Deleting an _OpenAI Assistant Agent_
+## Deleting an `OpenAIAssistantAgent`
 
 Since the assistant's definition is stored remotely, it supports the capability to self-delete. This enables the agent to be removed from the system when it is no longer needed.
 
@@ -217,12 +254,12 @@ is_deleted = agent._is_deleted
 
 ## How-To
 
-For an end-to-end example for a _OpenAI Assistant Agent_, see:
+For an end-to-end example for an `OpenAIAssistantAgent`, see:
 
-- [How-To: _OpenAI Assistant Agent_ Code Interpreter](./examples/example-assistant-code.md)
-- [How-To: _OpenAI Assistant Agent_ File Search](./examples/example-assistant-search.md)
+- [How-To: `OpenAIAssistantAgent` Code Interpreter](./examples/example-assistant-code.md)
+- [How-To: `OpenAIAssistantAgent` File Search](./examples/example-assistant-search.md)
 
 > [!div class="nextstepaction"]
-> [Agent Collaboration in _Agent Chat_](./agent-chat.md)
+> [Agent Collaboration in `AgentChat`](./agent-chat.md)
 
diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
index b3e7a40d..d1044239 100644
--- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md
+++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
@@ -1,5 +1,5 @@
 ---
-title: Exploring the Semantic Kernel Chat Completion Agent (Experimental)
+title: Exploring the Semantic Kernel Chat Completion Agent
 description: An exploration of the definition, behaviors, and usage patterns for a Chat Completion Agent
 zone_pivot_groups: programming-languages
 author: crickman
@@ -10,8 +10,8 @@ ms.service: semantic-kernel
 ---
 # Exploring the _Semantic Kernel_ Chat Completion Agent
 
-> [!WARNING]
-> The *Semantic Kernel Agent Framework* is in preview and is subject to change.
+> [!IMPORTANT]
+> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability.
 
 Detailed API documentation related to this discussion is available at:
 
@@ -76,7 +76,7 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic
 
 ## Creating a Chat Completion Agent
 
-A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [_Kernel_](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [_Kernel_](../../concepts/kernel.md) instance.
+A _chat completion agent_ is fundamentally based on an [AI service](../../concepts/ai-services/index.md). As such, creating a _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. 
::: zone pivot="programming-language-csharp" ```csharp @@ -104,11 +104,10 @@ ChatCompletionAgent agent = kernel = Kernel() # Add the AzureChatCompletion AI Service to the Kernel -kernel.add_service(AzureChatCompletion(service_id="")) +kernel.add_service(AzureChatCompletion()) # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="", instructions="", @@ -125,9 +124,9 @@ agent = ChatCompletionAgent( ## AI Service Selection -No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a _chat completion agent_ support the specification of a _service-selector_. A _service-selector_ indentifies which [AI service](../../concepts/ai-services/index.md) to target when the [_Kernel_](../../concepts/kernel.md) contains more than one. +No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a _service-selector_. A _service-selector_ indentifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. -> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the _Agent Framework_ +> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` ::: zone pivot="programming-language-csharp" ```csharp @@ -157,18 +156,26 @@ ChatCompletionAgent agent = ::: zone pivot="programming-language-python" ```python +from semantic_kernel.connectors.ai.open_ai import ( + AzureChatCompletion, + AzureChatPromptExecutionSettings, +) + # Define the Kernel kernel = Kernel() # Add the AzureChatCompletion AI Service to the Kernel -kernel.add_service(AzureChatCompletion(service_id="")) +kernel.add_service(AzureChatCompletion(service_id="service1")) +kernel.add_service(AzureChatCompletion(service_id="service2")) + +settings = AzureChatPromptExecutionSettings(service_id="service2") # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="", instructions="", + arguments=KernelArguments(settings=settings) ) ``` ::: zone-end @@ -179,11 +186,12 @@ agent = ChatCompletionAgent( ::: zone-end -## Conversing with _Chat Completion Agent_ - -Conversing with your _Chat Completion Agent_ is based on a _Chat History_ instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). +## Conversing with `ChatCompletionAgent` ::: zone pivot="programming-language-csharp" + +Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). + ```csharp // Define agent ChatCompletionAgent agent = ...; @@ -203,6 +211,11 @@ await foreach (ChatMessageContent response in agent.InvokeAsync(chat)) ::: zone-end ::: zone pivot="programming-language-python" + +There are multiple ways to converse with a `ChatCompletionAgent`. + +The easiest is to call and await `get_response`: + ```python # Define agent agent = ChatCompletionAgent(...) 
@@ -213,10 +226,44 @@ chat = ChatHistory() # Add the user message chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +# Generate the agent response +response = await agent.get_response(chat) +# response is a `ChatMessageContent` object +``` +Otherwise, calling the `invoke` method returns an `AsyncIterable` of `ChatMessageContent`. + +```python +# Define agent +agent = ChatCompletionAgent(...) + +# Define the chat history +chat = ChatHistory() + +# Add the user message +chat.add_user_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + # Generate the agent response(s) async for response in agent.invoke(chat): # process agent response(s) ``` + +The `ChatCompletionAgent` also supports streaming in which the `invoke_stream` method returns an `AsyncIterable` of `StreamingChatMessageContent`: + +```python +# Define agent +agent = ChatCompletionAgent(...) + +# Define the chat history +chat = ChatHistory() + +# Add the user message +chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + +# Generate the agent response(s) +async for response in agent.invoke_stream(chat): + # process agent response(s) +``` + ::: zone-end ::: zone pivot="programming-language-java" @@ -228,10 +275,10 @@ async for response in agent.invoke(chat): #### How-To: -For an end-to-end example for a _Chat Completion Agent_, see: +For an end-to-end example for a `ChatCompletionAgent`, see: -- [How-To: _Chat Completion Agent_](./examples/example-chat-agent.md) +- [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) > [!div class="nextstepaction"] -> [Exploring _OpenAI Assistant Agent_](./assistant-agent.md) +> [Exploring `OpenAIAssistantAgent`](./assistant-agent.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index fc11bf27..d557c7fc 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -10,12 +10,12 @@ ms.service: semantic-kernel --- # How-To: Coordinate Agent Collaboration using Agent Group Chat -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. ## Overview -In this sample, we will explore how to use _Agent Group Chat_ to coordinate collboration of two different agents working to review and rewrite user provided content. Each agent is assigned a distinct role: +In this sample, we will explore how to use `AgentGroupChat` to coordinate collboration of two different agents working to review and rewrite user provided content. Each agent is assigned a distinct role: - **Reviewer**: Reviews and provides direction to _Writer_. - **Writer**: Updates user content based on _Reviewer_ input. @@ -61,7 +61,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -81,24 +81,21 @@ Start by installing the Semantic Kernel Python package. 
pip install semantic-kernel ``` +Next add the required imports. + ```python import asyncio import os -import copy +from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent -from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( +from semantic_kernel.agents.strategies import ( KernelFunctionSelectionStrategy, -) -from semantic_kernel.agents.strategies.termination.kernel_function_termination_strategy import ( KernelFunctionTerminationStrategy, ) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_decorator import kernel_function -from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt -from semantic_kernel.kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistoryTruncationReducer +from semantic_kernel.functions import KernelFunctionFromPrompt ``` ::: zone-end @@ -108,13 +105,12 @@ from semantic_kernel.kernel import Kernel ::: zone-end - ## Configuration -This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. - ::: zone pivot="programming-language-csharp" +This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. + ```powershell # OpenAI dotnet user-secrets set "OpenAISettings:ApiKey" "" @@ -173,7 +169,7 @@ public class Settings ::: zone-end ::: zone pivot="programming-language-python" -The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). +The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). The sample requires that you have Azure OpenAI or OpenAI resources available. Configure the following settings in your `.env` file for either Azure OpenAI or OpenAI: @@ -197,21 +193,20 @@ Once configured, the respective AI service classes will pick up the required var ::: zone-end - ## Coding The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [_Agent_ Definition](#agent-definition) - Create the two _Chat Completion Agent_ instances (_Reviewer_ and _Writer_). -3. [_Chat_ Definition](#chat-definition) - Create the _Agent Group Chat_ and associated strategies. +2. [`Agent` Definition](#agent-definition) - Create the two `ChatCompletionAgent` instances (_Reviewer_ and _Writer_). +3. [_Chat_ Definition](#chat-definition) - Create the `AgentGroupChat` and associated strategies. 4. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating any _Chat Completion Agent_, the configuration settings, plugins, and _Kernel_ must be initialized. +Prior to creating any `ChatCompletionAgent`, the configuration settings, plugins, and `Kernel` must be initialized. 
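As a quick point of reference before the language-specific snippets below, a `Kernel` with a single Azure chat-completion service can be built in just a few lines. This is a minimal sketch, assuming the Azure OpenAI environment variables described in the Configuration section are already set; the helper name is illustrative and not taken from the sample.

```python
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion


def create_kernel() -> Kernel:
    # Build a Kernel with one Azure OpenAI chat-completion service,
    # configured from environment variables or the .env file
    kernel = Kernel()
    kernel.add_service(AzureChatCompletion())
    return kernel


kernel = create_kernel()
```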
::: zone pivot="programming-language-csharp" @@ -252,14 +247,12 @@ kernel = Kernel() ::: zone-end ::: zone pivot="programming-language-java" - > Agents are currently unavailable in Java. - ::: zone-end -Let's also create a second _Kernel_ instance via _cloning_ and add a plug-in that will allow the reivew to place updated content on the clip-board. - ::: zone pivot="programming-language-csharp" +Let's also create a second `Kernel` instance via _cloning_ and add a plug-in that will allow the reivew to place updated content on the clip-board. + ```csharp Kernel toolKernel = kernel.Clone(); toolKernel.Plugins.AddFromType(); @@ -313,7 +306,7 @@ private sealed class ClipboardAccess ### Agent Definition ::: zone pivot="programming-language-csharp" -Let's declare the agent names as `const` so they might be referenced in _Agent Group Chat_ strategies: +Let's declare the agent names as `const` so they might be referenced in `AgentGroupChat` strategies: ```csharp const string ReviewerName = "Reviewer"; @@ -374,7 +367,6 @@ ChatCompletionAgent agentReviewer = ::: zone pivot="programming-language-python" ```python agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions=""" @@ -425,7 +417,6 @@ ChatCompletionAgent agentWriter = The _Writer_ agent is similiar. It is given a single-purpose task, follow direction and rewrite the content. ```python agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions=""" @@ -446,9 +437,9 @@ Your sole responsibility is to rewrite content according to review suggestions. ### Chat Definition -Defining the _Agent Group Chat_ requires considering the strategies for selecting the _Agent_ turn and determining when to exit the _Chat_ loop. For both of these considerations, we will define a _Kernel Prompt Function_. +Defining the `AgentGroupChat` requires considering the strategies for selecting the `Agent` turn and determining when to exit the _Chat_ loop. For both of these considerations, we will define a _Kernel Prompt Function_. -The first to reason over _Agent_ selection: +The first to reason over `Agent` selection: ::: zone pivot="programming-language-csharp" @@ -481,8 +472,8 @@ KernelFunction selectionFunction = ::: zone pivot="programming-language-python" ```python selection_function = KernelFunctionFromPrompt( - function_name="selection", - prompt=f""" + function_name="selection", + prompt=f""" Examine the provided RESPONSE and choose the next participant. State only the name of the chosen participant without explanation. Never choose the participant named in the RESPONSE. @@ -499,7 +490,7 @@ Rules: RESPONSE: {{{{$lastmessage}}}} """ - ) +) ``` ::: zone-end @@ -532,11 +523,11 @@ KernelFunction terminationFunction = ::: zone pivot="programming-language-python" ```python - termination_keyword = "yes" +termination_keyword = "yes" - termination_function = KernelFunctionFromPrompt( - function_name="termination", - prompt=f""" +termination_function = KernelFunctionFromPrompt( + function_name="termination", + prompt=f""" Examine the RESPONSE and determine whether the content has been deemed satisfactory. If the content is satisfactory, respond with a single word without explanation: {termination_keyword}. If specific suggestions are being provided, it is not satisfactory. @@ -545,7 +536,7 @@ If no correction is suggested, it is satisfactory. 
RESPONSE: {{{{$lastmessage}}}} """ - ) +) ``` ::: zone-end @@ -575,7 +566,7 @@ history_reducer = ChatHistoryTruncationReducer(target_count=1) ::: zone-end -Finally we are ready to bring everything together in our _Agent Group Chat_ definition. +Finally we are ready to bring everything together in our `AgentGroupChat` definition. ::: zone pivot="programming-language-csharp" @@ -668,9 +659,9 @@ The `lastmessage` `history_variable_name` corresponds with the `KernelFunctionSe ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent Group Chat_. Start by creating creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `AgentGroupChat`. Start by creating creating an empty loop. -> Note: Unlike the other examples, no external history or _thread_ is managed. _Agent Group Chat_ manages the conversation history internally. +> Note: Unlike the other examples, no external history or _thread_ is managed. `AgentGroupChat` manages the conversation history internally. ::: zone pivot="programming-language-csharp" ```csharp @@ -700,9 +691,9 @@ while not is_complete: Now let's capture user input within the previous loop. In this case: - Empty input will be ignored - The term `EXIT` will signal that the conversation is completed -- The term `RESET` will clear the _Agent Group Chat_ history +- The term `RESET` will clear the `AgentGroupChat` history - Any term starting with `@` will be treated as a file-path whose content will be provided as input -- Valid input will be added to the _Agent Group Chat_ as a _User_ message. +- Valid input will be added to the `AgentGroupChat` as a _User_ message. ```csharp Console.WriteLine(); @@ -753,9 +744,9 @@ chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); Now let's capture user input within the previous loop. In this case: - Empty input will be ignored. - The term `exit` will signal that the conversation is complete. -- The term `reset` will clear the _Agent Group Chat_ history. +- The term `reset` will clear the `AgentGroupChat` history. - Any term starting with `@` will be treated as a file-path whose content will be provided as input. -- Valid input will be added to the _Agent Group Chat_ as a _User_ message. +- Valid input will be added to the `AgentGroupChat` as a _User_ message. The operation logic inside the while loop looks like: @@ -790,7 +781,7 @@ if user_input.startswith("@") and len(user_input) > 1: continue # Add the current user_input to the chat -await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) +await chat.add_chat_message(message=user_input) ``` ::: zone-end @@ -800,7 +791,7 @@ await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=use ::: zone-end -To initate the _Agent_ collaboration in response to user input and display the _Agent_ responses, invoke the _Agent Group Chat_; however, first be sure to reset the _Completion_ state from any prior invocation. +To initate the `Agent` collaboration in response to user input and display the `Agent` responses, invoke the `AgentGroupChat`; however, first be sure to reset the _Completion_ state from any prior invocation. > Note: Service failures are being caught and displayed to avoid crashing the conversation loop. 
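For the Python pivot, the corresponding pattern appears in full in the Final section below. As a condensed sketch, the completion flag is reset and the invocation is guarded; the response formatting here is simplified for illustration.

```python
# Reset the completion state from any prior invocation
chat.is_complete = False

try:
    async for response in chat.invoke():
        if response is None or not response.name:
            continue
        print(f"# {response.name.upper()}:\n{response.content}")
except Exception as e:
    # Surface service failures without crashing the conversation loop
    print(f"Error during chat invocation: {e}")
```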
@@ -853,7 +844,6 @@ chat.is_complete = False ::: zone-end - ## Final ::: zone pivot="programming-language-csharp" @@ -1148,24 +1138,20 @@ import os from semantic_kernel import Kernel from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent -from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( +from semantic_kernel.agents.strategies import ( KernelFunctionSelectionStrategy, -) -from semantic_kernel.agents.strategies.termination.kernel_function_termination_strategy import ( KernelFunctionTerminationStrategy, ) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.history_reducer.chat_history_truncation_reducer import ChatHistoryTruncationReducer -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt - -################################################################### -# The following sample demonstrates how to create a simple, # -# agent group chat that utilizes a Reviewer Chat Completion # -# Agent along with a Writer Chat Completion Agent to # -# complete a user's task. # -################################################################### +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistoryTruncationReducer +from semantic_kernel.functions import KernelFunctionFromPrompt + +""" +The following sample demonstrates how to create a simple, +agent group chat that utilizes a Reviewer Chat Completion +Agent along with a Writer Chat Completion Agent to +complete a user's task. +""" # Define agent names REVIEWER_NAME = "Reviewer" @@ -1185,7 +1171,6 @@ async def main(): # Create ChatCompletionAgents using the same kernel. agent_reviewer = ChatCompletionAgent( - service_id=REVIEWER_NAME, kernel=kernel, name=REVIEWER_NAME, instructions=""" @@ -1202,7 +1187,6 @@ RULES: ) agent_writer = ChatCompletionAgent( - service_id=WRITER_NAME, kernel=kernel, name=WRITER_NAME, instructions=""" @@ -1312,7 +1296,7 @@ RESPONSE: continue # Add the current user_input to the chat - await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) + await chat.add_chat_message(message=user_input) try: async for response in chat.invoke(): diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index f8fab56a..be6fdc4d 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -1,5 +1,5 @@ --- -title: How-To: _OpenAI Assistant Agent_ Code Interpreter (Experimental) +title: How-To: `OpenAIAssistantAgent` Code Interpreter description: A step-by-step walk-through of defining and utilizing the code-interpreter tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _OpenAI Assistant Agent_ Code Interpreter +# How-To: `OpenAIAssistantAgent` Code Interpreter -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. 
Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Overview -In this sample, we will explore how to use the _code-interpreter_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. +In this sample, we will explore how to use the _code-interpreter_ tool of an [`OpenAIAssistantAgent`](../assistant-agent.md) to complete data-analysis tasks. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will generate both image and text responses. This will demonstrate the versatility of this tool in performing quantitative analysis. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -56,7 +56,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -79,29 +79,27 @@ Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` da ::: zone-end ::: zone pivot="programming-language-python" + Start by creating a folder that will hold your script (`.py` file) and the sample resources. Include the following imports at the top of your `.py` file: ```python import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_file_reference_content import StreamingFileReferenceContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingFileReferenceContent ``` Additionally, copy the `PopulationByAdmin1.csv` and `PopulationByCountry.csv` data files from the [_Semantic Kernel_ `learn_resources/resources` directory](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files to your working directory. ::: zone-end + ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. ::: zone-end - ## Configuration This sample requires configuration setting in order to connect to remote services. You will need to define settings for either _OpenAI_ or _Azure OpenAI_. @@ -164,6 +162,7 @@ public class Settings } ``` ::: zone-end + ::: zone pivot="programming-language-python" The quickest way to get started with the proper configuration to run the sample code is to create a `.env` file at the root of your project (where your script is run). @@ -198,17 +197,17 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. 
[Agent Definition](#agent-definition) - Create the _OpenAI_Assistant_Agent_ with templatized instructions and plug-in. +2. [Agent Definition](#agent-definition) - Create the _OpenAI_Assistant`Agent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. - ::: zone pivot="programming-language-csharp" +Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. + Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `OpenAIClientProvider` that will be used for the [Agent Definition](#agent-definition) as well as file-upload. ```csharp @@ -239,11 +238,13 @@ OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByC ::: zone pivot="programming-language-python" +Prior to creating an `AzureAssistantAgent` or an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. + > [!TIP] > You may need to adjust the file paths depending upon where your files are located. ```python -# Let's form the file paths that we will later pass to the assistant +# Let's form the file paths that we will use as part of file upload csv_file_path_1 = os.path.join( os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", @@ -256,7 +257,39 @@ csv_file_path_2 = os.path.join( "PopulationByCountry.csv", ) ``` -You may need to modify the path creation code based on the storage location of your CSV files. + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Upload the files to the client +file_ids: list[str] = [] +for path in [csv_file_path_1, csv_file_path_2]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + +# Get the code interpreter tool and resources +code_interpreter_tools, code_interpreter_tool_resources = AzureAssistantAgent.configure_code_interpreter_tool( + file_ids=file_ids +) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions=""" + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. + """, + name="SampleAssistantAgent", + tools=code_interpreter_tools, + tool_resources=code_interpreter_tool_resources, +) +``` + +We first set up the Azure OpenAI resources to obtain the client and model. Next, we upload the CSV files from the specified paths using the client's Files API. We then configure the `code_interpreter_tool` using the uploaded file IDs, which are linked to the assistant upon creation along with the model, instructions, and name. ::: zone-end @@ -270,7 +303,7 @@ You may need to modify the path creation code based on the storage location of y ::: zone pivot="programming-language-csharp" -We are now ready to instantiate an _OpenAI Assistant Agent_. 
The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. +We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. ```csharp Console.WriteLine("Defining agent..."); @@ -296,21 +329,13 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" -We are now ready to instantiate an _Azure Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. +We are now ready to instantiate an `AzureAssistantAgent`. The agent is configured with the client and the assistant definition. ```python -agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", - instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -323,7 +348,7 @@ agent = await AzureAssistantAgent.create( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. @@ -369,11 +394,10 @@ try: while not is_complete: # agent interaction logic here finally: - print("Cleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + print("\nCleaning up resources...") + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) ``` ::: zone-end @@ -426,7 +450,7 @@ await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(rol ::: zone-end -Before invoking the _Agent_ response, let's add some helper methods to download any files that may be produced by the _Agent_. +Before invoking the `Agent` response, let's add some helper methods to download any files that may be produced by the `Agent`. ::: zone pivot="programming-language-csharp" Here we're place file content in the system defined temporary directory and then launching the system defined viewer application. @@ -513,7 +537,7 @@ async def download_response_image(agent, file_ids: list[str]): ::: zone-end -To generate an _Agent_ response to user input, invoke the agent by specifying the _Assistant Thread_. 
In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. +To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. ::: zone pivot="programming-language-csharp" ```csharp @@ -743,19 +767,16 @@ import asyncio import logging import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_file_reference_content import StreamingFileReferenceContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingFileReferenceContent logging.basicConfig(level=logging.ERROR) -################################################################### -# The following sample demonstrates how to create a simple, # -# OpenAI assistant agent that utilizes the code interpreter # -# to analyze uploaded files. # -################################################################### +""" +The following sample demonstrates how to create a simple, +OpenAI assistant agent that utilizes the code interpreter +to analyze uploaded files. +""" # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( @@ -802,22 +823,43 @@ async def download_response_image(agent: AzureAssistantAgent, file_ids: list[str async def main(): - agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", + # Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # Upload the files to the client + file_ids: list[str] = [] + for path in [csv_file_path_1, csv_file_path_2]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + + # Get the code interpreter tool and resources + code_interpreter_tools, code_interpreter_tool_resources = AzureAssistantAgent.configure_code_interpreter_tool( + file_ids=file_ids + ) + + # Create the assistant definition + definition = await client.beta.assistants.create( + model=model, instructions=""" - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - enable_code_interpreter=True, - code_interpreter_filenames=[csv_file_path_1, csv_file_path_2], + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. 
+ """, + name="SampleAssistantAgent", + tools=code_interpreter_tools, + tool_resources=code_interpreter_tool_resources, + ) + + # Create the agent using the client and the assistant definition + agent = AzureAssistantAgent( + client=client, + definition=definition, ) print("Creating thread...") - thread_id = await agent.create_thread() + thread = await client.beta.threads.create() try: is_complete: bool = False @@ -829,14 +871,13 @@ async def main(): if user_input.lower() == "exit": is_complete = True + break - await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) - ) + await agent.add_chat_message(thread_id=thread.id, message=user_input) is_code = False last_role = None - async for response in agent.invoke_stream(thread_id=thread_id): + async for response in agent.invoke_stream(thread_id=thread.id): current_is_code = response.metadata.get("code", False) if current_is_code: @@ -858,16 +899,16 @@ async def main(): ]) if is_code: print("```\n") + print() await download_response_image(agent, file_ids) file_ids.clear() finally: print("\nCleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.code_interpreter_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) if __name__ == "__main__": @@ -885,5 +926,5 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _OpenAI Assistant Agent_ Code File Search](./example-assistant-search.md) +> [How-To: `OpenAIAssistantAgent` Code File Search](./example-assistant-search.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index e05c069c..1aad1696 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -1,5 +1,5 @@ --- -title: How-To: _OpenAI Assistant Agent_ File Search (Experimental) +title: How-To: `OpenAIAssistantAgent` File Search description: A step-by-step walk-through of defining and utilizing the file-search tool of an OpenAI Assistant Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _OpenAI Assistant Agent_ File Search +# How-To: `OpenAIAssistantAgent` File Search -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. ## Overview -In this sample, we will explore how to use the _file-search_ tool of an [_OpenAI Assistant Agent_](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. As part of the task, the agent will provide document citations within the response. +In this sample, we will explore how to use the _file-search_ tool of an [`OpenAIAssistantAgent`](../assistant-agent.md) to complete comprehension tasks. The approach will be step-by-step, ensuring clarity and precision throughout the process. 
As part of the task, the agent will provide document citations within the response. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -54,7 +54,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -86,11 +86,8 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_annotation_content import StreamingAnnotationContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingAnnotationContent ``` Additionally, copy the `Grimms-The-King-of-the-Golden-Mountain.txt`, `Grimms-The-Water-of-Life.txt` and `Grimms-The-White-Snake.txt` public domain content from [_Semantic Kernel_ `LearnResources` Project](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/learn_resources/resources). Add these files in your project folder. @@ -201,14 +198,14 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [Agent Definition](#agent-definition) - Create the _Chat_Completion_Agent_ with templatized instructions and plug-in. +2. [Agent Definition](#agent-definition) - Create the _Chat_Completion`Agent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating an _OpenAI Assistant Agent_, ensure the configuration settings are available and prepare the file resources. +Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. ::: zone pivot="programming-language-csharp" @@ -225,6 +222,16 @@ OpenAIClientProvider clientProvider = ``` ::: zone-end + +::: zone pivot="programming-language-python" +The class method `setup_resources()` on the Assistant Agent handles creating the client and returning it and the model to use based on the desired configuration. Pydantic settings are used to load environment variables first from environment variables or from the `.env` file. One may pass in the `api_key`, `api_version`, `deployment_name` or `endpoint`, which will take precedence over any environment variables configured. + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() +``` +::: zone-end + ::: zone pivot="programming-language-java" > Agents are currently unavailable in Java. 
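If you would rather not rely on environment variables or a `.env` file, explicit values can be passed to the same call. This is a minimal sketch using placeholder values, assuming the keyword names match the settings listed above (`api_key`, `api_version`, `deployment_name`, `endpoint`):

```python
# Explicit values take precedence over any environment-based configuration
client, model = AzureAssistantAgent.setup_resources(
    api_key="<your-api-key>",
    endpoint="https://<your-resource>.openai.azure.com",
    deployment_name="<your-deployment-name>",
    api_version="<api-version>",
)
```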
@@ -248,8 +255,21 @@ string storeId = operation.VectorStoreId; ::: zone pivot="programming-language-python" ```python def get_filepath_for_filename(filename: str) -> str: - base_directory = os.path.dirname(os.path.realpath(__file__)) + base_directory = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) return os.path.join(base_directory, filename) + +# Upload the files to the client +file_ids: list[str] = [] +for path in [get_filepath_for_filename(filename) for filename in filenames]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + +# Get the file search tool and resources +file_search_tools, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool(file_ids=file_ids) ``` ::: zone-end @@ -315,7 +335,7 @@ foreach (string fileName in _fileNames) ### Agent Definition -We are now ready to instantiate an _OpenAI Assistant Agent_. The agent is configured with its target model, _Instructions_, and the _File Search_ tool enabled. Additionally, we explicitly associate the _Vector Store_ with the _File Search_ tool. +We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configured with its target model, _Instructions_, and the _File Search_ tool enabled. Additionally, we explicitly associate the _Vector Store_ with the _File Search_ tool. ::: zone pivot="programming-language-csharp" @@ -345,18 +365,24 @@ OpenAIAssistantAgent agent = ::: zone pivot="programming-language-python" ```python -agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, instructions=""" The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. """, - enable_file_search=True, - vector_store_filenames=[get_filepath_for_filename(filename) for filename in filenames], + name="SampleAssistantAgent", + tools=file_search_tools, + tool_resources=file_search_tool_resources, +) + +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, ) ``` ::: zone-end @@ -369,7 +395,7 @@ agent = await AzureAssistantAgent.create( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. @@ -458,6 +484,7 @@ if not user_input: if user_input.lower() == "exit": is_complete = True + break await agent.add_chat_message( thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) @@ -471,7 +498,7 @@ await agent.add_chat_message( ::: zone-end -Before invoking the _Agent_ response, let's add a helper method to reformat the unicode annotation brackets to ANSI brackets. +Before invoking the `Agent` response, let's add a helper method to reformat the unicode annotation brackets to ANSI brackets. 
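For the Python pivot, a minimal equivalent helper might look like the sketch below. It assumes the citation annotations use the 【 and 】 characters, and the function name is illustrative rather than taken from the sample.

```python
def replace_unicode_brackets(content: str) -> str:
    # Map the unicode citation brackets used in annotations to plain ASCII brackets
    return content.replace("【", "[").replace("】", "]")
```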
::: zone pivot="programming-language-csharp" ```csharp @@ -492,7 +519,7 @@ private static string ReplaceUnicodeBrackets(this string content) => ::: zone-end -To generate an _Agent_ response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. +To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. ::: zone pivot="programming-language-csharp" ```csharp @@ -702,18 +729,26 @@ public static class Program ::: zone pivot="programming-language-python" ```python +# Copyright (c) Microsoft. All rights reserved. + import asyncio import os -from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_annotation_content import StreamingAnnotationContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.kernel import Kernel +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.contents import StreamingAnnotationContent + +""" +The following sample demonstrates how to create a simple, +OpenAI assistant agent that utilizes the vector store +to answer questions based on the uploaded documents. +""" def get_filepath_for_filename(filename: str) -> str: - base_directory = os.path.dirname(os.path.realpath(__file__)) + base_directory = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) return os.path.join(base_directory, filename) @@ -725,22 +760,48 @@ filenames = [ async def main(): - agent = await AzureAssistantAgent.create( - kernel=Kernel(), - service_id="agent", - name="SampleAssistantAgent", + # Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # Upload the files to the client + file_ids: list[str] = [] + for path in [get_filepath_for_filename(filename) for filename in filenames]: + with open(path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + file_ids.append(file.id) + + vector_store = await client.beta.vector_stores.create( + name="assistant_search", + file_ids=file_ids, + ) + + # Get the file search tool and resources + file_search_tools, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool( + vector_store_ids=vector_store.id + ) + + # Create the assistant definition + definition = await client.beta.assistants.create( + model=model, instructions=""" The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. 
""", - enable_file_search=True, - vector_store_filenames=[get_filepath_for_filename(filename) for filename in filenames], + name="SampleAssistantAgent", + tools=file_search_tools, + tool_resources=file_search_tool_resources, + ) + + # Create the agent using the client and the assistant definition + agent = AzureAssistantAgent( + client=client, + definition=definition, ) print("Creating thread...") - thread_id = await agent.create_thread() + thread = await client.beta.threads.create() try: is_complete: bool = False @@ -753,12 +814,10 @@ async def main(): is_complete = True break - await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) - ) + await agent.add_chat_message(thread_id=thread.id, message=user_input) footnotes: list[StreamingAnnotationContent] = [] - async for response in agent.invoke_stream(thread_id=thread_id): + async for response in agent.invoke_stream(thread_id=thread.id): footnotes.extend([item for item in response.items if isinstance(item, StreamingAnnotationContent)]) print(f"{response.content}", end="", flush=True) @@ -773,11 +832,10 @@ async def main(): ) finally: - print("Cleaning up resources...") - if agent is not None: - [await agent.delete_file(file_id) for file_id in agent.file_search_file_ids] - await agent.delete_thread(thread_id) - await agent.delete() + print("\nCleaning up resources...") + [await client.files.delete(file_id) for file_id in file_ids] + await client.beta.threads.delete(thread.id) + await client.beta.assistants.delete(agent.id) if __name__ == "__main__": @@ -795,5 +853,5 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How to Coordinate Agent Collaboration using _Agent Group Chat_](./example-agent-collaboration.md) +> [How to Coordinate Agent Collaboration using `AgentGroupChat`](./example-agent-collaboration.md) diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index ced3bb01..d24a85f8 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -1,5 +1,5 @@ --- -title: How-To: _Chat Completion Agent_ (Experimental) +title: How-To: `ChatCompletionAgent` description: A step-by-step walk-through of defining and utilizing the features of a Chat Completion Agent. zone_pivot_groups: programming-languages author: crickman @@ -8,14 +8,14 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# How-To: _Chat Completion Agent_ +# How-To: `ChatCompletionAgent` -> [!WARNING] -> The *Semantic Kernel Agent Framework* is in preview and is subject to change. +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. ## Overview -In this sample, we will explore configuring a plugin to access _GitHub_ API and provide templatized instructions to a [_Chat Completion Agent_](../chat-completion-agent.md) to answer questions about a _GitHub_ repository. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will provide document citations within the response. 
+In this sample, we will explore configuring a plugin to access _GitHub_ API and provide templatized instructions to a [`ChatCompletionAgent`](../chat-completion-agent.md) to answer questions about a _GitHub_ repository. The approach will be broken down step-by-step to high-light the key parts of the coding process. As part of the task, the agent will provide document citations within the response. Streaming will be used to deliver the agent's responses. This will provide real-time updates as the task progresses. @@ -55,7 +55,7 @@ The project file (`.csproj`) should contain the following `PackageReference` def ``` -The _Agent Framework_ is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): +The `Agent Framework` is experimental and requires warning suppression. This may addressed in as a property in the project file (`.csproj`): ```xml @@ -76,13 +76,11 @@ import sys from datetime import datetime from semantic_kernel.agents import ChatCompletionAgent -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent +from semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel -from semantic_kernel.functions.kernel_arguments import KernelArguments # Adjust the sys.path so we can use the GitHubPlugin and GitHubSettings classes # This is so we can run the code from the samples/learn_resources/agent_docs directory @@ -201,14 +199,14 @@ Once configured, the respective AI service classes will pick up the required var The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. -2. [_Agent_ Definition](#agent-definition) - Create the _Chat Completion Agent_ with templatized instructions and plug-in. +2. [`Agent` Definition](#agent-definition) - Create the `ChatCompletionAgent` with templatized instructions and plug-in. 3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. ### Setup -Prior to creating a _Chat Completion Agent_, the configuration settings, plugins, and _Kernel_ must be initialized. +Prior to creating a `ChatCompletionAgent`, the configuration settings, plugins, and `Kernel` must be initialized. ::: zone pivot="programming-language-csharp" @@ -293,7 +291,7 @@ settings.function_choice_behavior = FunctionChoiceBehavior.Auto() ### Agent Definition -Finally we are ready to instantiate a _Chat Completion Agent_ with its _Instructions_, associated _Kernel_, and the default _Arguments_ and _Execution Settings_. In this case, we desire to have the any plugin functions automatically executed. +Finally we are ready to instantiate a `ChatCompletionAgent` with its _Instructions_, associated `Kernel`, and the default _Arguments_ and _Execution Settings_. In this case, we desire to have the any plugin functions automatically executed. 
::: zone pivot="programming-language-csharp" ```csharp @@ -328,7 +326,6 @@ Console.WriteLine("Ready!"); ::: zone pivot="programming-language-python" ```python agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="SampleAssistantAgent", instructions=f""" @@ -358,7 +355,7 @@ agent = ChatCompletionAgent( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the _Agent_. Start by creating a _Chat History_ object to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistory` object to maintain the conversation state and creating an empty loop. ::: zone pivot="programming-language-csharp" ```csharp @@ -386,7 +383,7 @@ while not is_complete: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the _Chat History_ as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the `ChatHistory` as a _User_ message. ::: zone pivot="programming-language-csharp" ```csharp @@ -429,9 +426,9 @@ history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input) ::: zone-end -To generate a _Agent_ response to user input, invoke the agent using _Arguments_ to provide the final template parameter that specifies the current date and time. +To generate a `Agent` response to user input, invoke the agent using _Arguments_ to provide the final template parameter that specifies the current date and time. -The _Agent_ response is then then displayed to the user. +The `Agent` response is then then displayed to the user. ::: zone pivot="programming-language-csharp" ```csharp @@ -628,7 +625,6 @@ async def main(): # Create the agent agent = ChatCompletionAgent( - service_id="agent", kernel=kernel, name="SampleAssistantAgent", instructions=f""" @@ -682,6 +678,6 @@ You may find the full [code](https://github.com/microsoft/semantic-kernel/blob/m > [!div class="nextstepaction"] -> [How-To: _OpenAI Assistant Agent_ Code Interpreter](./example-assistant-code.md) +> [How-To: `OpenAIAssistantAgent` Code Interpreter](./example-assistant-code.md) diff --git a/semantic-kernel/Frameworks/agent/index.md b/semantic-kernel/Frameworks/agent/index.md index 7d8299ea..67a76125 100644 --- a/semantic-kernel/Frameworks/agent/index.md +++ b/semantic-kernel/Frameworks/agent/index.md @@ -1,5 +1,5 @@ --- -title: Semantic Kernel Agent Framework (Experimental) +title: Semantic Kernel Agent Framework description: Introducing the Semantic Kernel Agent Framework zone_pivot_groups: programming-languages author: crickman @@ -10,10 +10,10 @@ ms.service: semantic-kernel --- # Semantic Kernel Agent Framework -> [!WARNING] -> The _Semantic Kernel Agent Framework_ is in preview and is subject to change. +> [!IMPORTANT] +> Single-agent features, such as ChatCompletionAgent and OpenAIAssistantAgent, are in the release candidate stage. These features are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. However, agent chat patterns are still in the experimental stage. 
These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. -The _Semantic Kernel Agent Framework_ provides a platform within the Semantic Kernel eco-system that allow for the creation of AI **agents** and the ability to incorporate **agentic patterns** into any application based on the same patterns and features that exist in the core _Semantic Kernel_ framework. +The Semantic Kernel Agent Framework provides a platform within the Semantic Kernel eco-system that allow for the creation of AI **agents** and the ability to incorporate **agentic patterns** into any application based on the same patterns and features that exist in the core Semantic Kernel framework. ## What is an AI agent? @@ -21,7 +21,7 @@ An **AI agent** is a software entity designed to perform tasks autonomously or s Agents can send and receive messages, generating responses using a combination of models, tools, human inputs, or other customizable components. -Agents are designed to work collaboratively, enabling complex workflows by interacting with each other. The _Agent Framework_ allows for the creation of both simple and sophisticated agents, enhancing modularity and ease of maintenance +Agents are designed to work collaboratively, enabling complex workflows by interacting with each other. The `Agent Framework` allows for the creation of both simple and sophisticated agents, enhancing modularity and ease of maintenance ## What problems do AI agents solve? @@ -63,8 +63,8 @@ For .NET SDK, serveral NuGet packages are available. Package|Description --|-- -[Microsoft.SemanticKernel](https://www.nuget.org/packages/Microsoft.SemanticKernel)|This contains the core _Semantic Kernel_ libraries for getting started with the _Agent Framework_. This must be explicitly referenced by your application. -[Microsoft.SemanticKernel.Agents.Abstractions](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Abstractions)|Defines the core agent abstractions for the _Agent Framework_. Generally not required to be specified as it is included in both the `Microsoft.SemanticKernel.Agents.Core` and `Microsoft.SemanticKernel.Agents.OpenAI` packages. +[Microsoft.SemanticKernel](https://www.nuget.org/packages/Microsoft.SemanticKernel)|This contains the core _Semantic Kernel_ libraries for getting started with the `Agent Framework`. This must be explicitly referenced by your application. +[Microsoft.SemanticKernel.Agents.Abstractions](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Abstractions)|Defines the core agent abstractions for the `Agent Framework`. Generally not required to be specified as it is included in both the `Microsoft.SemanticKernel.Agents.Core` and `Microsoft.SemanticKernel.Agents.OpenAI` packages. [Microsoft.SemanticKernel.Agents.Core](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.Core)|Includes the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes. [Microsoft.SemanticKernel.Agents.OpenAI](https://www.nuget.org/packages/Microsoft.SemanticKernel.Agents.OpenAI)|Provides ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent`](./assistant-agent.md). @@ -74,7 +74,7 @@ Package|Description Module|Description --|-- -[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the _Agent Framework_. 
This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). +[semantic-kernel.agents](https://pypi.org/project/semantic-kernel/)|This is the _Semantic Kernel_ library for getting started with the `Agent Framework`. This must be explicitly referenced by your application. This module contains the [`ChatCompletionAgent`](./chat-completion-agent.md) and [`AgentGroupChat`](./agent-chat.md) classes, as well as the ability to use the [OpenAI Assistant API](https://platform.openai.com/docs/assistants) via the [`OpenAIAssistantAgent` or `AzureOpenAssistant`](./assistant-agent.md). ::: zone-end diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md new file mode 100644 index 00000000..5dbc6bba --- /dev/null +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -0,0 +1,623 @@ +--- +title: Agent Framework Release Candidate Migration Guide +description: Describes the steps for developers to update their Agent Framework code to the latest abstractions. +zone_pivot_groups: programming-languages +author: moonbox3 +ms.topic: conceptual +ms.author: evmattso +ms.date: 02/26/2025 +ms.service: semantic-kernel +--- + +# Migration Guide for Updating from Old Code to New Code + +As we transition some agents from the experimental stage to the release candidate stage, we have updated the APIs to simplify and streamline their use. Refer to the specific scenario guide to learn how to update your existing code to work with the latest available APIs. + +::: zone pivot="programming-language-csharp" + +## OpenAIAssistantAgent C# Migration Guide + +We recently applied a significant shift around the [`OpenAIAssistantAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs) in the _Semantic Kernel Agent Framework_. + +These changes were applied in: + +- [PR #10583](https://github.com/microsoft/semantic-kernel/pull/10583) +- [PR #10616](https://github.com/microsoft/semantic-kernel/pull/10616) +- [PR #10633](https://github.com/microsoft/semantic-kernel/pull/10633) + +These changes are intended to: + +- Align with the pattern for using for our [`AzureAIAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/AzureAI/AzureAIAgent.cs). +- Fix bugs around static initialization pattern. +- Avoid limiting features based on our abstraction of the underlying SDK. + +This guide provides step-by-step instructions for migrating your C# code from the old implementation to the new one. Changes include updates for creating assistants, managing the assistant lifecycle, handling threads, files, and vector stores. + +## 1. Client Instantiation + +Previously, `OpenAIClientProvider` was required for creating any `OpenAIAssistantAgent`. This dependency has been simplified. + +#### **New Way** +```csharp +OpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(endpointUrl)); +AssistantClient assistantClient = client.GetAssistantClient(); +``` + +#### **Old Way (Deprecated)** +```csharp +var clientProvider = new OpenAIClientProvider(...); +``` + +## 2. 
Assistant Lifecycle + +### **Creating an Assistant** +You may now directly instantiate an `OpenAIAssistantAgent` using an existing or new Assistant definition from `AssistantClient`. + +##### **New Way** +```csharp +Assistant definition = await assistantClient.GetAssistantAsync(assistantId); +OpenAIAssistantAgent agent = new(definition, client); +``` + +Plugins can be directly included during initialization: +```csharp +KernelPlugin plugin = KernelPluginFactory.CreateFromType(); +Assistant definition = await assistantClient.GetAssistantAsync(assistantId); +OpenAIAssistantAgent agent = new(definition, client, [plugin]); +``` + +Creating a new assistant definition using an extension method: +```csharp +Assistant assistant = await assistantClient.CreateAssistantAsync( + model, + name, + instructions: instructions, + enableCodeInterpreter: true); +``` + +##### **Old Way (Deprecated)** +Previously, assistant definitions were managed indirectly. + +## 3. Invoking the Agent + +You may specify `RunCreationOptions` directly, enabling full access to underlying SDK capabilities. + +#### **New Way** +```csharp +RunCreationOptions options = new(); // configure as needed +var result = await agent.InvokeAsync(options); +``` + +#### **Old Way (Deprecated)** +```csharp +var options = new OpenAIAssistantInvocationOptions(); +``` + +## 4. Assistant Deletion + +You can directly manage assistant deletion with `AssistantClient`. + +```csharp +await assistantClient.DeleteAssistantAsync(agent.Id); +``` + +## 5. Thread Lifecycle + +### **Creating a Thread** +Threads are now created directly using `AssistantClient`. + +##### **New Way** +```csharp +AssistantThread thread = await assistantClient.CreateThreadAsync(); +``` + +Using a convenience extension method: +```csharp +string threadId = await assistantClient.CreateThreadAsync(messages: [new ChatMessageContent(AuthorRole.User, "")]); +``` + +##### **Old Way (Deprecated)** +Previously, thread management was indirect or agent-bound. + +### **Thread Deletion** +```csharp +await assistantClient.DeleteThreadAsync(thread.Id); +``` + +## 6. File Lifecycle + +File creation and deletion now utilize `OpenAIFileClient`. + +### **File Upload** +```csharp +string fileId = await client.UploadAssistantFileAsync(stream, ""); +``` + +### **File Deletion** +```csharp +await client.DeleteFileAsync(fileId); +``` + +## 7. Vector Store Lifecycle + +Vector stores are managed directly via `VectorStoreClient` with convenient extension methods. + +### **Vector Store Creation** +```csharp +string vectorStoreId = await client.CreateVectorStoreAsync([fileId1, fileId2], waitUntilCompleted: true); +``` + +### **Vector Store Deletion** +```csharp +await client.DeleteVectorStoreAsync(vectorStoreId); +``` + +## Backwards Compatibility + +Deprecated patterns are marked with `[Obsolete]`. To suppress obsolete warnings (`CS0618`), update your project file as follows: + +```xml + + $(NoWarn);CS0618 + +``` + +This migration guide helps you transition smoothly to the new implementation, simplifying client initialization, resource management, and integration with the **Semantic Kernel .NET SDK**. + +::: zone-end +::: zone pivot="programming-language-python" + +For developers upgrading to Semantic Kernel Python 1.22.0 or later, the ChatCompletionAgent and OpenAI Assistant abstractions have been updated. 
+ +These changes were applied in: + +- [PR #10666](https://github.com/microsoft/semantic-kernel/pull/10666) +- [PR #10667](https://github.com/microsoft/semantic-kernel/pull/10667) +- [PR #10701](https://github.com/microsoft/semantic-kernel/pull/10701) +- [PR #10707](https://github.com/microsoft/semantic-kernel/pull/10707) + +This guide provides step-by-step instructions for migrating your Python code from the old implementation to the new implementation. + +## `ChatCompletionAgent` + +The `ChatCompletionAgent` has been updated to simplify service configuration, plugin handling, and function calling behaviors. Below are the key changes you should consider when migrating. + +### 1. Specifying the Service + +You can now specify the service directly as part of the agent constructor: + +#### New Way + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", +) +``` + +Note: If both a kernel and a service are provided, the service will take precedence if it shares the same service_id or ai_model_id. Otherwise, if they are separate, the first AI service registered on the kernel will be used. + +#### Old Way (Still Valid) + +Previously, you would first add a service to a kernel and then pass the kernel to the agent: + +```python +kernel = Kernel() +kernel.add_service(AzureChatCompletion()) + +agent = ChatCompletionAgent( + kernel=kernel, + name="", + instructions="", +) +``` + +### 2. Adding Plugins + +Plugins can now be supplied directly through the constructor: + +#### New Way + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", + plugins=[SamplePlugin()], +) +``` + +#### Old Way (Still Valid) + +Plugins previously had to be added to the kernel separately: + +```python +kernel = Kernel() +kernel.add_plugin(SamplePlugin()) + +agent = ChatCompletionAgent( + kernel=kernel, + name="", + instructions="", +) +``` + +Note: Both approaches are valid, but directly specifying plugins simplifies initialization. + +### 3. Invoking the Agent + +You now have two ways to invoke the agent. The new method directly retrieves a single response, while the old method supports streaming. + +#### New Way (Single Response) + +```python +chat_history = ChatHistory() +chat_history.add_user_message("") +response = await agent.get_response(chat_history) +# response is of type ChatMessageContent +``` + +#### Old Way (Still Valid) + +```python +chat_history = ChatHistory() +chat_history.add_user_message("") +async for response in agent.invoke(chat_history): + # handle response +``` + +### 4. Controlling Function Calling + +Function calling behavior can now be controlled directly when specifying the service within the agent constructor: + +```python +agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="", + instructions="", + plugins=[MenuPlugin()], + function_choice_behavior=FunctionChoiceBehavior.Auto( + filters={"included_functions": ["get_specials", "get_item_price"]} + ), +) +``` + +Note: Previously, function calling configuration required separate setup on the kernel or service object. If execution settings specify the same `service_id` or `ai_model_id` as the AI service configuration, the function calling behavior defined in the execution settings (via `KernelArguments`) will take precedence over the function choice behavior set in the constructor. + +These updates enhance simplicity and configurability, making the ChatCompletionAgent easier to integrate and maintain. 
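
Putting the pieces above together, a minimal end-to-end sketch of the updated `ChatCompletionAgent` patterns might look like the following. The `MenuPlugin`, agent name, and instructions are illustrative placeholders, the `AzureChatCompletion` service is assumed to be configured through environment variables, and the import paths follow the current Python samples, so they may shift between releases.

```python
import asyncio

from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import kernel_function


class MenuPlugin:
    """A hypothetical plugin used only to illustrate the constructor-based setup."""

    @kernel_function(description="Returns today's specials from the menu.")
    def get_specials(self) -> str:
        return "Clam chowder, Cobb salad, Chai tea"


async def main() -> None:
    # Service, plugins, and function calling behavior are supplied in one place,
    # rather than being registered on a separately constructed kernel.
    agent = ChatCompletionAgent(
        service=AzureChatCompletion(),
        name="MenuAssistant",
        instructions="Answer questions about the menu.",
        plugins=[MenuPlugin()],
        function_choice_behavior=FunctionChoiceBehavior.Auto(
            filters={"included_functions": ["get_specials"]}
        ),
    )

    chat_history = ChatHistory()
    chat_history.add_user_message("What are the specials today?")

    # Single-response invocation; the streaming invoke pattern shown above
    # works with this same construction.
    response = await agent.get_response(chat_history)
    print(response.content)


if __name__ == "__main__":
    asyncio.run(main())
```

Because the older kernel-based setup remains valid, this consolidated style can be adopted incrementally.
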
+ +## `OpenAIAssistantAgent` + +The `AzureAssistantAgent` and `OpenAIAssistantAgent` changes include updates for creating assistants, creating threads, handling plugins, using the code interpreter tool, working with the file search tool, and adding chat messages to a thread. + +## Setting up Resources + +### Old Way + +The `AsyncAzureOpenAI` client was created as part of creating the Agent object. + +```python +agent = await AzureAssistantAgent.create( + deployment_name="optional-deployment-name", + api_key="optional-api-key", + endpoint="optional-endpoint", + ad_token="optional-ad-token", + ad_token_provider=optional_callable, + default_headers={"optional_header": "optional-header-value"}, + env_file_path="optional-env-file-path", + env_file_encoding="optional-env-file-encoding", + ..., +) +``` + +### New Way + +The agent provides a static method to create the required client for the specified resources, where method-level keyword arguments take precedence over environment variables and values in an existing `.env` file. + +```python +client, model = AzureAssistantAgent.setup_resources( + ad_token="optional-ad-token", + ad_token_provider=optional_callable, + api_key="optional-api-key", + api_version="optional-api-version", + base_url="optional-base-url", + default_headers="optional-default-headers", + deployment_name="optional-deployment-name", + endpoint="optional-endpoint", + env_file_path="optional-env-file-path", + env_file_encoding="optional-env-file-encoding", + token_scope="optional-token-scope", +) +``` + +## 1. Creating an Assistant + +### Old Way +```python +agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, +) +``` +or +```python +agent = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=, + instructions=, + enable_code_interpreter=True, +) +``` + +### New Way +```python +# Azure AssistantAgent + +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` +or +```python +# OpenAI Assistant Agent + +# Create the client using OpenAI resources and configuration +client, model = OpenAIAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent using the client and the assistant definition +agent = OpenAIAssistantAgent( + client=client, + definition=definition, +) +``` + +## 2. Creating a Thread + +### Old Way +```python +thread_id = await agent.create_thread() +``` + +### New Way +```python +thread = await agent.client.beta.threads.create() +# Use thread.id for the thread_id string +``` + +## 3. Handling Plugins + +### Old Way +```python +# Create the instance of the Kernel +kernel = Kernel() + +# Add the sample plugin to the kernel +kernel.add_plugin(plugin=MenuPlugin(), plugin_name="menu") + +agent = await AzureAssistantAgent.create( + kernel=kernel, + name="", + instructions="" +) +``` +*Note: It is still possible to manage plugins via the kernel. 
If you do not supply a kernel, a kernel is automatically created at agent creation time and the plugins will be added to that instance.* + +### New Way +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="", + name="", +) + +# Create the agent with plugins passed in as a list +agent = AzureAssistantAgent( + client=client, + definition=definition, + plugins=[MenuPlugin()], +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/getting_started_with_agents/openai_assistant/step2_plugins.py) for full details. + +## 4. Using the Code Interpreter Tool + +### Old Way +```python +csv_file_path = ... + +agent = await AzureAssistantAgent.create( + kernel=kernel, + name="", + instructions="", + enable_code_interpreter=True, + code_interpreter_filenames=[csv_file_path], +) +``` + +### New Way +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +csv_file_path = ... + +# Load the CSV file as a FileObject +with open(csv_file_path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + +# Get the code interpreter tool and resources +code_interpreter_tool, code_interpreter_tool_resource = AzureAssistantAgent.configure_code_interpreter_tool(file.id) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions=".", + tools=code_interpreter_tool, + tool_resources=code_interpreter_tool_resource, +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/agents/openai_assistant/openai_assistant_file_manipulation.py) for full details. + +## 5. Working with the File Search Tool + +### Old Way +```python +pdf_file_path = ... + +agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + vector_store_filenames=[pdf_file_path], +) +``` + +### New Way + +```python +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +pdf_file_path = ... 
+ +# Load the employees PDF file as a FileObject +with open(pdf_file_path, "rb") as file: + file = await client.files.create(file=file, purpose="assistants") + +# Create a vector store specifying the file ID to be used for file search +vector_store = await client.beta.vector_stores.create( + name="step4_assistant_file_search", + file_ids=[file.id], +) + +file_search_tool, file_search_tool_resources = AzureAssistantAgent.configure_file_search_tool(vector_store.id) + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + instructions="Find answers to the user's questions in the provided file.", + name="FileSearch", + tools=file_search_tool, + tool_resources=file_search_tool_resources, +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` + +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/getting_started_with_agents/openai_assistant/step4_assistant_tool_file_search.py) for full details. + +## 6. Adding Chat Messages to a Thread + +### Old Way +```python +await agent.add_chat_message( + thread_id=thread_id, + message=ChatMessageContent(role=AuthorRole.USER, content=user_input) +) +``` + +### New Way +*Note: The old method still works if you pass in a `ChatMessageContent`, but you can now also pass a simple string.* +```python +await agent.add_chat_message( + thread_id=thread_id, + message=user_input, +) +``` + +## 7. Cleaning Up Resources + +### Old Way +```python +await agent.delete_file(file_id) +await agent.delete_thread(thread_id) +await agent.delete() +``` + +### New Way +```python +await client.files.delete(file_id) +await client.beta.threads.delete(thread.id) +await client.beta.assistants.delete(agent.id) +``` + +## Handling Structured Outputs + +### Old Way +*Unavailable in the old way* + +### New Way +```python +# Define a Pydantic model that represents the structured output from the OpenAI service +class ResponseModel(BaseModel): + response: str + items: list[str] + +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Create the assistant definition +definition = await client.beta.assistants.create( + model=model, + name="", + instructions="", + response_format=AzureAssistantAgent.configure_response_format(ResponseModel), +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, +) +``` +Refer to the [sample implementation](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/agents/openai_assistant/openai_assistant_structured_outputs.py) for full details. + +This migration guide should help you update your code to the new implementation, leveraging client-based configuration and enhanced features. + +::: zone-end +::: zone pivot="programming-language-java" +> Agents are unavailable in Java. 
+::: zone-end diff --git a/semantic-kernel/support/migration/toc.yml b/semantic-kernel/support/migration/toc.yml index 91c21c5f..1718b55c 100644 --- a/semantic-kernel/support/migration/toc.yml +++ b/semantic-kernel/support/migration/toc.yml @@ -9,4 +9,6 @@ - name: Memory Store to Vector Store Migration href: memory-store-migration.md - name: Kernel Events and Filters Migration - href: kernel-events-and-filters-migration.md \ No newline at end of file + href: kernel-events-and-filters-migration.md +- name: Agent Framework Release Candidate Migration Guide + href: agent-framework-rc-migration-guide.md \ No newline at end of file From 8d59c5c3a3a878f90ccab733105ba7669e514ba7 Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Fri, 28 Feb 2025 11:55:58 +0900 Subject: [PATCH 042/117] Update title (#474) --- .../support/migration/agent-framework-rc-migration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index 5dbc6bba..a7eb9cfd 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -9,7 +9,7 @@ ms.date: 02/26/2025 ms.service: semantic-kernel --- -# Migration Guide for Updating from Old Code to New Code +# Agent Framework Release Candidate Migration Guide As we transition some agents from the experimental stage to the release candidate stage, we have updated the APIs to simplify and streamline their use. Refer to the specific scenario guide to learn how to update your existing code to work with the latest available APIs. From e96632decd0027ede30ecd8a60e4177a6d98093c Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Fri, 28 Feb 2025 12:58:11 +0900 Subject: [PATCH 043/117] Merge main to live: updating Migration Guide title (#477) * Update Agent Framework related doc and code samples. Add migration code for Python (#469) * Update OpenAI assistant related code samples. Add migration code for Python * improve migration guide * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update semantic-kernel/support/migration/openai-assistant-agent-migration-guide.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Replace italics with code format. * update bookmarks * Update Python docs * Add dotnet migration guide. 
* update formatting in migration guide * fix headers * Fix header again * update guide to include rc * Small update to include new method get_response * Update important tags with some experimental (group chat) and some release candidate --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update title (#474) --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../support/migration/agent-framework-rc-migration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index 5dbc6bba..a7eb9cfd 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -9,7 +9,7 @@ ms.date: 02/26/2025 ms.service: semantic-kernel --- -# Migration Guide for Updating from Old Code to New Code +# Agent Framework Release Candidate Migration Guide As we transition some agents from the experimental stage to the release candidate stage, we have updated the APIs to simplify and streamline their use. Refer to the specific scenario guide to learn how to update your existing code to work with the latest available APIs. From bdb611802db99b49de5dde32f7717c18c34b7ce7 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Fri, 28 Feb 2025 08:57:47 -0800 Subject: [PATCH 044/117] Update semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../out-of-the-box-connectors/azure-ai-search-connector.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md index 9f75aca2..a85ee0d4 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md @@ -41,7 +41,7 @@ The Azure AI Search Vector Store connector can be used to access and manage data | Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and iterables of each of these types*
    | | Supported vector property types | list[float], list[int], ndarray | | Supported index types |
    • Hnsw
    • Flat
    | -| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
  • Hamming
  • | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    • Hamming
    | | Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | From 3f2fcdbfda0626de92d0e710769424dd1134de42 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Fri, 28 Feb 2025 08:57:59 -0800 Subject: [PATCH 045/117] Update semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../out-of-the-box-connectors/mongodb-connector.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md index 152d579b..8671e1a3 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md @@ -40,7 +40,7 @@ The MongoDB Vector Store connector can be used to access and manage data in Mong | Collection maps to | MongoDB Collection + Index | | Supported key property types | string | | Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and iterables of each of these types*
    | -| Supported vector property types |
    • list[float]
    • list[int]
  • ndarray
  • | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | | Supported index types |
    • Hnsw
    • IvfFlat
    | | Supported distance functions |
    • CosineDistance
    • DotProductSimilarity
    • EuclideanDistance
    | | Supported filter clauses |
    • EqualTo
    • AnyTagsEqualTo
    | From 76657f9fce2a924dc54dbda356edcbf007063c0f Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Fri, 28 Feb 2025 08:58:13 -0800 Subject: [PATCH 046/117] Update semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../out-of-the-box-connectors/weaviate-connector.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md index 574cb9d0..b3efc390 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md @@ -39,7 +39,7 @@ The Weaviate Vector Store connector can be used to access and manage data in Wea | Collection maps to | Weaviate Collection | | Supported key property types | Guid | | Supported data property types |
    • string
    • byte
    • short
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • *and iterables of each of these types*
    | -| Supported vector property types |
    • list[float]
    • list[int]
  • ndarray
  • | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | | Supported index types |
    • Hnsw
    • Flat
    • Dynamic
    | | Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | | Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | From 24c208a34b81bd7f18eb2593c8bfec1c78c15460 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Fri, 28 Feb 2025 08:58:23 -0800 Subject: [PATCH 047/117] Update semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../out-of-the-box-connectors/chroma-connector.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md index c02e9fd1..17dc15cc 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md @@ -33,7 +33,7 @@ following characteristics. | Collection maps to | Chroma collection | | Supported key property types | string | | Supported data property types | All types that are supported by System.Text.Json (either built-in or by using a custom converter) | -| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray]
    | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | | Supported index types |
    • HNSW
    | | Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | | Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | From 46168824a7af450bb4e200ee31d4b519ce02fdb4 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 4 Mar 2025 10:30:33 +0100 Subject: [PATCH 048/117] extended docs --- .../concepts/ai-services/realtime.md | 128 ++++++++++++++++-- 1 file changed, 115 insertions(+), 13 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index 65eddf90..2dc801ba 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -10,16 +10,16 @@ ms.service: semantic-kernel # Realtime API integrations for Semantic Kernel -The first realtime API integration for Semantic Kernel has been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes. +The first realtime API integration for Semantic Kernel have been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes and we might need to make breaking changes to the API in Semantic Kernel as we learn from customers how to use this and as we add other providers of these kinds of models and APIs. ## Realtime Client abstraction -To support different realtime api's from different vendors, using different protocols, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and send and receive messages. +To support different realtime APIs from different vendors, using different protocols, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and send and receive messages. The client is responsible for handling the connection to the service, sending messages, and receiving messages. The client is also responsible for handling any errors that occur during the connection or message sending/receiving process. ### Realtime API -Any realtime client consists of the following methods: +Any realtime client implements the following methods: | Method | Description | | ---------------- | ------------------------------------------------------------------------------------------------------------------ | @@ -29,9 +29,9 @@ Any realtime client consists of the following methods: | `receive` | This is a asynchronous generator method that listens for messages from the service and yields them as they arrive. | | `send` | Sends a message to the service | -## Python implementations +### Python implementations -The python version of semantic kernel currently supports the following realtime clients: +The python version of Semantic Kernel currently supports the following realtime clients: | Client | Protocol | Modalities | Function calling enabled | Description | | ------ | --------- | ------------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -47,28 +47,32 @@ To get started with the Realtime API, you need to install the `semantic-kernel` pip install semantic-kernel[realtime] ``` -Then you can create a kernel and add the realtime client to it. +Depending on how you want to handle audio, you might need additional packages to interface with speakers and microphones, like `pyaudio` or `sounddevice`. 
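
The examples in the next sections hand incoming audio to an `audio_player` object that is not defined on this page. As one possible sketch, assuming the service returns 16-bit mono PCM at 24 kHz (verify what your session actually emits) and that the optional `sounddevice` and `numpy` packages are installed, such a player might look like this:

```python
import numpy as np
import sounddevice as sd


class AudioPlayer:
    """Illustrative audio player; the sample rate and PCM format are assumptions."""

    def __init__(self, sample_rate: int = 24000) -> None:
        self._stream = sd.OutputStream(samplerate=sample_rate, channels=1, dtype="int16")
        self._stream.start()

    async def add_audio(self, audio) -> None:
        # Used with RealtimeAudioEvent.audio; assumes the AudioContent exposes raw PCM bytes via .data.
        self._stream.write(np.frombuffer(audio.data, dtype=np.int16).reshape(-1, 1))

    async def play_audio(self, content: np.ndarray) -> None:
        # Matches the audio_output_callback signature described further down this page.
        self._stream.write(np.asarray(content, dtype=np.int16).reshape(-1, 1))
```

Writing to the output stream synchronously keeps the sketch short; a production player would usually buffer frames on a queue and play them from a separate task.
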
+ +### Websocket clients + +Then you can create a kernel and add the realtime client to it, this shows how to do that with a AzureRealtimeWebsocket connection, you can replace AzureRealtimeWebsocket with OpenAIRealtimeWebsocket without any further changes. ```python from semantic_kernel.connectors.ai.open_ai import ( AzureRealtimeWebsocket, + AzureRealtimeExecutionSettings, ListenEvents, - OpenAIRealtimeExecutionSettings, ) from semantic_kernel.contents import RealtimeAudioEvent, RealtimeTextEvent # this will use environment variables to get the api key, endpoint, api version and deployment name. realtime_client = AzureRealtimeWebsocket() -settings = OpenAIRealtimeExecutionSettings() +settings = AzureRealtimeExecutionSettings(voice='alloy') async with realtime_client(settings=settings, create_response=True): async for event in realtime_client.receive(): match event: - # receiving a piece of audio + # receiving a piece of audio (and send it to a undefined audio player) case RealtimeAudioEvent(): await audio_player.add_audio(event.audio) # receiving a piece of audio transcript case RealtimeTextEvent(): - # the model returns both audio and transcript of the audio, which we will print + # Semantic Kernel parses the transcript to a TextContent object captured in a RealtimeTextEvent print(event.text.text, end="") case _: # OpenAI Specific events @@ -81,8 +85,106 @@ async with realtime_client(settings=settings, create_response=True): There are two important things to note, the first is that the `realtime_client` is an async context manager, this means that you can use it in an async function and use `async with` to create the session. The second is that the `receive` method is an async generator, this means that you can use it in a for loop to receive messages as they arrive. -In this simple example, we are passing the audio to a unspecified `audio_player` object, and printing the transcript as it arrives. +### WebRTC client + +The setup of a WebRTC connection is a bit more complex and so we need a extra parameter when creating the client. This parameter, `audio_track` needs to be a object that implements the `MediaStreamTrack` protocol of the `aiortc` package, this is also demonstrated in the samples that are linked below. + +To create a client that uses WebRTC, you would do the following: + +```python +from semantic_kernel.connectors.ai.open_ai import ( + ListenEvents, + OpenAIRealtimeExecutionSettings, + OpenAIRealtimeWebRTC, +) +from aiortc.mediastreams import MediaStreamTrack + +class AudioRecorderWebRTC(MediaStreamTrack): + # implement the MediaStreamTrack methods. + +realtime_client = OpenAIRealtimeWebRTC(audio_track=AudioRecorderWebRTC()) +# Create the settings for the session +settings = OpenAIRealtimeExecutionSettings( + instructions=""" +You are a chat bot. Your name is Mosscap and +you have one goal: figure out what people need. +Your full name, should you need to know it, is +Splendid Speckled Mosscap. You communicate +effectively, but you tend to answer with long +flowery prose. 
+""", + voice="shimmer", +) +audio_player = AudioPlayer +async with realtime_client(settings=settings, create_response=True): + async for event in realtime_client.receive(): + match event.event_type: + # receiving a piece of audio (and send it to a undefined audio player) + case "audio": + await audio_player.add_audio(event.audio) + case "text": + # the model returns both audio and transcript of the audio, which we will print + print(event.text.text, end="") + case "service": + # OpenAI Specific events + if event.service_type == ListenEvents.SESSION_UPDATED: + print("Session updated") + if event.service_type == ListenEvents.RESPONSE_CREATED: + print("\nMosscap (transcript): ", end="") +``` + +Both of these samples receive the audio as RealtimeAudioEvent and then they pass that to a unspecified audio_player object. + +### Audio output callback + +Next to this we have a parameter called `audio_output_callback` on the `receive` method and on the class creation. This callback will be called first before any further handling of the audio and gets a `numpy` array of the audio data, instead of it being parsed into AudioContent and returned as a RealtimeAudioEvent that you can then handle, which is what happens above. This has shown to give smoother audio output because there is less overhead between the audio data coming in and it being given to the player. + +This example shows how to define and use the `audio_output_callback`: + +```python +from semantic_kernel.connectors.ai.open_ai import ( + ListenEvents, + OpenAIRealtimeExecutionSettings, + OpenAIRealtimeWebRTC, +) +from aiortc.mediastreams import MediaStreamTrack + +class AudioRecorderWebRTC(MediaStreamTrack): + # implement the MediaStreamTrack methods. + +class AudioPlayer: + async def play_audio(self, content: np.ndarray): + # implement the audio player + +realtime_client = OpenAIRealtimeWebRTC(audio_track=AudioRecorderWebRTC()) +# Create the settings for the session +settings = OpenAIRealtimeExecutionSettings( + instructions=""" +You are a chat bot. Your name is Mosscap and +you have one goal: figure out what people need. +Your full name, should you need to know it, is +Splendid Speckled Mosscap. You communicate +effectively, but you tend to answer with long +flowery prose. +""", + voice="shimmer", +) +audio_player = AudioPlayer +async with realtime_client(settings=settings, create_response=True): + async for event in realtime_client.receive(audio_output_callback=audio_player.play_audio): + match event.event_type: + # no need to handle case: "audio" + case "text": + # the model returns both audio and transcript of the audio, which we will print + print(event.text.text, end="") + case "service": + # OpenAI Specific events + if event.service_type == ListenEvents.SESSION_UPDATED: + print("Session updated") + if event.service_type == ListenEvents.RESPONSE_CREATED: + print("\nMosscap (transcript): ", end="") +``` -There is also a `audio_output_callback` parameter on the client creation or on the `receive` method, this callback will be called first, and leads to smoother playback compared to the above example. +### Samples -See the samples in our repo [link to follow]. +There are four samples in [our repo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/concepts/realtime), they cover both the basics using both websockets and WebRTC, as well as a more complex setup including function calling. 
Finally there is a more [complex demo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/demos/call_automation) that uses [Azure Communication Services](https://learn.microsoft.com/en-us/azure/communication-services/) to allow you to call your Semantic Kernel enhanced realtime API. From 7f46baeaa98d94cb518522122c341ec1f5284d45 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 4 Mar 2025 10:48:20 +0100 Subject: [PATCH 049/117] fixed link --- semantic-kernel/concepts/ai-services/realtime.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index 2dc801ba..fca6caa6 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -187,4 +187,4 @@ async with realtime_client(settings=settings, create_response=True): ### Samples -There are four samples in [our repo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/concepts/realtime), they cover both the basics using both websockets and WebRTC, as well as a more complex setup including function calling. Finally there is a more [complex demo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/demos/call_automation) that uses [Azure Communication Services](https://learn.microsoft.com/en-us/azure/communication-services/) to allow you to call your Semantic Kernel enhanced realtime API. +There are four samples in [our repo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/concepts/realtime), they cover both the basics using both websockets and WebRTC, as well as a more complex setup including function calling. Finally there is a more [complex demo](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/demos/call_automation) that uses [Azure Communication Services](/azure/communication-services/) to allow you to call your Semantic Kernel enhanced realtime API. From 6941538d8ef310f4a7ca7d6c966c15d717707e41 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 4 Mar 2025 10:51:12 +0100 Subject: [PATCH 050/117] fixed header --- semantic-kernel/concepts/ai-services/realtime.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index fca6caa6..afdd4568 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -1,6 +1,6 @@ --- title: Realtime AI Integrations for Semantic Kernel -description: Learn about realtime AI integrations available in Semantic Kernel. +description: Learn about realtime multi-modal AI integrations available in Semantic Kernel. author: eavanvalkenburg ms.topic: conceptual ms.author: edvan @@ -8,7 +8,7 @@ ms.date: 02/26/2025 ms.service: semantic-kernel --- -# Realtime API integrations for Semantic Kernel +# Realtime Multi-modal APIs The first realtime API integration for Semantic Kernel have been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes and we might need to make breaking changes to the API in Semantic Kernel as we learn from customers how to use this and as we add other providers of these kinds of models and APIs. 
From 8d555834dc1a16468bfc30593c65e77c9618d433 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Wed, 5 Mar 2025 08:20:16 -0800 Subject: [PATCH 051/117] Sync fork to branch --- semantic-kernel/Frameworks/agent/TOC.yml | 2 + .../Frameworks/agent/assistant-agent.md | 2 +- .../Frameworks/agent/azure-ai-agent.md | 506 ++++++++++++++++++ .../Frameworks/agent/chat-completion-agent.md | 6 +- 4 files changed, 512 insertions(+), 4 deletions(-) create mode 100644 semantic-kernel/Frameworks/agent/azure-ai-agent.md diff --git a/semantic-kernel/Frameworks/agent/TOC.yml b/semantic-kernel/Frameworks/agent/TOC.yml index c85787ea..2d6709d8 100644 --- a/semantic-kernel/Frameworks/agent/TOC.yml +++ b/semantic-kernel/Frameworks/agent/TOC.yml @@ -6,6 +6,8 @@ href: chat-completion-agent.md - name: OpenAI Assistant Agent href: assistant-agent.md +- name: Azure AI Agent + href: azure-ai-agent.md - name: Agent Collaboration href: agent-chat.md - name: Create an Agent from a Template diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index 17ee3199..c40b1926 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -8,7 +8,7 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring the _Semantic Kernel_ `OpenAIAssistantAgent` +# Exploring the Semantic Kernel `OpenAIAssistantAgent` > [!IMPORTANT] > This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md new file mode 100644 index 00000000..1eb27431 --- /dev/null +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -0,0 +1,506 @@ +--- +title: Exploring the Semantic Kernel Azure AI Agent Agent +description: An exploration of the definition, behaviors, and usage patterns for an Azure AI Agent +zone_pivot_groups: programming-languages +author: moonbox3 +ms.topic: tutorial +ms.author: evmattso +ms.date: 03/05/2025 +ms.service: semantic-kernel +--- +# Exploring the Semantic Kernel `AzureAIAgent` + +> [!IMPORTANT] +> This feature is in the experimental stage. Features at this stage are still under development and subject to change before advancing to the preview or release candidate stage. + +Detailed API documentation related to this discussion is available at: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +> Updated Semantic Kernel Python API Docs are coming soon. + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## What is an `AzureAIAgent`? + +An `AzureAIAgent` is a specialized agent within the Semantic Kernel framework, designed to provide advanced conversational capabilities with seamless tool integration. It automates tool calling, eliminating the need for manual parsing and invocation. The agent also securely manages conversation history using threads, reducing the overhead of maintaining state. Additionally, the `AzureAIAgent` supports a variety of built-in tools, including file retrieval, code execution, and data interaction via Bing, Azure AI Search, Azure Functions, and OpenAPI. 
+ +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +To set up the required resources, follow the "Quickstart: Create a new agent" guide [here](/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure). + +You will need to install the optional Semantic Kernel azure dependencies if you haven't already via: + +```bash +pip install semantic-kernel[azure] +``` + +Before running an `AzureAIAgent`, modify your .env file to include: + +```bash +AZURE_AI_AGENT_PROJECT_CONNECTION_STRING = "" +AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" +``` + +or + +```bash +AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" +AZURE_AI_AGENT_ENDPOINT = "" +AZURE_AI_AGENT_SUBSCRIPTION_ID = "" +AZURE_AI_AGENT_RESOURCE_GROUP_NAME = "" +AZURE_AI_AGENT_PROJECT_NAME = "" +``` + +The project connection string is of the following format: `;;;`. See here for information on obtaining the values to populate the connection string. + +The `.env` should be placed in the root directory. + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Configuring the AI Project Client + +Ensure that your `AzureAIAgent` resources are configured with at least a Basic or Standard SKU (the Standard SKU is required to do more advanced operations like AI Search). + +To begin, create the project client as follows: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +```python +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, +): + # Your operational code here +``` + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Creating an `AzureAIAgent` + +To create an `AzureAIAgent`, you start by configuring and initializing the agent project through the Azure AI service and then integrate it with Semantic Kernel: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +```python +from azure.identity.aio import DefaultAzureCredential +from semantic_kernel.agents.azure_ai import AzureAIAgent, AzureAIAgentSettings + +ai_agent_settings = AzureAIAgentSettings.create() + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, +): + # 1. Create an agent on the Azure AI agent service + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + name="", + instructions="", + ) + + # 2. Create a Semantic Kernel agent to use the Azure AI agent + agent = AzureAIAgent( + client=client, + definition=agent_definition, + ) +``` + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Interacting with an `AzureAIAgent` + +Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
+ +::: zone-end + +::: zone pivot="programming-language-python" + +```python +USER_INPUTS = ["Hello", "What's your name?"] + +thread = await client.agents.create_thread() + +try: + for user_input in USER_INPUTS: + await agent.add_chat_message(thread_id=thread.id, message=user_input) + response = await agent.get_response(thread_id=thread.id) + print(response) +finally: + await client.agents.delete_thread(thread.id) +``` + +Python also supports invoking an agent in a streaming and a non-streaming fashion: + +```python +# Streaming +for user_input in USER_INPUTS: + await agent.add_chat_message(thread_id=thread.id, message=user_input) + async for content in agent.invoke_stream(thread_id=thread.id): + print(content.content, end="", flush=True) +``` + +```python +# Non-streaming +for user_input in USER_INPUTS: + await agent.add_chat_message(thread_id=thread.id, message=user_input) + async for content in agent.invoke(thread_id=thread.id): + print(content.content) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Using Plugins with an `AzureAIAgent` + +Semantic Kernel supports extending an `AzureAIAgent` with custom plugins for enhanced functionality: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +```python +from semantic_kernel.functions import kernel_function + +class SamplePlugin: + @kernel_function(description="Provides sample data.") + def get_data(self) -> str: + return "Sample data" + +ai_agent_settings = AzureAIAgentSettings.create() + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + ) + + agent = AzureAIAgent( + client=client, + definition=agent_definition, + plugins=[SamplePlugin()] + ) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Advanced Features + +An `AzureAIAgent` can leverage advanced tools such as code interpreters, file search, OpenAPI and Azure AI Search integration for dynamic and powerful interactions: + +### Code Interpreter + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" +```python +from azure.ai.projects.models import CodeInterpreterTool + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + code_interpreter = CodeInterpreterTool() + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +### File Search + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
+ +::: zone-end + +::: zone pivot="programming-language-python" +```python +from azure.ai.projects.models import FileSearchTool + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + tools=file_search.definitions, + tool_resources=file_search.resources, + ) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +### OpenAPI Integration + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +```python +from azure.ai.projects.models import OpenApiTool, OpenApiAnonymousAuthDetails + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + openapi_spec_file_path = "sample/filepath/..." + with open(os.path.join(openapi_spec_file_path, "spec_one.json")) as file_one: + openapi_spec_one = json.loads(file_one.read()) + with open(os.path.join(openapi_spec_file_path, "spec_two.json")) as file_two: + openapi_spec_two = json.loads(file_two.read()) + + # Note that connection or managed identity auth setup requires additional setup in Azure + auth = OpenApiAnonymousAuthDetails() + openapi_tool_one = OpenApiTool( + name="", + spec=openapi_spec_one, + description="", + auth=auth, + ) + openapi_tool_two = OpenApiTool( + name="", + spec=openapi_spec_two, + description="", + auth=auth, + ) + + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + tools=openapi_tool_one.definitions + openapi_tool_two.definitions, + ) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +### AzureAI Search + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +```python +from azure.ai.projects.models import AzureAISearchTool, ConnectionType + +async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + conn_list = await client.connections.list() + + ai_search_conn_id = "" + for conn in conn_list: + if conn.connection_type == ConnectionType.AZURE_AI_SEARCH: + ai_search_conn_id = conn.id + break + + ai_search = AzureAISearchTool( + index_connection_id=ai_search_conn_id, + index_name=AZURE_AI_SEARCH_INDEX_NAME, + ) + + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + instructions="Answer questions using your index.", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + headers={"x-ms-enable-preview": "true"}, + ) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +### Retrieving Existing `AzureAIAgent` + +An existing agent can be retrieved and reused by specifying its assistant ID: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
+ +::: zone-end + +::: zone pivot="programming-language-python" +```python +agent_definition = await client.agents.get_agent(assistant_id="your-agent-id") +agent = AzureAIAgent(client=client, definition=agent_definition) +``` +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## Deleting an `AzureAIAgent` + +Agents and their associated threads can be deleted when no longer needed: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" +```python +await client.agents.delete_thread(thread.id) +await client.agents.delete_agent(agent.id) +``` + +If working with a vector store or files, they can be deleted as well: + +```python +await client.agents.delete_file(file_id=file.id) +await client.agents.delete_vector_store(vector_store_id=vector_store.id) +``` + +> [!TIP] +> To remove a file from a vector store, use: +> ```python +> await client.agents.delete_vector_store_file(vector_store_id=vector_store.id, file_id=file.id) +> ``` +> This operation detaches the file from the vector store but does not permanently delete it. +> To fully delete the file, call: +> ```python +> await client.agents.delete_file(file_id=file.id) +> ``` + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +## How-To + +For practical examples of using an `AzureAIAgent`, see our code samples on GitHub: + +::: zone pivot="programming-language-csharp" + +> TODO(crickman) Azure AI Agents are currently unavailable in .NET. + +::: zone-end + +::: zone pivot="programming-language-python" + +- [Getting Started with Azure AI Agents](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/getting_started_with_agents/azure_ai_agent) +- [Advanced Azure AI Agent Code Samples](https://github.com/microsoft/semantic-kernel/tree/main/python/samples/concepts/agents/azure_ai_agent) + +::: zone-end + +::: zone pivot="programming-language-java" + +> Agents are currently unavailable in Java. + +::: zone-end + +> [!div class="nextstepaction"] +> [Agent Collaboration in `AgentChat`](./agent-chat.md) \ No newline at end of file diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index d1044239..7e95e1b4 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -8,7 +8,7 @@ ms.author: crickman ms.date: 09/13/2024 ms.service: semantic-kernel --- -# Exploring the _Semantic Kernel_ Chat Completion Agent +# Exploring the Semantic Kernel `ChatCompletionAgent` > [!IMPORTANT] > This feature is in the release candidate stage. Features at this stage are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. @@ -224,7 +224,7 @@ agent = ChatCompletionAgent(...) chat = ChatHistory() # Add the user message -chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +chat.add_user_message(user_input)) # Generate the agent response response = await agent.get_response(chat) @@ -240,7 +240,7 @@ agent = ChatCompletionAgent(...) 
chat = ChatHistory() # Add the user message -chat.add_user_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +chat.add_user_message(user_input) # Generate the agent response(s) async for response in agent.invoke(chat): From 1e2bd838eb8e16d8e96dc52aa173aae269a2e7d6 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Wed, 5 Mar 2025 11:27:25 -0800 Subject: [PATCH 052/117] ChatCompletion tweak --- semantic-kernel/Frameworks/agent/chat-completion-agent.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 7e95e1b4..f5a8aa81 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -1,5 +1,5 @@ --- -title: Exploring the Semantic Kernel Chat Completion Agent +title: Exploring the Semantic Kernel ChatCompletionAgent description: An exploration of the definition, behaviors, and usage patterns for a Chat Completion Agent zone_pivot_groups: programming-languages author: crickman @@ -74,7 +74,7 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic ::: zone-end -## Creating a Chat Completion Agent +## Creating a `ChatCompletionAgent` A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. From 469b32f227433400997224fe68fa7d33fbd690e2 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Wed, 5 Mar 2025 11:36:27 -0800 Subject: [PATCH 053/117] Assistant overview update --- .../Frameworks/agent/assistant-agent.md | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index c40b1926..755850dc 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -51,15 +51,13 @@ Creating an `OpenAIAssistant` requires invoking a remote service, which is handl ::: zone pivot="programming-language-csharp" ```csharp -OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - OpenAIClientProvider.ForAzureOpenAI(/*<...service configuration>*/), - new OpenAIAssistantDefinition("") - { - Name = "", - Instructions = "", - }, - new Kernel()); +AssistantClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(...).GetAssistantClient(); +Assistant assistant = + await this.AssistantClient.CreateAssistantAsync( + "", + "", + instructions: ""); +OpenAIAssistantAgent agent = new(assistant, client); ``` ::: zone-end @@ -119,11 +117,9 @@ Once created, the identifier of the assistant may be access via its identifier. For .NET, the agent identifier is exposed as a `string` via the property defined by any agent. 
```csharp -OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.RetrieveAsync( - OpenAIClientProvider.ForAzureOpenAI(/*<...service configuration>*/), - "", - new Kernel()); +AssistantClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(...).GetAssistantClient(); +Assistant assistant = await this.AssistantClient.GetAssistantAsync(""); +OpenAIAssistantAgent agent = new(assistant, client); ``` ::: zone-end @@ -220,20 +216,18 @@ await agent.delete_thread(thread_id) ## Deleting an `OpenAIAssistantAgent` -Since the assistant's definition is stored remotely, it supports the capability to self-delete. This enables the agent to be removed from the system when it is no longer needed. +Since the assistant's definition is stored remotely, it will persist if not deleted. +Deleting an assistant definition may be performed directly with the `AssistantClient`. -> Note: Attempting to use an agent instance after being deleted results in an exception. +> Note: Attempting to use an agent instance after being deleted will result in a service exception. ::: zone pivot="programming-language-csharp" For .NET, the agent identifier is exposed as a `string` via the [`Agent.Id`](/dotnet/api/microsoft.semantickernel.agents.agent.id) property defined by any agent. ```csharp -// Perform the deletion -await agent.DeleteAsync(); - -// Inspect whether an agent has been deleted -bool isDeleted = agent.IsDeleted(); +AssistantClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(...).GetAssistantClient(); +Assistant assistant = await this.AssistantClient.DeleteAssistantAsync(""); ``` ::: zone-end From dd3cb7758672146815e329a92278559e78b22bbb Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Wed, 5 Mar 2025 12:59:25 -0800 Subject: [PATCH 054/117] Update assistant "howto" --- .../Frameworks/agent/assistant-agent.md | 2 - .../examples/example-agent-collaboration.md | 4 - .../agent/examples/example-assistant-code.md | 115 +++++++++--------- .../examples/example-assistant-search.md | 110 ++++++++--------- 4 files changed, 114 insertions(+), 117 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index 755850dc..44fa6e9f 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -17,8 +17,6 @@ Detailed API documentation related to this discussion is available at: ::: zone pivot="programming-language-csharp" - [`OpenAIAssistantAgent`](/dotnet/api/microsoft.semantickernel.agents.openai.openaiassistantagent) -- [`OpenAIAssistantDefinition`](/dotnet/api/microsoft.semantickernel.agents.openai.openaiassistantdefinition) -- [`OpenAIClientProvider`](/dotnet/api/microsoft.semantickernel.agents.openai.openaiclientprovider) ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index d557c7fc..832b8fac 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -862,8 +862,6 @@ Try using these suggested inputs: 8. its good, but is it ready for my college professor? ```csharp -// Copyright (c) Microsoft. All rights reserved. - using System; using System.ComponentModel; using System.Diagnostics; @@ -1131,8 +1129,6 @@ You can try using one of the suggested inputs. As the agent chat begins, the age > You can reference any file by providing `@`. 
To reference the "WomensSuffrage" text from above, download it [here](https://github.com/microsoft/semantic-kernel/blob/main/python/samples/learn_resources/resources/WomensSuffrage.txt) and place it in your current working directory. You can then reference it with `@WomensSuffrage.txt`. ```python -# Copyright (c) Microsoft. All rights reserved. - import asyncio import os diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index be6fdc4d..aadcba51 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -208,13 +208,12 @@ The full example code is provided in the [Final](#final) section. Refer to that Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings are available and prepare the file resources. -Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `OpenAIClientProvider` that will be used for the [Agent Definition](#agent-definition) as well as file-upload. +Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `AzureOpenAIClient` that will be used for the [Agent Definition](#agent-definition) as well as file-upload. ```csharp Settings settings = new(); -OpenAIClientProvider clientProvider = - OpenAIClientProvider.ForAzureOpenAI(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); +AzureOpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); ``` ::: zone-end @@ -226,11 +225,11 @@ OpenAIClientProvider clientProvider = ::: zone pivot="programming-language-csharp" -Use the `OpenAIClientProvider` to access an `OpenAIFileClient` and upload the two data-files described in the previous [Configuration](#configuration) section, preserving the _File Reference_ for final clean-up. +Use the `AzureOpenAIClient` to access an `OpenAIFileClient` and upload the two data-files described in the previous [Configuration](#configuration) section, preserving the _File Reference_ for final clean-up. ```csharp Console.WriteLine("Uploading files..."); -OpenAIFileClient fileClient = clientProvider.Client.GetOpenAIFileClient(); +OpenAIFileClient fileClient = client.GetOpenAIFileClient(); OpenAIFile fileDataCountryDetail = await fileClient.UploadFileAsync("PopulationByAdmin1.csv", FileUploadPurpose.Assistants); OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByCountry.csv", FileUploadPurpose.Assistants); ``` @@ -303,27 +302,27 @@ We first set up the Azure OpenAI resources to obtain the client and model. Next, ::: zone pivot="programming-language-csharp" -We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. +We are now ready to instantiate an `OpenAIAssistantAgent` by first creating an assistant definition. The assistant is configured with its target model, _Instructions_, and the _Code Interpreter_ tool enabled. Additionally, we explicitly associate the two data files with the _Code Interpreter_ tool. 
```csharp Console.WriteLine("Defining agent..."); -OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - clientProvider, - new OpenAIAssistantDefinition(settings.AzureOpenAI.ChatModelDeployment) - { - Name = "SampleAssistantAgent", - Instructions = - """ - Analyze the available data to provide an answer to the user's question. - Always format response using markdown. - Always include a numerical index that starts at 1 for any lists or tables. - Always sort lists in ascending order. - """, - EnableCodeInterpreter = true, - CodeInterpreterFileIds = [fileDataCountryList.Id, fileDataCountryDetail.Id], - }, - new Kernel()); +AssistantClient assistantClient = client.GetAssistantClient(); + Assistant assistant = + await assistantClient.CreateAssistantAsync( + settings.AzureOpenAI.ChatModelDeployment, + name: "SampleAssistantAgent", + instructions: + """ + Analyze the available data to provide an answer to the user's question. + Always format response using markdown. + Always include a numerical index that starts at 1 for any lists or tables. + Always sort lists in ascending order. + """, + enableCodeInterpreter: true, + codeInterpreterFileIds: [fileDataCountryList.Id, fileDataCountryDetail.Id]); + +// Create agent +OpenAIAssistantAgent agent = new(assistant, assistantClient); ``` ::: zone-end @@ -355,7 +354,7 @@ Let's also ensure the resources are removed at the end of execution to minimize ::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Creating thread..."); -string threadId = await agent.CreateThreadAsync(); +AssistantThread thread = await assistantClient.CreateThreadAsync(); Console.WriteLine("Ready!"); @@ -374,8 +373,8 @@ finally Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - agent.DeleteThreadAsync(threadId), - agent.DeleteAsync(), + assistantClient.DeleteThreadAsync(thread.Id), + assistantClient.DeleteAssistantAsync(assistant.Id), fileClient.DeleteFileAsync(fileDataCountryList.Id), fileClient.DeleteFileAsync(fileDataCountryDetail.Id), ]); @@ -424,7 +423,7 @@ if (input.Trim().Equals("EXIT", StringComparison.OrdinalIgnoreCase)) break; } -await agent.AddChatMessageAsync(threadId, new ChatMessageContent(AuthorRole.User, input)); +await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); Console.WriteLine(); ``` @@ -542,7 +541,7 @@ To generate an `Agent` response to user input, invoke the agent by specifying th ::: zone pivot="programming-language-csharp" ```csharp bool isCode = false; -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(threadId)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) { if (isCode != (response.Metadata?.ContainsKey(OpenAIAssistantAgent.CodeInterpreterMetadataKey) ?? 
false)) { @@ -604,17 +603,21 @@ Try using these suggested inputs: ::: zone pivot="programming-language-csharp" ```csharp +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents.OpenAI; +using Microsoft.SemanticKernel.ChatCompletion; +using OpenAI; +using OpenAI.Assistants; +using OpenAI.Files; using System; +using System.ClientModel; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Threading.Tasks; -using Azure.Identity; -using Microsoft.SemanticKernel; -using Microsoft.SemanticKernel.Agents.OpenAI; -using Microsoft.SemanticKernel.ChatCompletion; -using OpenAI.Files; namespace AgentsSample; @@ -625,35 +628,39 @@ public static class Program // Load configuration from environment variables or user secrets. Settings settings = new(); - OpenAIClientProvider clientProvider = - OpenAIClientProvider.ForAzureOpenAI(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); + // Initialize the clients + AzureOpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); + //OpenAIClient client = OpenAIAssistantAgent.CreateOpenAIClient(new ApiKeyCredential(settings.OpenAI.ApiKey))); + AssistantClient assistantClient = client.GetAssistantClient(); + OpenAIFileClient fileClient = client.GetOpenAIFileClient(); + // Upload files Console.WriteLine("Uploading files..."); - OpenAIFileClient fileClient = clientProvider.Client.GetOpenAIFileClient(); OpenAIFile fileDataCountryDetail = await fileClient.UploadFileAsync("PopulationByAdmin1.csv", FileUploadPurpose.Assistants); OpenAIFile fileDataCountryList = await fileClient.UploadFileAsync("PopulationByCountry.csv", FileUploadPurpose.Assistants); - Console.WriteLine("Defining agent..."); - OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - clientProvider, - new OpenAIAssistantDefinition(settings.AzureOpenAI.ChatModelDeployment) - { - Name = "SampleAssistantAgent", - Instructions = + // Define assistant + Console.WriteLine("Defining assistant..."); + Assistant assistant = + await assistantClient.CreateAssistantAsync( + settings.AzureOpenAI.ChatModelDeployment, + name: "SampleAssistantAgent", + instructions: """ Analyze the available data to provide an answer to the user's question. Always format response using markdown. Always include a numerical index that starts at 1 for any lists or tables. Always sort lists in ascending order. 
""", - EnableCodeInterpreter = true, - CodeInterpreterFileIds = [fileDataCountryList.Id, fileDataCountryDetail.Id], - }, - new Kernel()); + enableCodeInterpreter: true, + codeInterpreterFileIds: [fileDataCountryList.Id, fileDataCountryDetail.Id]); + // Create agent + OpenAIAssistantAgent agent = new(assistant, assistantClient); + + // Create the conversation thread Console.WriteLine("Creating thread..."); - string threadId = await agent.CreateThreadAsync(); + AssistantThread thread = await assistantClient.CreateThreadAsync(); Console.WriteLine("Ready!"); @@ -676,12 +683,12 @@ public static class Program break; } - await agent.AddChatMessageAsync(threadId, new ChatMessageContent(AuthorRole.User, input)); + await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); Console.WriteLine(); bool isCode = false; - await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(threadId)) + await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) { if (isCode != (response.Metadata?.ContainsKey(OpenAIAssistantAgent.CodeInterpreterMetadataKey) ?? false)) { @@ -709,8 +716,8 @@ public static class Program Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - agent.DeleteThreadAsync(threadId), - agent.DeleteAsync(), + assistantClient.DeleteThreadAsync(thread.Id), + assistantClient.DeleteAssistantAsync(assistant.Id), fileClient.DeleteFileAsync(fileDataCountryList.Id), fileClient.DeleteFileAsync(fileDataCountryDetail.Id), ]); @@ -761,8 +768,6 @@ public static class Program ::: zone pivot="programming-language-python" ```python -# Copyright (c) Microsoft. All rights reserved. - import asyncio import logging import os diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 1aad1696..80b8c2e0 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -209,16 +209,12 @@ Prior to creating an `OpenAIAssistantAgent`, ensure the configuration settings a ::: zone pivot="programming-language-csharp" -Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `OpenAIClientProvider` that will be used for the [Agent Definition](#agent-definition) as well as file-upload and the creation of a `VectorStore`. +Instantiate the `Settings` class referenced in the previous [Configuration](#configuration) section. Use the settings to also create an `AzureOpenAIClient` that will be used for the [Agent Definition](#agent-definition) as well as file-upload and the creation of a `VectorStore`. ```csharp - Settings settings = new(); -OpenAIClientProvider clientProvider = - OpenAIClientProvider.ForAzureOpenAI( - new AzureCliCredential(), - new Uri(settings.AzureOpenAI.Endpoint)); +AzureOpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); ``` ::: zone-end @@ -242,11 +238,11 @@ Now create an empty _Vector Store for use with the _File Search_ tool: ::: zone pivot="programming-language-csharp" -Use the `OpenAIClientProvider` to access a `VectorStoreClient` and create a `VectorStore`. +Use the `AzureOpenAIClient` to access a `VectorStoreClient` and create a `VectorStore`. 
```csharp Console.WriteLine("Creating store..."); -VectorStoreClient storeClient = clientProvider.Client.GetVectorStoreClient(); +VectorStoreClient storeClient = client.GetVectorStoreClient(); CreateVectorStoreOperation operation = await storeClient.CreateVectorStoreAsync(waitUntilCompleted: true); string storeId = operation.VectorStoreId; ``` @@ -317,7 +313,7 @@ Now upload those files and add them to the _Vector Store_ by using the previousl Dictionary fileReferences = []; Console.WriteLine("Uploading files..."); -OpenAIFileClient fileClient = clientProvider.Client.GetOpenAIFileClient(); +OpenAIFileClient fileClient = client.GetOpenAIFileClient(); foreach (string fileName in _fileNames) { OpenAIFile fileInfo = await fileClient.UploadFileAsync(fileName, FileUploadPurpose.Assistants); @@ -339,27 +335,26 @@ We are now ready to instantiate an `OpenAIAssistantAgent`. The agent is configur ::: zone pivot="programming-language-csharp" -We will utilize the `OpenAIClientProvider` again as part of creating the `OpenAIAssistantAgent`: +We will utilize the `AzureOpenAIClient` again as part of creating the `OpenAIAssistantAgent`: ```csharp -Console.WriteLine("Defining agent..."); -OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - clientProvider, - new OpenAIAssistantDefinition(settings.AzureOpenAI.ChatModelDeployment) - { - Name = "SampleAssistantAgent", - Instructions = +Console.WriteLine("Defining assistant..."); +Assistant assistant = + await assistantClient.CreateAssistantAsync( + settings.AzureOpenAI.ChatModelDeployment, + name: "SampleAssistantAgent", + instructions: """ The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. 
""", - EnableFileSearch = true, - VectorStoreId = storeId, - }, - new Kernel()); + enableFileSearch: true, + vectorStoreId: storeId); + +// Create agent +OpenAIAssistantAgent agent = new(assistant, assistantClient); ``` ::: zone-end @@ -402,7 +397,7 @@ Let's also ensure the resources are removed at the end of execution to minimize ::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Creating thread..."); -string threadId = await agent.CreateThreadAsync(); +AssistantThread thread = await assistantClient.CreateThreadAsync(); Console.WriteLine("Ready!"); @@ -420,8 +415,8 @@ finally Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - agent.DeleteThreadAsync(threadId), - agent.DeleteAsync(), + assistantClient.DeleteThreadAsync(thread.Id), + assistantClient.DeleteAssistantAsync(assistant.Id), storeClient.DeleteVectorStoreAsync(storeId), ..fileReferences.Select(fileReference => fileClient.DeleteFileAsync(fileReference.Key)) ]); @@ -471,7 +466,7 @@ if (input.Trim().Equals("EXIT", StringComparison.OrdinalIgnoreCase)) break; } -await agent.AddChatMessageAsync(threadId, new ChatMessageContent(AuthorRole.User, input)); +await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); Console.WriteLine(); ``` ::: zone-end @@ -524,7 +519,7 @@ To generate an `Agent` response to user input, invoke the agent by specifying th ::: zone pivot="programming-language-csharp" ```csharp List footnotes = []; -await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(threadId)) +await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(thread.Id)) { // Capture annotations for footnotes footnotes.AddRange(chunk.Items.OfType()); @@ -586,13 +581,18 @@ Try using these suggested inputs: ::: zone pivot="programming-language-csharp" ```csharp using System; +using System.ClientModel; using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; +using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.Agents.OpenAI; using Microsoft.SemanticKernel.ChatCompletion; +using OpenAI; +using OpenAI.Assistants; using OpenAI.Files; using OpenAI.VectorStores; @@ -616,21 +616,21 @@ public static class Program // Load configuration from environment variables or user secrets. Settings settings = new(); - OpenAIClientProvider clientProvider = - OpenAIClientProvider.ForAzureOpenAI( - new AzureCliCredential(), - new Uri(settings.AzureOpenAI.Endpoint)); + // Initialize the clients + AzureOpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri(settings.AzureOpenAI.Endpoint)); + //OpenAIClient client = OpenAIAssistantAgent.CreateOpenAIClient(new ApiKeyCredential(settings.OpenAI.ApiKey))); + AssistantClient assistantClient = client.GetAssistantClient(); + OpenAIFileClient fileClient = client.GetOpenAIFileClient(); + VectorStoreClient storeClient = client.GetVectorStoreClient(); + // Create the vector store Console.WriteLine("Creating store..."); - VectorStoreClient storeClient = clientProvider.Client.GetVectorStoreClient(); CreateVectorStoreOperation operation = await storeClient.CreateVectorStoreAsync(waitUntilCompleted: true); string storeId = operation.VectorStoreId; - // Retain file references. - Dictionary fileReferences = []; - + // Upload files and retain file references. 
Console.WriteLine("Uploading files..."); - OpenAIFileClient fileClient = clientProvider.Client.GetOpenAIFileClient(); + Dictionary fileReferences = []; foreach (string fileName in _fileNames) { OpenAIFile fileInfo = await fileClient.UploadFileAsync(fileName, FileUploadPurpose.Assistants); @@ -638,28 +638,28 @@ public static class Program fileReferences.Add(fileInfo.Id, fileInfo); } - - Console.WriteLine("Defining agent..."); - OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - clientProvider, - new OpenAIAssistantDefinition(settings.AzureOpenAI.ChatModelDeployment) - { - Name = "SampleAssistantAgent", - Instructions = + // Define assistant + Console.WriteLine("Defining assistant..."); + Assistant assistant = + await assistantClient.CreateAssistantAsync( + settings.AzureOpenAI.ChatModelDeployment, + name: "SampleAssistantAgent", + instructions: """ The document store contains the text of fictional stories. Always analyze the document store to provide an answer to the user's question. Never rely on your knowledge of stories not included in the document store. Always format response using markdown. """, - EnableFileSearch = true, - VectorStoreId = storeId, - }, - new Kernel()); + enableFileSearch: true, + vectorStoreId: storeId); + // Create agent + OpenAIAssistantAgent agent = new(assistant, assistantClient); + + // Create the conversation thread Console.WriteLine("Creating thread..."); - string threadId = await agent.CreateThreadAsync(); + AssistantThread thread = await assistantClient.CreateThreadAsync(); Console.WriteLine("Ready!"); @@ -681,11 +681,11 @@ public static class Program break; } - await agent.AddChatMessageAsync(threadId, new ChatMessageContent(AuthorRole.User, input)); + await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); Console.WriteLine(); List footnotes = []; - await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(threadId)) + await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(thread.Id)) { // Capture annotations for footnotes footnotes.AddRange(chunk.Items.OfType()); @@ -713,8 +713,8 @@ public static class Program Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - agent.DeleteThreadAsync(threadId), - agent.DeleteAsync(), + assistantClient.DeleteThreadAsync(thread.Id), + assistantClient.DeleteAssistantAsync(assistant.Id), storeClient.DeleteVectorStoreAsync(storeId), ..fileReferences.Select(fileReference => fileClient.DeleteFileAsync(fileReference.Key)) ]); @@ -729,8 +729,6 @@ public static class Program ::: zone pivot="programming-language-python" ```python -# Copyright (c) Microsoft. All rights reserved. 
- import asyncio import os From aed7121129594e63bbdbc9d3195eadb06781be12 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Wed, 5 Mar 2025 13:05:24 -0800 Subject: [PATCH 055/117] Update assistant retrieval with template --- .../Frameworks/agent/agent-templates.md | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 35c851d4..dd7eb9f0 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -100,16 +100,17 @@ Templated instructions are especially powerful when working with an [`OpenAIAssi ::: zone pivot="programming-language-csharp" ```csharp // Retrieve an existing assistant definition by identifier -OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.RetrieveAsync( - this.GetClientProvider(), - "", - new Kernel(), - new KernelArguments() - { - { "topic", "Dog" }, - { "length", "3" }, - }); +AzureOpenAIClient client = OpenAIAssistantAgent.CreateAzureOpenAIClient(new AzureCliCredential(), new Uri("")); +AssistantClient assistantClient = client.GetAssistantClient(); +Assistant assistant = await client.GetAssistantAsync(); +OpenAIAssistantAgent agent = new(assistant, assistantClient, new KernelPromptTemplateFactory(), PromptTemplateConfig.SemanticKernelTemplateFormat) +{ + Arguments = new KernelArguments() + { + { "topic", "Dog" }, + { "length", "3" }, + } +} ``` ::: zone-end From fbaaf72e7d70d2ab20614b5a7e9fa95e4a712b82 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Wed, 5 Mar 2025 16:05:54 -0800 Subject: [PATCH 056/117] Update semantic-kernel/concepts/ai-services/realtime.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/concepts/ai-services/realtime.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index afdd4568..80eec9ad 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -10,8 +10,7 @@ ms.service: semantic-kernel # Realtime Multi-modal APIs -The first realtime API integration for Semantic Kernel have been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes and we might need to make breaking changes to the API in Semantic Kernel as we learn from customers how to use this and as we add other providers of these kinds of models and APIs. - +The first realtime API integration for Semantic Kernel has been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes and we might need to make breaking changes to the API in Semantic Kernel as we learn from customers how to use this and as we add other providers of these kinds of models and APIs. ## Realtime Client abstraction To support different realtime APIs from different vendors, using different protocols, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and send and receive messages. 
From fa454983a30bc2e3c69dda0a0f7ee118457273c6 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 6 Mar 2025 15:08:22 +0100 Subject: [PATCH 057/117] add some agent language --- semantic-kernel/concepts/ai-services/realtime.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index 80eec9ad..8f0e2c25 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -11,10 +11,11 @@ ms.service: semantic-kernel # Realtime Multi-modal APIs The first realtime API integration for Semantic Kernel has been added, it is currently only available in Python and considered experimental. This is because the underlying services are still being developed and are subject to changes and we might need to make breaking changes to the API in Semantic Kernel as we learn from customers how to use this and as we add other providers of these kinds of models and APIs. + ## Realtime Client abstraction To support different realtime APIs from different vendors, using different protocols, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and send and receive messages. -The client is responsible for handling the connection to the service, sending messages, and receiving messages. The client is also responsible for handling any errors that occur during the connection or message sending/receiving process. +The client is responsible for handling the connection to the service, sending messages, and receiving messages. The client is also responsible for handling any errors that occur during the connection or message sending/receiving process. Considering the way these models work, they can be considered agents more then regular chat completions, therefore they also take instructions, rather then a system message, they keep their own internal state and can be invoked to do work on our behalf. ### Realtime API From d740612f83ebe3f6eb85e24ec16474fc0e3bc0a2 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Thu, 6 Mar 2025 16:58:00 +0000 Subject: [PATCH 058/117] Adding migration guide and making updates to other parts of the docs to match --- .../embedding-generation.md | 7 +- .../how-to/build-your-own-connector.md | 36 +++-- .../vector-store-connectors/vector-search.md | 58 +++---- .../migration/vectordata-march-2025.md | 149 ++++++++++++++++++ 4 files changed, 196 insertions(+), 54 deletions(-) create mode 100644 semantic-kernel/support/migration/vectordata-march-2025.md diff --git a/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md b/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md index 736d5d59..f3769bec 100644 --- a/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md +++ b/semantic-kernel/concepts/vector-store-connectors/embedding-generation.md @@ -99,11 +99,12 @@ public async Task GenerateEmbeddingsAndSearchAsync( await textEmbeddingGenerationService.GenerateEmbeddingAsync(descriptionText); // Search using the already generated embedding. - List> searchResult = await collection.VectorizedSearchAsync(searchEmbedding).ToListAsync(); + VectorSearchResults searchResult = await collection.VectorizedSearchAsync(searchEmbedding); + List> resultItems = await searchResult.Results.ToListAsync(); // Print the first search result. 
- Console.WriteLine("Score for first result: " + searchResult.FirstOrDefault()?.Score); - Console.WriteLine("Hotel description for first result: " + searchResult.FirstOrDefault()?.Record.Description); + Console.WriteLine("Score for first result: " + resultItems.FirstOrDefault()?.Score); + Console.WriteLine("Hotel description for first result: " + resultItems.FirstOrDefault()?.Record.Description); } ``` diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md index d149432d..368d2c91 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md @@ -107,26 +107,31 @@ There may be cases where the database doesn't support excluding vectors in which returning them is acceptable. 1.9 *`IVectorizedSearch.VectorizedSearchAsync`* implementations should also -respect the `IncludeVectors` option provided via `VectorSearchOptions` where possible. +respect the `IncludeVectors` option provided via `VectorSearchOptions` where possible. 1.10 *`IVectorizedSearch.VectorizedSearchAsync`* implementations should simulate -the `Top` and `Skip` functionality requested via `VectorSearchOptions` if the database +the `Top` and `Skip` functionality requested via `VectorSearchOptions` if the database does not support this natively. To simulate this behavior, the implementation should fetch a number of results equal to Top + Skip, and then skip the first Skip number of results before returning the remaining results. 1.11 *`IVectorizedSearch.VectorizedSearchAsync`* implementations should ignore -the `IncludeTotalCount` option provided via `VectorSearchOptions` if the database +the `IncludeTotalCount` option provided via `VectorSearchOptions` if the database does not support this natively. -1.12 *`IVectorizedSearch.VectorizedSearchAsync`* implementations should default -to the first vector if the `VectorPropertyName` option was not provided via `VectorSearchOptions`. -If a user does provide this value, the expected name should be the property name from the data model -and not any customized name that the property may be stored under in the database. E.g. let's say -the user has a data model property called `TextEmbedding` and they decorated the property with a -`JsonPropertyNameAttribute` that indicates that it should be serialized as `text_embedding`. Assuming -that the database is json based, it means that the property should be stored in the database with the -name `text_embedding`. When specifying the `VectorPropertyName` option, the user should always provide +1.12 *`IVectorizedSearch.VectorizedSearchAsync`* implementations should not require +`VectorPropertyName` or `VectorProperty` to be specified if only one vector exists on the data model. +In this case that single vector should automatically become the search target. If no vector or +multiple vectors exists on the data model, and no `VectorPropertyName` or `VectorProperty` is provided +the search method should throw. + +When using `VectorPropertyName`, if a user does provide this value, the expected name should be the +property name from the data model and not any customized name that the property may be stored under +in the database. E.g. 
let's say the user has a data model property called `TextEmbedding` and they +decorated the property with a `JsonPropertyNameAttribute` that indicates that it should be serialized +as `text_embedding`. Assuming that the database is json based, it means that the property should be +stored in the database with the name `text_embedding`. + When specifying the `VectorPropertyName` option, the user should always provide `TextEmbedding` as the value. This is to ensure that where different connectors are used with the same data model, the user can always use the same property names, even though the storage name of the property may be different. @@ -299,7 +304,12 @@ To support this scenario, the connector must fulfil the following requirements: - Use the `VectorStoreRecordDefinition` to create collections / indexes. - Avoid doing reflection on the data model if a custom mapper and `VectorStoreRecordDefinition` is supplied -### 10. Support Vector Store Record Collection factory +### 10. Support Vector Store Record Collection factory (Deprecated) + +> [!IMPORTANT] +> Support for Vector Store Record Collection factories is now deprecated. The recommended pattern is to unseal +> the VectorStore class and make the `GetCollection` method virtual so that it can be overridden by developers +> who require custom construction of collections. The `IVectorStore.GetCollection` method can be used to create instances of `IVectorStoreRecordCollection`. Some connectors however may allow or require users to provide additional configuration options @@ -404,7 +414,7 @@ into small enough batches already so that parallel requests will succeed without E.g. here is an example where batching is simulated with requests happening in parallel. ```csharp -public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) +public Task DeleteBatchAsync(IEnumerable keys, CancellationToken cancellationToken = default) { if (keys == null) { diff --git a/semantic-kernel/concepts/vector-store-connectors/vector-search.md b/semantic-kernel/concepts/vector-store-connectors/vector-search.md index 27d71481..580f2a43 100644 --- a/semantic-kernel/concepts/vector-store-connectors/vector-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/vector-search.md @@ -19,7 +19,7 @@ Semantic Kernel provides vector search capabilities as part of its Vector Store ## Vector Search -The `VectorizedSearchAsync` method allows searching using data that has already been vectorized. This method takes a vector and an optional `VectorSearchOptions` class as input. +The `VectorizedSearchAsync` method allows searching using data that has already been vectorized. This method takes a vector and an optional `VectorSearchOptions` class as input. This method is available on the following interfaces: 1. `IVectorizedSearch` @@ -70,20 +70,17 @@ See [the documentation for each connector](./out-of-the-box-connectors/index.md) It is also important for the search vector type to match the target vector that is being searched, e.g. if you have two vectors on the same record with different vector types, make sure that the search vector you supply matches the type of the specific vector you are targeting. -See [VectorPropertyName](#vectorpropertyname) for how to pick a target vector if you have more than one per record. +See [VectorProperty](#vectorproperty) for how to pick a target vector if you have more than one per record. 
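+
+As a minimal sketch of the type-matching point above (the `collection`, the record type and the `GenerateEmbeddingAsync` helper are illustrative placeholders, not part of the abstraction itself), a vector property declared as `ReadOnlyMemory<float>` must be searched with a `ReadOnlyMemory<float>` search vector:
+
+```csharp
+// The target property on the data model is declared as ReadOnlyMemory<float>,
+// so the search vector has to use the same type.
+ReadOnlyMemory<float> searchVector = await GenerateEmbeddingAsync("A description of what I'm looking for");
+
+// Explicitly target that vector property so the types are guaranteed to line up.
+var results = await collection.VectorizedSearchAsync(
+    searchVector,
+    new() { VectorProperty = r => r.DescriptionEmbedding });
+```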
## Vector Search Options -The following options can be provided using the `VectorSearchOptions` class. - -### VectorPropertyName +The following options can be provided using the `VectorSearchOptions` class. -The `VectorPropertyName` option can be used to specify the name of the vector property to target during the search. -If none is provided, the first vector found on the data model or specified in the record definition will be used. +### VectorProperty -Note that when specifying the `VectorPropertyName`, use the name of the property as defined on the data model or in the record definition. -Use this property name even if the property may be stored under a different name in the vector store. The storage name may e.g. be different -because of custom serialization settings. +The `VectorProperty` option can be used to specify the vector property to target during the search. +If none is provided and the data model contains only one vector, that vector will be used. +If the data model contains no vector or multiple vectors and `VectorProperty` is not provided, the search method will throw. ```csharp using Microsoft.Extensions.VectorData; @@ -93,13 +90,13 @@ var vectorStore = new InMemoryVectorStore(); var collection = vectorStore.GetCollection("skproducts"); // Create the vector search options and indicate that we want to search the FeatureListEmbedding property. -var vectorSearchOptions = new VectorSearchOptions +var vectorSearchOptions = new VectorSearchOptions { - VectorPropertyName = nameof(Product.FeatureListEmbedding) + VectorProperty = r -> r.FeatureListEmbedding }; // This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. -var searchResult = await collection.VectorizedSearchAsync(searchVector, vectorSearchOptions).Results.ToListAsync(); +var searchResult = await collection.VectorizedSearchAsync(searchVector, vectorSearchOptions); public sealed class Product { @@ -128,7 +125,7 @@ Top and Skip can be used to do paging if you wish to retrieve a large number of ```csharp // Create the vector search options and indicate that we want to skip the first 40 results and then get the next 20. -var vectorSearchOptions = new VectorSearchOptions +var vectorSearchOptions = new VectorSearchOptions { Top = 20, Skip = 40 @@ -157,7 +154,7 @@ The default value for `IncludeVectors` is `false`. ```csharp // Create the vector search options and indicate that we want to include vectors in the search results. -var vectorSearchOptions = new VectorSearchOptions +var vectorSearchOptions = new VectorSearchOptions { IncludeVectors = true }; @@ -172,9 +169,9 @@ await foreach (var result in searchResult.Results) } ``` -### VectorSearchFilter +### Filter and OldFilter -The `VectorSearchFilter` option can be used to provide a filter for filtering the records in the chosen collection +The vector search filter option can be used to provide a filter for filtering the records in the chosen collection before applying the vector search. This has multiple benefits: @@ -191,22 +188,16 @@ set the `IsFilterable` property to true when defining your data model or when cr > [!TIP] > For more information on how to set the `IsFilterable` property, refer to [VectorStoreRecordDataAttribute parameters](./defining-your-data-model.md#vectorstorerecorddataattribute-parameters) or [VectorStoreRecordDataProperty configuration settings](./schema-with-record-definition.md#vectorstorerecorddataproperty-configuration-settings). -To create a filter use the `VectorSearchFilter` class. 
You can combine multiple filter clauses together in one `VectorSearchFilter`. -All filter clauses are combined with `and`. -Note that when providing a property name when constructing the filter, use the name of the property as defined on the data model or in the record definition. -Use this property name even if the property may be stored under a different name in the vector store. The storage name may e.g. be different -because of custom serialization settings. +Filters are expressed using LINQ expressions based on the type of the data model. +The set of LINQ expressions supported will vary depending on the functionality supported +by each database, but all databases support a broad base of common expressions, e.g. equals, +not equals, and, or, etc. ```csharp -// Filter where Category == 'External Definitions' and Tags contain 'memory'. -var filter = new VectorSearchFilter() - .EqualTo(nameof(Glossary.Category), "External Definitions") - .AnyTagEqualTo(nameof(Glossary.Tags), "memory"); - // Create the vector search options and set the filter on the options. -var vectorSearchOptions = new VectorSearchOptions +var vectorSearchOptions = new VectorSearchOptions { - Filter = filter + Filter = r => r.Category == "External Definitions" && r.Tags.Contains("memory") }; // This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. @@ -242,15 +233,6 @@ sealed class Glossary } ``` -#### EqualTo filter clause - -Use `EqualTo` for a direct comparison between property and value. - -#### AnyTagEqualTo filter clause - -Use `AnyTagEqualTo` to check if any of the strings, stored in a tag property in the vector store, contains a provided value. -For a property to be considered a tag property, it needs to be a List, array or other enumerable of string. - ::: zone-end ::: zone pivot="programming-language-python" diff --git a/semantic-kernel/support/migration/vectordata-march-2025.md b/semantic-kernel/support/migration/vectordata-march-2025.md new file mode 100644 index 00000000..1689e9b3 --- /dev/null +++ b/semantic-kernel/support/migration/vectordata-march-2025.md @@ -0,0 +1,149 @@ +--- +title: VectorData changes - March 2025 +description: Describes the changes included in the March 2025 VectorData release and how to migrate +zone_pivot_groups: programming-languages +author: westey-m +ms.topic: conceptual +ms.author: westey +ms.service: semantic-kernel +--- +::: zone pivot="programming-language-csharp" + +# VectorData changes - March 2025 + +## Linq based filtering + +When doing vector searches it is possible to create a filter (in addition to the vector similarity) +that act on data properties to constrain the list of records matched. + +This filter is changing to support more filtering options. Previously the filter would +have been expressed using a custom `VectorSearchFilter` type, but with this update the filter +would be expressed using LINQ expressions. + +The old filter clause is still preserved in a property called OldFilter, and will be removed in future. 
+ +```csharp +// Before +var searchResult = await collection.VectorizedSearchAsync( + searchVector, + new() { Filter = new VectorSearchFilter().EqualTo(nameof(Glossary.Category), "External Definitions") }); + +// After +var searchResult = await collection.VectorizedSearchAsync( + searchVector, + new() { Filter = g => g.Category == "External Definitions" }); + +// The old filter option is still available +var searchResult = await collection.VectorizedSearchAsync( + searchVector, + new() { OldFilter = new VectorSearchFilter().EqualTo(nameof(Glossary.Category), "External Definitions") }); +``` + +## Target Property Selection for Search + +When doing a vector search, it is possible to choose the vector property that the search should +be executed against. +Previously this was done via an option on the `VectorSearchOptions` class called `VectorPropertyName`. +`VectorPropertyName` was a string that could contain the name of the target property. + +`VectorPropertyName` is being obsoleted in favour of a new property called `VectorProperty`. +`VectorProperty` is an expression that references the required property directly. + +```csharp +// Before +var options = new VectorSearchOptions() { VectorPropertyName = "DescriptionEmbedding" }; + +// After +var options = new VectorSearchOptions() { VectorProperty = r => r.DescriptionEmbedding }; +``` + +Specifying `VectorProperty` will remain optional just like `VectorPropertyName` was optional. +The behavior when not specifying the property name is changing. +Previously if not specifying a target property, and more than one vector property existed on the +data model, the search would target the first available vector property in the schema. + +Since the property which is 'first' can change in many circumstances unrelated to the search code, using this +strategy is risky. We are therefore changing this behavior, so that if there are more than +one vector property, one must be chosen. + +## `VectorSearchOptions` change to generic type + +The `VectorSearchOptions` class is changing to `VectorSearchOptions`, to accomodate the +LINQ based filtering and new property selectors metioned above. + +If you are currently constructing the options class without providing the name of the options class +there will be no change. E.g. `VectorizedSearchAsync(embedding, new() { Top = 5 })`. + +On the other hand if you are specifying the options type, you will need to add the record type as a +generic parameter. + +```csharp +// Before +var options = new VectorSearchOptions() { Top = 5 }; + +// After +var options = new VectorSearchOptions() { Top = 5 }; +``` + +## Removal of collection factories in favour of inheritance/decorator pattern + +Each VectorStore implementation allows you to pass a custom factory to use for +constructing collections. This pattern is being removed and the recommended approach +is now to inherit from the VectorStore where you want custom construction and override +the GetCollection method. + +```csharp +// Before +var vectorStore = new QdrantVectorStore( + new QdrantClient("localhost"), + new() + { + VectorStoreCollectionFactory = new CustomQdrantCollectionFactory(productDefinition) + }); + +// After +public class QdrantCustomCollectionVectorStore(QdrantClient qdrantClient) : QdrantVectorStore(qdrantClient) +{ + public override IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null) + { + // custom construction logic... 
+ } +} + +var vectorStore = new QdrantCustomCollectionVectorStore(new QdrantClient("localhost")); +``` + +## Removal of DeleteRecordOptions and UpsertRecordOptions + +The `DeleteRecordOptions` and `UpsertRecordOptions` parameters have been removed from the +`DeleteAsync`, `DeleteBatchAsync`, `UpsertAsync` and `UpsertBatchAsync` methods on the +`IVectorStoreRecordCollection` interface. + +These parameters were all optional and the options classes did not contain any options to set. + +If you were passing these options in the past, you will need to remove these with this update. + +```csharp + +// Before +collection.DeleteAsync("mykey", new DeleteRecordOptions(), cancellationToken ); + +// After +collection.DeleteAsync("mykey", cancellationToken ); +``` + +::: zone-end +::: zone pivot="programming-language-python" + +## Not Applicable + +These changes are currently only applicable in C# + +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +These changes are currently only applicable in C# + +::: zone-end From b55c4518baafcdff551bba7200ec9babf7d1d9ef Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Thu, 6 Mar 2025 18:24:44 +0000 Subject: [PATCH 059/117] Adding Hybrid search page --- .../concepts/vector-store-connectors/TOC.yml | 2 + .../vector-store-connectors/hybrid-search.md | 270 ++++++++++++++++++ .../vector-store-connectors/vector-search.md | 4 +- .../migration/vectordata-march-2025.md | 1 + 4 files changed, 275 insertions(+), 2 deletions(-) create mode 100644 semantic-kernel/concepts/vector-store-connectors/hybrid-search.md diff --git a/semantic-kernel/concepts/vector-store-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/TOC.yml index b7cfa81b..85f04702 100644 --- a/semantic-kernel/concepts/vector-store-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/TOC.yml @@ -12,6 +12,8 @@ href: embedding-generation.md - name: Vector Search href: vector-search.md +- name: Hybrid Search + href: hybrid-search.md - name: Serialization of data models href: serialization.md - name: Legacy Memory Stores diff --git a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md new file mode 100644 index 00000000..52f25fc9 --- /dev/null +++ b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md @@ -0,0 +1,270 @@ +--- +title: Hybrid search using Semantic Kernel Vector Store connectors (Preview) +description: Describes the different options you can use when doing a hybrid search using Semantic Kernel vector store connectors. +zone_pivot_groups: programming-languages +author: westey-m +ms.topic: conceptual +ms.author: westey +ms.date: 03/06/2025 +ms.service: semantic-kernel +--- +# Hybrid search using Semantic Kernel Vector Store connectors (Preview) + +> [!WARNING] +> The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. + +::: zone pivot="programming-language-csharp" + +Semantic Kernel provides hybrid search capabilities as part of its Vector Store abstractions. This supports filtering and many other options, which this article will explain in more detail. + +Currently the type of hybrid search supported is based on a vector search, plus a keyword search, both of which are executed in parallel, after which a union of the two result sets +are returned. 
Sparse vector based hybrid search is not currently supported.
+
+To execute a hybrid search, your database schema needs to have a vector field and a string field with full text search capabilities enabled.
+If you are creating a collection using the semantic kernel vector storage connectors, make sure to enable the `IsFullTextSearchable` option
+on the string field that you want to target for the keywords search.
+
+> [!TIP]
+> For more information on how to enable `IsFullTextSearchable` see [VectorStoreRecordDataAttribute parameters](./defining-your-data-model.md#vectorstorerecorddataattribute-parameters)
+
+## Hybrid Search
+
+The `HybridSearchAsync` method allows searching using a vector and an `ICollection<string>` of string keywords. It also takes an optional `HybridSearchOptions<TRecord>` class as input.
+This method is available on the following interface:
+
+1. `IKeywordHybridSearch<TRecord>`
+
+Only connectors for databases that currently support vector plus keyword hybrid search are implementing this interface.
+
+Assuming you have a collection that already contains data, you can easily do a hybrid search on it. Here is an example using Qdrant.
+
+```csharp
+using Microsoft.SemanticKernel.Connectors.Qdrant;
+using Microsoft.Extensions.VectorData;
+using Qdrant.Client;
+
+// Placeholder embedding generation method.
+async Task<ReadOnlyMemory<float>> GenerateEmbeddingAsync(string textToVectorize)
+{
+    // your logic here
+}
+
+// Create a Qdrant VectorStore object and choose an existing collection that already contains records.
+IVectorStore vectorStore = new QdrantVectorStore(new QdrantClient("localhost"));
+IKeywordHybridSearch<Hotel> collection = (IKeywordHybridSearch<Hotel>)vectorStore.GetCollection<ulong, Hotel>("skhotels");
+
+// Generate a vector for your search text, using your chosen embedding generation implementation.
+ReadOnlyMemory<float> searchVector = await GenerateEmbeddingAsync("I'm looking for a hotel where customer happiness is the priority.");
+
+// Do the search, passing an options object with a Top value to limit results to the single top match.
+var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], new() { Top = 1 });
+
+// Inspect the returned hotel.
+await foreach (var record in searchResult.Results)
+{
+    Console.WriteLine("Found hotel description: " + record.Record.Description);
+    Console.WriteLine("Found record score: " + record.Score);
+}
+```
+
+> [!TIP]
+> For more information on how to generate embeddings see [embedding generation](./embedding-generation.md).
+
+## Supported Vector Types
+
+`HybridSearchAsync` takes a generic type as the vector parameter.
+The types of vectors supported by each data store vary.
+See [the documentation for each connector](./out-of-the-box-connectors/index.md) for the list of supported vector types.
+
+It is also important for the search vector type to match the target vector that is being searched, e.g. if you have two vectors
+on the same record with different vector types, make sure that the search vector you supply matches the type of the specific vector
+you are targeting.
+See [VectorProperty and AdditionalProperty](#vectorproperty-and-additionalproperty) for how to pick a target vector if you have more than one per record.
+
+## Hybrid Search Options
+
+The following options can be provided using the `VectorSearchOptions` class.
+
+### VectorProperty and AdditionalProperty
+
+The `VectorProperty` and `AdditionalProperty` options can be used to specify the vector property and full text search property to target during the search.
+ +If no `VectorProperty` is provided and the data model contains only one vector, that vector will be used. +If the data model contains no vector or multiple vectors and `VectorProperty` is not provided, the search method will throw. + +If no `AdditionalProperty` is provided and the data model contains only one full text search property, that property will be used. +If the data model contains no full text search property or multiple full text search properties and `AdditionalProperty` is not provided, the search method will throw. + +```csharp +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.Extensions.VectorData; +using Qdrant.Client; + +var vectorStore = new QdrantVectorStore(new QdrantClient("localhost")); +var collection = (IKeywordHybridSearch)vectorStore.GetCollection("skproducts"); + +// Create the vector search options and indicate that we want +// to search the DescriptionEmbedding vector property and the +// Description full text search property. +var hybridSearchOptions = new HybridSearchOptions +{ + VectorProperty = r => r.DescriptionEmbedding, + AdditionalProperty = r => r.Description +}; + +// This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. +var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], hybridSearchOptions); + +public sealed class Product +{ + [VectorStoreRecordKey] + public int Key { get; set; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string Name { get; set; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string Description { get; set; } + + [VectorStoreRecordData] + public List FeatureList { get; set; } + + [VectorStoreRecordVector(1536)] + public ReadOnlyMemory DescriptionEmbedding { get; set; } + + [VectorStoreRecordVector(1536)] + public ReadOnlyMemory FeatureListEmbedding { get; set; } +} +``` + +### Top and Skip + +The `Top` and `Skip` options allow you to limit the number of results to the Top n results and +to skip a number of results from the top of the resultset. +Top and Skip can be used to do paging if you wish to retrieve a large number of results using separate calls. + +```csharp +// Create the vector search options and indicate that we want to skip the first 40 results and then get the next 20. +var hybridSearchOptions = new HybridSearchOptions +{ + Top = 20, + Skip = 40 +}; + +// This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. +var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], hybridSearchOptions); + +// Iterate over the search results. +await foreach (var result in searchResult.Results) +{ + Console.WriteLine(result.Record.Description); +} +``` + +The default values for `Top` is 3 and `Skip` is 0. + +### IncludeVectors + +The `IncludeVectors` option allows you to specify whether you wish to return vectors in the search results. +If `false`, the vector properties on the returned model will be left null. +Using `false` can significantly reduce the amount of data retrieved from the vector store during search, +making searches more efficient. + +The default value for `IncludeVectors` is `false`. + +```csharp +// Create the hybrid search options and indicate that we want to include vectors in the search results. 
+var hybridSearchOptions = new HybridSearchOptions +{ + IncludeVectors = true +}; + +// This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. +var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], hybridSearchOptions); + +// Iterate over the search results. +await foreach (var result in searchResult.Results) +{ + Console.WriteLine(result.Record.FeatureList); +} +``` + +### Filter and OldFilter + +The vector search filter option can be used to provide a filter for filtering the records in the chosen collection +before applying the vector search. + +This has multiple benefits: + +- Reduce latency and processing cost, since only records remaining after filtering need to be compared with the search vector and therefore fewer vector comparisons have to be done. +- Limit the resultset for e.g. access control purposes, by excluding data that the user shouldn't have access to. + +Note that in order for fields to be used for filtering, many vector stores require those fields to be indexed first. +Some vector stores will allow filtering using any field, but may optionally allow indexing to improve filtering performance. + +If creating a collection via the Semantic Kernel vector store abstractions and you wish to enable filtering on a field, +set the `IsFilterable` property to true when defining your data model or when creating your record definition. + +> [!TIP] +> For more information on how to set the `IsFilterable` property, refer to [VectorStoreRecordDataAttribute parameters](./defining-your-data-model.md#vectorstorerecorddataattribute-parameters) or [VectorStoreRecordDataProperty configuration settings](./schema-with-record-definition.md#vectorstorerecorddataproperty-configuration-settings). + +Filters are expressed using LINQ expressions based on the type of the data model. +The set of LINQ expressions supported will vary depending on the functionality supported +by each database, but all databases support a broad base of common expressions, e.g. equals, +not equals, and, or, etc. + +```csharp +// Create the vector search options and set the filter on the options. +var hybridSearchOptions = new HybridSearchOptions +{ + Filter = r => r.Category == "External Definitions" && r.Tags.Contains("memory") +}; + +// This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. +var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], hybridSearchOptions); + +// Iterate over the search results. +await foreach (var result in searchResult.Results) +{ + Console.WriteLine(result.Record.Definition); +} + +sealed class Glossary +{ + [VectorStoreRecordKey] + public ulong Key { get; set; } + + // Category is marked as filterable, since we want to filter using this property. + [VectorStoreRecordData(IsFilterable = true)] + public string Category { get; set; } + + // Tags is marked as filterable, since we want to filter using this property. + [VectorStoreRecordData(IsFilterable = true)] + public List Tags { get; set; } + + [VectorStoreRecordData] + public string Term { get; set; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string Definition { get; set; } + + [VectorStoreRecordVector(1536)] + public ReadOnlyMemory DefinitionEmbedding { get; set; } +} +``` + +::: zone-end +::: zone pivot="programming-language-python" + +## Coming soon + +More information coming soon. 
+ +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +More information coming soon. + +::: zone-end diff --git a/semantic-kernel/concepts/vector-store-connectors/vector-search.md b/semantic-kernel/concepts/vector-store-connectors/vector-search.md index 580f2a43..fd661aaf 100644 --- a/semantic-kernel/concepts/vector-store-connectors/vector-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/vector-search.md @@ -64,7 +64,7 @@ await foreach (var record in searchResult.Results) ## Supported Vector Types `VectorizedSearchAsync` takes a generic type as the vector parameter. -The types of vectors supported y each data store vary. +The types of vectors supported by each data store vary. See [the documentation for each connector](./out-of-the-box-connectors/index.md) for the list of supported vector types. It is also important for the search vector type to match the target vector that is being searched, e.g. if you have two vectors @@ -92,7 +92,7 @@ var collection = vectorStore.GetCollection("skproducts"); // Create the vector search options and indicate that we want to search the FeatureListEmbedding property. var vectorSearchOptions = new VectorSearchOptions { - VectorProperty = r -> r.FeatureListEmbedding + VectorProperty = r => r.FeatureListEmbedding }; // This snippet assumes searchVector is already provided, having been created using the embedding model of your choice. diff --git a/semantic-kernel/support/migration/vectordata-march-2025.md b/semantic-kernel/support/migration/vectordata-march-2025.md index 1689e9b3..31eed51a 100644 --- a/semantic-kernel/support/migration/vectordata-march-2025.md +++ b/semantic-kernel/support/migration/vectordata-march-2025.md @@ -5,6 +5,7 @@ zone_pivot_groups: programming-languages author: westey-m ms.topic: conceptual ms.author: westey +ms.date: 03/06/2025 ms.service: semantic-kernel --- ::: zone pivot="programming-language-csharp" From 1f520a47e2eb5ee0c7631978c2c0c476b9bc48e6 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Thu, 6 Mar 2025 19:26:07 +0000 Subject: [PATCH 060/117] Make small improvements to march25 updates --- .../how-to/build-your-own-connector.md | 2 +- .../vector-store-connectors/hybrid-search.md | 12 ++++++------ .../vector-store-connectors/vector-search.md | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md index 368d2c91..f23a4668 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md @@ -422,7 +422,7 @@ public Task DeleteBatchAsync(IEnumerable keys, CancellationToken cancell } // Remove records in parallel. - var tasks = keys.Select(key => this.DeleteAsync(key, options, cancellationToken)); + var tasks = keys.Select(key => this.DeleteAsync(key, cancellationToken)); return Task.WhenAll(tasks); } ``` diff --git a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md index 52f25fc9..a11df0af 100644 --- a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md @@ -22,10 +22,10 @@ are returned. Sparse vector based hybrid search is not currently supported. 
To execute a hybrid search, your database schema needs to have a vector field and a string field with full text search capabilities enabled. If you are creating a collection using the semantic kernel vector storage connectors, make sure to enable the `IsFullTextSearchable` option -on the string field that you want to target for the keywords search. +on the string field that you want to target for the keyword search. > [!TIP] -> For more information on how to enable `IsFullTextSearchable` see [VectorStoreRecordDataAttribute parameters](./defining-your-data-model.md#vectorstorerecorddataattribute-parameters) +> For more information on how to enable `IsFullTextSearchable` refer to [VectorStoreRecordDataAttribute parameters](./defining-your-data-model.md#vectorstorerecorddataattribute-parameters) or [VectorStoreRecordDataProperty configuration settings](./schema-with-record-definition.md#vectorstorerecorddataproperty-configuration-settings) ## Hybrid Search @@ -83,7 +83,7 @@ See [VectorProperty and AdditionalProperty](#vectorproperty-and-additionalproper ## Hybrid Search Options -The following options can be provided using the `VectorSearchOptions` class. +The following options can be provided using the `HybridSearchOptions` class. ### VectorProperty and AdditionalProperty @@ -103,7 +103,7 @@ using Qdrant.Client; var vectorStore = new QdrantVectorStore(new QdrantClient("localhost")); var collection = (IKeywordHybridSearch)vectorStore.GetCollection("skproducts"); -// Create the vector search options and indicate that we want +// Create the hybrid search options and indicate that we want // to search the DescriptionEmbedding vector property and the // Description full text search property. var hybridSearchOptions = new HybridSearchOptions @@ -189,7 +189,7 @@ await foreach (var result in searchResult.Results) } ``` -### Filter and OldFilter +### Filter The vector search filter option can be used to provide a filter for filtering the records in the chosen collection before applying the vector search. @@ -214,7 +214,7 @@ by each database, but all databases support a broad base of common expressions, not equals, and, or, etc. ```csharp -// Create the vector search options and set the filter on the options. +// Create the hybrid search options and set the filter on the options. var hybridSearchOptions = new HybridSearchOptions { Filter = r => r.Category == "External Definitions" && r.Tags.Contains("memory") diff --git a/semantic-kernel/concepts/vector-store-connectors/vector-search.md b/semantic-kernel/concepts/vector-store-connectors/vector-search.md index fd661aaf..8d6855ac 100644 --- a/semantic-kernel/concepts/vector-store-connectors/vector-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/vector-search.md @@ -169,7 +169,7 @@ await foreach (var result in searchResult.Results) } ``` -### Filter and OldFilter +### Filter The vector search filter option can be used to provide a filter for filtering the records in the chosen collection before applying the vector search. 
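The renamed `Filter` option above pairs with the LINQ-based filtering introduced in the March 2025 migration. As a minimal sketch of what a filtered vector search looks like after these changes — assuming the `Glossary` model from the migration guide earlier in this series, an existing `collection` for it, and a `searchVector` produced by your own embedding generation — the call shape is roughly:

```csharp
// Sketch only: collection and searchVector are assumed from the surrounding docs.
var searchOptions = new VectorSearchOptions<Glossary>
{
    Top = 3,
    Filter = g => g.Category == "External Definitions" && g.Tags.Contains("memory")
};

var searchResult = await collection.VectorizedSearchAsync(searchVector, searchOptions);

await foreach (var result in searchResult.Results)
{
    Console.WriteLine(result.Record.Definition);
}
```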
From eb1019997ad9411fc7751b9c520174c4f475822a Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 6 Mar 2025 14:17:53 -0800 Subject: [PATCH 061/117] Improve Python plugin docs part 1 --- .../concepts/plugins/adding-native-plugins.md | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/semantic-kernel/concepts/plugins/adding-native-plugins.md b/semantic-kernel/concepts/plugins/adding-native-plugins.md index 68d3dcba..58b17f75 100644 --- a/semantic-kernel/concepts/plugins/adding-native-plugins.md +++ b/semantic-kernel/concepts/plugins/adding-native-plugins.md @@ -110,10 +110,10 @@ class LightsPlugin: ::: zone-end +::: zone pivot="programming-language-csharp" > [!TIP] > Because the LLMs are predominantly trained on Python code, it is recommended to use snake_case for function names and parameters (even if you're using C# or Java). This will help the AI agent better understand the function and its parameters. -::: zone pivot="programming-language-csharp" > [!TIP] > Your functions can specify `Kernel`, `KernelArguments`, `ILoggerFactory`, `ILogger`, `IAIServiceSelector`, `CultureInfo`, `IFormatProvider`, `CancellationToken` as parameters and these will not be advertised to the LLM and will be automatically set when the function is called. > If you rely on `KernelArguments` instead of explicit input arguments then your code will be responsible for performing type conversions. @@ -156,14 +156,20 @@ public enum Brightness ::: zone pivot="programming-language-python" ```python +from enum import Enum from typing import TypedDict +class Brightness(Enum): + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + class LightModel(TypedDict): id: int name: str is_on: bool | None - brightness: int | None - hex: str | None + brightness: Brightness | None + color: Annotated[str | None, "The color of the light with a hex code (ensure you include the # symbol)"] ``` ::: zone-end @@ -176,8 +182,24 @@ class LightModel(TypedDict): > [!NOTE] > While this is a "fun" example, it does a good job showing just how complex a plugin's parameters can be. In this single case, we have a complex object with _four_ different types of properties: an integer, string, boolean, and enum. Semantic Kernel's value is that it can automatically generate the schema for this object and pass it to the AI agent and marshal the parameters generated by the AI agent into the correct object. +::: zone pivot="programming-language-csharp" + +Once you're done authoring your plugin class, you can add it to the kernel using the `AddFromType<>` or `AddFromObject` methods. + +::: zone-end + +::: zone pivot="programming-language-python" + +Once you're done authoring your plugin class, you can add it to the kernel using the `add_plugin` method. + +::: zone-end + +::: zone pivot="programming-language-java" + Once you're done authoring your plugin class, you can add it to the kernel using the `AddFromType<>` or `AddFromObject` methods. +::: zone-end + > [!TIP] > When creating a function, always ask yourself "how can I give the AI additional help to use this function?" This can include using specific input types (avoid strings where possible), providing descriptions, and examples. 
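The patch above points at the `add_plugin` method without showing it in use at this point in the file. As a minimal, hedged sketch of how the pieces fit together — the sample light data, the plugin name `"Lights"`, and the function name `"get_lights"` are illustrative assumptions rather than part of the original guide — registration and a direct invocation look roughly like this:

```python
import asyncio

from semantic_kernel import Kernel

# Sample data shaped like the LightModel TypedDict above (assumed values).
lights = [
    {"id": 1, "name": "Table Lamp", "is_on": False, "brightness": Brightness.MEDIUM, "color": "#FFFFFF"},
]


async def main():
    kernel = Kernel()

    # Register the plugin under a name the model and your code can refer to.
    kernel.add_plugin(LightsPlugin(lights), plugin_name="Lights")

    # Invoke one of the plugin's functions directly; "get_lights" assumes the
    # name given in the plugin's @kernel_function decorator.
    result = await kernel.invoke(plugin_name="Lights", function_name="get_lights")
    print(result)


asyncio.run(main())
```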
From 79e8f9c328d9f6788b48e9cbb2c0d4410f79657a Mon Sep 17 00:00:00 2001
From: Chris Rickman
Date: Thu, 6 Mar 2025 15:09:15 -0800
Subject: [PATCH 062/117] Updated AzureAIAgent and consistency updates

---
 .../Frameworks/agent/assistant-agent.md       |  36 ++
 .../Frameworks/agent/azure-ai-agent.md        | 319 +++++++++++++-----
 .../Frameworks/agent/chat-completion-agent.md |  31 ++
 3 files changed, 308 insertions(+), 78 deletions(-)

diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md
index 44fa6e9f..31a6b0ea 100644
--- a/semantic-kernel/Frameworks/agent/assistant-agent.md
+++ b/semantic-kernel/Frameworks/agent/assistant-agent.md
@@ -43,6 +43,42 @@ The _OpenAI Assistant API_ is a specialized interface designed for more advanced
 
- [Assistant API in Azure](/azure/ai-services/openai/assistants-quickstart)
 
+## Preparing Your Development Environment
+
+To proceed with developing an `OpenAIAssistantAgent`, configure your development environment with the appropriate packages.
+
+::: zone pivot="programming-language-csharp"
+
+Add the `Microsoft.SemanticKernel.Agents.OpenAI` package to your project:
+
+```pwsh
+dotnet add package Microsoft.SemanticKernel.Agents.OpenAI --prerelease
+```
+
+You may also want to include the `Azure.Identity` package:
+
+```pwsh
+dotnet add package Azure.Identity
+```
+::: zone-end
+
+::: zone pivot="programming-language-python"
+
+Install the `semantic-kernel` package with the optional _Azure_ dependencies:
+
+```bash
+pip install semantic-kernel[azure]
+```
+
+::: zone-end
+
+::: zone pivot="programming-language-java"
+
+> Agents are currently unavailable in Java.
+
+::: zone-end
+
+
 ## Creating an `OpenAIAssistantAgent`
 
 Creating an `OpenAIAssistant` requires invoking a remote service, which is handled asynchronously. To manage this, the `OpenAIAssistantAgent` is instantiated through a static factory method, ensuring the process occurs in a non-blocking manner. This method abstracts the complexity of the asynchronous call, returning a promise or future once the assistant is fully initialized and ready for use.
diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
index 1eb27431..e15aaa6d 100644
--- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md
+++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
@@ -17,7 +17,7 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-csharp"
 
-> TODO(crickman) Azure AI Agents are currently unavailable in .NET.
+- [`OpenAIAssistantAgent`](dotnet/api/microsoft.semantickernel.agents.azureai)
 
 ::: zone-end
 
@@ -37,43 +37,40 @@ Detailed API documentation related to this discussion is available at:
 
 An `AzureAIAgent` is a specialized agent within the Semantic Kernel framework, designed to provide advanced conversational capabilities with seamless tool integration. It automates tool calling, eliminating the need for manual parsing and invocation. The agent also securely manages conversation history using threads, reducing the overhead of maintaining state. Additionally, the `AzureAIAgent` supports a variety of built-in tools, including file retrieval, code execution, and data interaction via Bing, Azure AI Search, Azure Functions, and OpenAPI.
 
+To use an `AzureAIAgent`, an _Azure AI Foundry Project_ must be utilized.
The following articles provide an overview of the _Azure AI Foundry_, how to create and configure a project, and the agent service: -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +- [What is Azure AI Foundry?](azure/ai-foundry/what-is-ai-foundry) +- [The Azure AI Foundry SDK](azure/ai-foundry/how-to/develop/sdk-overview) +- [What is Azure AI Agent Service](azure/ai-services/agents/overview) +- [Quickstart: Create a new agent](azure/ai-services/agents/quickstart) -::: zone-end +## Preparing Your Development Environment -::: zone pivot="programming-language-python" +To proceed with developing an `AzureAIAgent`, configure your development environment with the appropriate packages. -To set up the required resources, follow the "Quickstart: Create a new agent" guide [here](/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure). +::: zone pivot="programming-language-csharp" -You will need to install the optional Semantic Kernel azure dependencies if you haven't already via: +Add the `Microsoft.SemanticKernel.Agents.AzureAI` package to your project: -```bash -pip install semantic-kernel[azure] +```pwsh +dotnet add package Microsoft.SemanticKernel.Agents.AzureAI --prerelease ``` -Before running an `AzureAIAgent`, modify your .env file to include: +You may also want to include the `Azure.Identity` package: -```bash -AZURE_AI_AGENT_PROJECT_CONNECTION_STRING = "" -AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" +```pwsh +dotnet add package Azure.Identity ``` -or - -```bash -AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" -AZURE_AI_AGENT_ENDPOINT = "" -AZURE_AI_AGENT_SUBSCRIPTION_ID = "" -AZURE_AI_AGENT_RESOURCE_GROUP_NAME = "" -AZURE_AI_AGENT_PROJECT_NAME = "" -``` +::: zone-end -The project connection string is of the following format: `;;;`. See here for information on obtaining the values to populate the connection string. +::: zone pivot="programming-language-python" -The `.env` should be placed in the root directory. +Install the `semantic-kernel` package with the optional _Azure_ dependencies: +```bash +pip install semantic-kernel[azure] +``` ::: zone-end ::: zone pivot="programming-language-java" @@ -82,20 +79,46 @@ The `.env` should be placed in the root directory. ::: zone-end -## Configuring the AI Project Client -Ensure that your `AzureAIAgent` resources are configured with at least a Basic or Standard SKU (the Standard SKU is required to do more advanced operations like AI Search). +## Configuring the AI Project Client -To begin, create the project client as follows: +Accessing an `AzureAIAgent` first requires the creation of a project client that is configured for a specific _Foundry Project_, most commonly by providing a connection string ([The Azure AI Foundry SDK: Getting Started with Projects](azure/ai-foundry/how-to/develop/sdk-overview#get-started-with-projects)). ::: zone pivot="programming-language-csharp" -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
+```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +``` + +The `AgentsClient` may be accessed from the `AIProjectClient`: + +```c# +AgentsClient agentsClient = client.GetAgentsClient(); +``` ::: zone-end ::: zone pivot="programming-language-python" +Modify your the `.env` file in the root directory to include: + +```bash +AZURE_AI_AGENT_PROJECT_CONNECTION_STRING = "" +AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" +``` + +or + +```bash +AZURE_AI_AGENT_ENDPOINT = "" +AZURE_AI_AGENT_SUBSCRIPTION_ID = "" +AZURE_AI_AGENT_RESOURCE_GROUP_NAME = "" +AZURE_AI_AGENT_PROJECT_NAME = "" +AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" +``` + +Once the configuration is defined, the client may be created: + ```python async with ( DefaultAzureCredential() as creds, @@ -118,8 +141,20 @@ To create an `AzureAIAgent`, you start by configuring and initializing the agent ::: zone pivot="programming-language-csharp" -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +// 1. Define an agent on the Azure AI agent service +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: ""); +// 2. Create a Semantic Kernel agent based on the agent definition +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end ::: zone pivot="programming-language-python" @@ -134,14 +169,14 @@ async with ( DefaultAzureCredential() as creds, AzureAIAgent.create_client(credential=creds) as client, ): - # 1. Create an agent on the Azure AI agent service + # 1. Define an agent on the Azure AI agent service agent_definition = await client.agents.create_agent( model=ai_agent_settings.model_deployment_name, name="", instructions="", ) - # 2. Create a Semantic Kernel agent to use the Azure AI agent + # 2. Create a Semantic Kernel agent based on the agent definition agent = AzureAIAgent( client=client, definition=agent_definition, @@ -161,13 +196,26 @@ async with ( Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread: ::: zone pivot="programming-language-csharp" - -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
- +```c# +AgentThread thread = await agentsClient.CreateThreadAsync(); +try +{ + ChatMessageContent message = new(AuthorRole.User, ""); + await agent.AddChatMessageAsync(threadId, message); + await foreach (ChatMessageContent response in agent.InvokeAsync(thread.Id)) + { + Console.WriteLine(response.Content); + } +} +finally +{ + await this.AgentsClient.DeleteThreadAsync(thread.Id); + await this.AgentsClient.DeleteAgentAsync(agent.Id); +} +``` ::: zone-end ::: zone pivot="programming-language-python" - ```python USER_INPUTS = ["Hello", "What's your name?"] @@ -182,22 +230,36 @@ finally: await client.agents.delete_thread(thread.id) ``` -Python also supports invoking an agent in a streaming and a non-streaming fashion: +Optionally, an agent may be invoked as: ```python -# Streaming for user_input in USER_INPUTS: await agent.add_chat_message(thread_id=thread.id, message=user_input) - async for content in agent.invoke_stream(thread_id=thread.id): - print(content.content, end="", flush=True) + async for content in agent.invoke(thread_id=thread.id): + print(content.content) ``` +::: zone-end + +An agent may also produce a _streamed_ response: + +::: zone pivot="programming-language-csharp" +```c# +ChatMessageContent message = new(AuthorRole.User, ""); +await agent.AddChatMessageAsync(threadId, message); +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) +{ + Console.Write(response.Content); +} +``` +::: zone-end + +::: zone pivot="programming-language-python" ```python -# Non-streaming for user_input in USER_INPUTS: await agent.add_chat_message(thread_id=thread.id, message=user_input) - async for content in agent.invoke(thread_id=thread.id): - print(content.content) + async for content in agent.invoke_stream(thread_id=thread.id): + print(content.content, end="", flush=True) ``` ::: zone-end @@ -212,13 +274,22 @@ for user_input in USER_INPUTS: Semantic Kernel supports extending an `AzureAIAgent` with custom plugins for enhanced functionality: ::: zone pivot="programming-language-csharp" - -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. - +```c# +Plugin plugin = KernelPluginFactory.CreateFromType(); +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: ""); + +AzureAIAgent agent = new(definition, agentsClient, plugins: [plugin]); +``` ::: zone-end ::: zone pivot="programming-language-python" - ```python from semantic_kernel.functions import kernel_function @@ -253,14 +324,39 @@ async with ( ## Advanced Features -An `AzureAIAgent` can leverage advanced tools such as code interpreters, file search, OpenAPI and Azure AI Search integration for dynamic and powerful interactions: +An `AzureAIAgent` can leverage advanced tools such as: -### Code Interpreter +- [Code Interpreter](#code-interpreter) +- [File Search](#file-search) +- [OpenAPI integration](#openapi-integration) +- [Azure AI Search integration](#azureai-search-integration) -::: zone pivot="programming-language-csharp" +### Code Interpreter -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +Code Interpreter allows the agents to write and run Python code in a sandboxed execution environment ([Azure AI Agent Service Code Interpreter](azure/ai-services/agents/how-to/tools/code-interpreter)). 
+::: zone pivot="programming-language-csharp" +```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: "", + tools: [new CodeInterpreterToolDefinition()], + toolResources: + new() + { + CodeInterpreter = new() + { + FileIds = { ... }, + } + })); + +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end ::: zone pivot="programming-language-python" @@ -288,10 +384,31 @@ async with ( ### File Search -::: zone pivot="programming-language-csharp" +File search augments agents with knowledge from outside its model ([Azure AI Agent Service File Search Tool](azure/ai-services/agents/how-to/tools/file-search)). -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +::: zone pivot="programming-language-csharp" +```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: "", + tools: [new FileSearchToolDefinition()], + toolResources: + new() + { + FileSearch = new() + { + VectorStoreIds = { ... }, + } + })); + +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end ::: zone pivot="programming-language-python" @@ -319,14 +436,34 @@ async with ( ### OpenAPI Integration -::: zone pivot="programming-language-csharp" - -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +Connects your agent to an external API ([How to use Azure AI Agent Service with OpenAPI Specified Tools](azure/ai-services/agents/how-to/tools/openapi-spec)). +::: zone pivot="programming-language-csharp" +```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +string apiJsonSpecification = ...; // An Open API JSON specification + +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: "", + tools: [ + new OpenApiToolDefinition( + "", + "", + BinaryData.FromString(apiJsonSpecification), + new OpenApiAnonymousAuthDetails()) + ], +); + +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end ::: zone pivot="programming-language-python" - ```python from azure.ai.projects.models import OpenApiTool, OpenApiAnonymousAuthDetails @@ -368,16 +505,38 @@ async with ( ::: zone-end -### AzureAI Search - -::: zone pivot="programming-language-csharp" +### AzureAI Search Integration -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +Use an existing Azure AI Search index with with your agent ([Use an existing AI Search index](azure/ai-services/agents/how-to/tools/azure-ai-search)). 
+::: zone pivot="programming-language-csharp" +```c# +AIProjectClient client = AzureAIAgent.CreateAzureAIClient("", new AzureCliCredential()); +AgentsClient agentsClient = client.GetAgentsClient(); + +ConnectionsClient cxnClient = client.GetConnectionsClient(); +ListConnectionsResponse searchConnections = await cxnClient.GetConnectionsAsync(AzureAIP.ConnectionType.AzureAISearch); +ConnectionResponse searchConnection = searchConnections.Value[0]; + +Agent definition = agentsClient.CreateAgentAsync( + "", + name: "", + description: "", + instructions: "", + tools: [new AzureAIP.AzureAISearchToolDefinition()], + toolResources: new() + { + AzureAISearch = new() + { + IndexList = { new AzureAIP.IndexResource(searchConnection.Id, "") } + } + }); + +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end ::: zone pivot="programming-language-python" - ```python from azure.ai.projects.models import AzureAISearchTool, ConnectionType @@ -420,7 +579,10 @@ An existing agent can be retrieved and reused by specifying its assistant ID: ::: zone pivot="programming-language-csharp" -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. +```c# +Agent definition = agentsClient.GetAgentAsync(""); +AzureAIAgent agent = new(definition, agentsClient); +``` ::: zone-end @@ -443,8 +605,10 @@ Agents and their associated threads can be deleted when no longer needed: ::: zone pivot="programming-language-csharp" -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. - +```c# +await agentsClient.DeleteThreadAsync(thread.Id); +await agentsClient.DeleteAgentAsync(agent.Id); +``` ::: zone-end ::: zone pivot="programming-language-python" @@ -453,24 +617,20 @@ await client.agents.delete_thread(thread.id) await client.agents.delete_agent(agent.id) ``` -If working with a vector store or files, they can be deleted as well: +If working with a vector store or files, they may be deleted as well: + +::: zone pivot="programming-language-csharp" +```c# +await agentsClient.DeleteVectorStoreAsync(""); +await agentsClient.DeleteFileAsync(""); +``` +::: zone-end +::: zone pivot="programming-language-python" ```python await client.agents.delete_file(file_id=file.id) await client.agents.delete_vector_store(vector_store_id=vector_store.id) ``` - -> [!TIP] -> To remove a file from a vector store, use: -> ```python -> await client.agents.delete_vector_store_file(vector_store_id=vector_store.id, file_id=file.id) -> ``` -> This operation detaches the file from the vector store but does not permanently delete it. -> To fully delete the file, call: -> ```python -> await client.agents.delete_file(file_id=file.id) -> ``` - ::: zone-end ::: zone pivot="programming-language-java" @@ -479,13 +639,16 @@ await client.agents.delete_vector_store(vector_store_id=vector_store.id) ::: zone-end +> More information on the _file search_ tool is described in the [Azure AI Agent Service file search tool](azure/ai-services/agents/how-to/tools/file-search) article. + ## How-To For practical examples of using an `AzureAIAgent`, see our code samples on GitHub: ::: zone pivot="programming-language-csharp" -> TODO(crickman) Azure AI Agents are currently unavailable in .NET. 
+- [Getting Started with Azure AI Agents](https://github.com/microsoft/semantic-kernel/tree/main/dotnet/samples/GettingStartedWithAgents/AzureAIAgent)
+- [Advanced Azure AI Agent Code Samples](https://github.com/microsoft/semantic-kernel/tree/main/dotnet/samples/Concepts/Agents)
 ::: zone-end
diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
index 1ed293ef..fe38eb9a 100644
--- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md
+++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
@@ -74,6 +74,37 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic
 ::: zone-end
 
+## Preparing Your Development Environment
+
+To proceed with developing a `ChatCompletionAgent`, configure your development environment with the appropriate packages.
+
+::: zone pivot="programming-language-csharp"
+
+Add the `Microsoft.SemanticKernel.Agents.Core` package to your project:
+
+```pwsh
+dotnet add package Microsoft.SemanticKernel.Agents.Core --prerelease
+```
+
+::: zone-end
+
+::: zone pivot="programming-language-python"
+
+Install the `semantic-kernel` package:
+
+```bash
+pip install semantic-kernel
+```
+
+::: zone-end
+
+::: zone pivot="programming-language-java"
+
+> Agents are currently unavailable in Java.
+
+::: zone-end
+
+
 ## Creating a `ChatCompletionAgent`
 
 A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance.
From 588d7a52aafcdff551bba7200ec9babf7d1d9ef Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Thu, 6 Mar 2025 15:32:23 -0800
Subject: [PATCH 063/117] Improve Python plugin docs part 2

---
 .../concepts/plugins/adding-native-plugins.md | 55 ++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)

diff --git a/semantic-kernel/concepts/plugins/adding-native-plugins.md b/semantic-kernel/concepts/plugins/adding-native-plugins.md
index 58b17f75..790af385 100644
--- a/semantic-kernel/concepts/plugins/adding-native-plugins.md
+++ b/semantic-kernel/concepts/plugins/adding-native-plugins.md
@@ -18,6 +18,7 @@ Behind the scenes, Semantic Kernel will then use the descriptions you provide, a
 ## Providing the LLM with the right information
 
 When authoring a plugin, you need to provide the AI agent with the right information to understand the capabilities of the plugin and its functions. This includes:
+
 - The name of the plugin
 - The names of the functions
 - The descriptions of the functions
@@ -34,6 +35,7 @@ Below, we'll walk through the two different ways of providing your AI agent with
 The easiest way to create a native plugin is to start with a class and then add methods annotated with the `KernelFunction` attribute. It is also recommended to liberally use the `Description` annotation to provide the AI agent with the necessary information to understand the function.
::: zone pivot="programming-language-csharp" + ```csharp public class LightsPlugin { @@ -73,9 +75,11 @@ public class LightsPlugin } } ``` + ::: zone-end ::: zone pivot="programming-language-python" + ```python from typing import List, Optional, Annotated @@ -102,6 +106,7 @@ class LightsPlugin: return light return None ``` + ::: zone-end ::: zone pivot="programming-language-java" @@ -122,6 +127,7 @@ class LightsPlugin: If your function has a complex object as an input variable, Semantic Kernel will also generate a schema for that object and pass it to the AI agent. Similar to functions, you should provide `Description` annotations for properties that are non-obvious to the AI. Below is the definition for the `LightState` class and the `Brightness` enum. ::: zone pivot="programming-language-csharp" + ```csharp using System.Text.Json.Serialization; @@ -152,9 +158,11 @@ public enum Brightness High } ``` + ::: zone-end ::: zone pivot="programming-language-python" + ```python from enum import Enum from typing import TypedDict @@ -171,6 +179,7 @@ class LightModel(TypedDict): brightness: Brightness | None color: Annotated[str | None, "The color of the light with a hex code (ensure you include the # symbol)"] ``` + ::: zone-end ::: zone pivot="programming-language-java" @@ -204,6 +213,7 @@ Once you're done authoring your plugin class, you can add it to the kernel using > When creating a function, always ask yourself "how can I give the AI additional help to use this function?" This can include using specific input types (avoid strings where possible), providing descriptions, and examples. ::: zone pivot="programming-language-csharp" + #### Adding a plugin using the `AddFromObject` method The `AddFromObject` method allows you to add an instance of the plugin class directly to the plugin collection in case you want to directly control how the plugin is constructed. @@ -347,9 +357,11 @@ builder.Services.AddTransient((serviceProvider)=> { return new Kernel(serviceProvider, pluginCollection); }); ``` + ::: zone-end ::: zone pivot="programming-language-python" + #### Adding a plugin using the `add_plugin` method The `add_plugin` method allows you to add a plugin instance to the kernel. Below is an example of how you can construct the `LightsPlugin` class and add it to the kernel. @@ -371,8 +383,8 @@ lights_plugin = LightsPlugin(lights) # Add the plugin to the kernel kernel.add_plugin(lights_plugin) ``` -::: zone-end +::: zone-end ::: zone pivot="programming-language-java" @@ -519,7 +531,48 @@ This approach eliminates the need to manually provide and update the return type ::: zone-end +::: zone pivot="programming-language-python" + +### Providing more details about the functions + +When creating a plugin in Python, you can provide additional information about the functions in the `kernel_function` decorator. This information will be used by the AI agent to understand the functions better. 
+ +```python + +from typing import List, Optional, Annotated + +class LightsPlugin: + def __init__(self, lights: List[LightModel]): + self._lights = lights + + @kernel_function(name="GetLights", description="Gets a list of lights and their current state") + async def get_lights(self) -> List[LightModel]: + """Gets a list of lights and their current state.""" + return self._lights + + @kernel_function(name="ChangeState", description="Changes the state of the light") + async def change_state( + self, + change_state: LightModel + ) -> Optional[LightModel]: + """Changes the state of the light.""" + for light in self._lights: + if light["id"] == change_state["id"]: + light["is_on"] = change_state.get("is_on", light["is_on"]) + light["brightness"] = change_state.get("brightness", light["brightness"]) + light["hex"] = change_state.get("hex", light["hex"]) + return light + return None +``` + +The sample above shows how to override the function name and provide a description for the function. By default, the function name is the name of the function and the description is empty. If the function name is descriptive enough, you won't need a description, which will save you tokens. However, if the function behavior is not obvious from the name, you should provide a description for the AI. + +Because the LLMs are predominantly trained on Python code, it is recommended to use function names that follow the [Python naming conventions](https://peps.python.org/pep-0008/#function-and-variable-names), which means you rarely need to override the function names if you follow the conventions in your Python code. + +::: zone-end + ## Next steps + Now that you know how to create a plugin, you can now learn how to use them with your AI agent. Depending on the type of functions you've added to your plugins, there are different patterns you should follow. For retrieval functions, refer to the [using retrieval functions](./using-data-retrieval-functions-for-rag.md) article. For task automation functions, refer to the [using task automation functions](./using-task-automation-functions.md) article. > [!div class="nextstepaction"] From 6dd37770fd269067c818778d99bd0bcb8c7636b3 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 6 Mar 2025 15:36:51 -0800 Subject: [PATCH 064/117] remove empty line --- semantic-kernel/concepts/plugins/adding-native-plugins.md | 1 - 1 file changed, 1 deletion(-) diff --git a/semantic-kernel/concepts/plugins/adding-native-plugins.md b/semantic-kernel/concepts/plugins/adding-native-plugins.md index 790af385..f2f8be4e 100644 --- a/semantic-kernel/concepts/plugins/adding-native-plugins.md +++ b/semantic-kernel/concepts/plugins/adding-native-plugins.md @@ -538,7 +538,6 @@ This approach eliminates the need to manually provide and update the return type When creating a plugin in Python, you can provide additional information about the functions in the `kernel_function` decorator. This information will be used by the AI agent to understand the functions better. 
```python - from typing import List, Optional, Annotated class LightsPlugin: From c0014716eee426305cd238fafda6f46209884763 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Thu, 6 Mar 2025 15:39:08 -0800 Subject: [PATCH 065/117] Fix errors --- .../Frameworks/agent/azure-ai-agent.md | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index e15aaa6d..3be48add 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -39,10 +39,11 @@ An `AzureAIAgent` is a specialized agent within the Semantic Kernel framework, d To use an `AzureAIAgent`, an _Azure AI Foundry Project_ must be utilized. The following articles provide an overview of the _Azure AI Foundry_, how to create and configure a project, and the agent service: -- [What is Azure AI Foundry?](azure/ai-foundry/what-is-ai-foundry) -- [The Azure AI Foundry SDK](azure/ai-foundry/how-to/develop/sdk-overview) -- [What is Azure AI Agent Service](azure/ai-services/agents/overview) -- [Quickstart: Create a new agent](azure/ai-services/agents/quickstart) +- [What is Azure AI Foundry?](/azure/ai-foundry/what-is-ai-foundry) +- [The Azure AI Foundry SDK](/azure/ai-foundry/how-to/develop/sdk-overview) +- [What is Azure AI Agent Service](/azure/ai-services/agents/overview) +- [Quickstart: Create a new agent](/azure/ai-services/agents/quickstart) + ## Preparing Your Development Environment @@ -82,7 +83,7 @@ pip install semantic-kernel[azure] ## Configuring the AI Project Client -Accessing an `AzureAIAgent` first requires the creation of a project client that is configured for a specific _Foundry Project_, most commonly by providing a connection string ([The Azure AI Foundry SDK: Getting Started with Projects](azure/ai-foundry/how-to/develop/sdk-overview#get-started-with-projects)). +Accessing an `AzureAIAgent` first requires the creation of a project client that is configured for a specific _Foundry Project_, most commonly by providing a connection string ([The Azure AI Foundry SDK: Getting Started with Projects](/azure/ai-foundry/how-to/develop/sdk-overview#get-started-with-projects)). ::: zone pivot="programming-language-csharp" @@ -333,7 +334,7 @@ An `AzureAIAgent` can leverage advanced tools such as: ### Code Interpreter -Code Interpreter allows the agents to write and run Python code in a sandboxed execution environment ([Azure AI Agent Service Code Interpreter](azure/ai-services/agents/how-to/tools/code-interpreter)). +Code Interpreter allows the agents to write and run Python code in a sandboxed execution environment ([Azure AI Agent Service Code Interpreter](/azure/ai-services/agents/how-to/tools/code-interpreter)). ::: zone pivot="programming-language-csharp" ```c# @@ -384,7 +385,7 @@ async with ( ### File Search -File search augments agents with knowledge from outside its model ([Azure AI Agent Service File Search Tool](azure/ai-services/agents/how-to/tools/file-search)). +File search augments agents with knowledge from outside its model ([Azure AI Agent Service File Search Tool](/azure/ai-services/agents/how-to/tools/file-search)). ::: zone pivot="programming-language-csharp" @@ -436,7 +437,7 @@ async with ( ### OpenAPI Integration -Connects your agent to an external API ([How to use Azure AI Agent Service with OpenAPI Specified Tools](azure/ai-services/agents/how-to/tools/openapi-spec)). 
+Connects your agent to an external API ([How to use Azure AI Agent Service with OpenAPI Specified Tools](/azure/ai-services/agents/how-to/tools/openapi-spec)). ::: zone pivot="programming-language-csharp" ```c# @@ -507,7 +508,7 @@ async with ( ### AzureAI Search Integration -Use an existing Azure AI Search index with with your agent ([Use an existing AI Search index](azure/ai-services/agents/how-to/tools/azure-ai-search)). +Use an existing Azure AI Search index with with your agent ([Use an existing AI Search index](/azure/ai-services/agents/how-to/tools/azure-ai-search)). ::: zone pivot="programming-language-csharp" ```c# @@ -616,6 +617,7 @@ await agentsClient.DeleteAgentAsync(agent.Id); await client.agents.delete_thread(thread.id) await client.agents.delete_agent(agent.id) ``` +::: zone-end If working with a vector store or files, they may be deleted as well: @@ -639,7 +641,7 @@ await client.agents.delete_vector_store(vector_store_id=vector_store.id) ::: zone-end -> More information on the _file search_ tool is described in the [Azure AI Agent Service file search tool](azure/ai-services/agents/how-to/tools/file-search) article. +> More information on the _file search_ tool is described in the [Azure AI Agent Service file search tool](/azure/ai-services/agents/how-to/tools/file-search) article. ## How-To From 129c980ce681d516709c62187d491ad0f6bd2fe6 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Thu, 6 Mar 2025 15:47:50 -0800 Subject: [PATCH 066/117] Update semantic-kernel/concepts/ai-services/realtime.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/concepts/ai-services/realtime.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/ai-services/realtime.md b/semantic-kernel/concepts/ai-services/realtime.md index 8f0e2c25..dd559e91 100644 --- a/semantic-kernel/concepts/ai-services/realtime.md +++ b/semantic-kernel/concepts/ai-services/realtime.md @@ -15,8 +15,7 @@ The first realtime API integration for Semantic Kernel has been added, it is cur ## Realtime Client abstraction To support different realtime APIs from different vendors, using different protocols, a new client abstraction has been added to the kernel. This client is used to connect to the realtime service and send and receive messages. -The client is responsible for handling the connection to the service, sending messages, and receiving messages. The client is also responsible for handling any errors that occur during the connection or message sending/receiving process. Considering the way these models work, they can be considered agents more then regular chat completions, therefore they also take instructions, rather then a system message, they keep their own internal state and can be invoked to do work on our behalf. - +The client is responsible for handling the connection to the service, sending messages, and receiving messages. The client is also responsible for handling any errors that occur during the connection or message sending/receiving process. Considering the way these models work, they can be considered agents more than regular chat completions, therefore they also take instructions, rather than a system message, they keep their own internal state and can be invoked to do work on our behalf. 
### Realtime API Any realtime client implements the following methods: From b3b3786f70dce2c10fb11bf788af8000fcb36147 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Thu, 6 Mar 2025 16:19:08 -0800 Subject: [PATCH 067/117] Fix link --- semantic-kernel/Frameworks/agent/azure-ai-agent.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index 3be48add..851c2749 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -17,7 +17,7 @@ Detailed API documentation related to this discussion is available at: ::: zone pivot="programming-language-csharp" -- [`OpenAIAssistantAgent`](dotnet/api/microsoft.semantickernel.agents.azureai) +- [`OpenAIAssistantAgent`](/dotnet/api/microsoft.semantickernel.agents.azureai) ::: zone-end From fc595236f8f28c5059f9bb98b64a3ea49a0ed705 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 7 Mar 2025 15:49:00 +0900 Subject: [PATCH 068/117] Cleanup --- .../Frameworks/agent/agent-templates.md | 29 +++++++++++-------- .../Frameworks/agent/assistant-agent.md | 2 +- .../Frameworks/agent/azure-ai-agent.md | 12 ++++---- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 2c9ce918..910d2454 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -76,10 +76,8 @@ ChatCompletionAgent agent = ::: zone pivot="programming-language-python" ```python -kernel = Kernel() - agent = ChatCompletionAgent( - kernel=kernel, + service=AzureChatCompletion(), # or other supported AI Services name="StoryTeller", instructions="Tell a story about {{$topic}} that is {{$length}} sentences long.", arguments=KernelArguments(topic="Dog", length="2"), @@ -116,9 +114,18 @@ OpenAIAssistantAgent agent = new(assistant, assistantClient, new KernelPromptTem ::: zone pivot="programming-language-python" ```python -agent = await OpenAIAssistantAgent.retrieve( - id=, - kernel=Kernel(), +# Create the client using Azure OpenAI resources and configuration +client, model = AzureAssistantAgent.setup_resources() + +# Retrieve the assistant definition from the server based on the assistant ID +definition = await client.beta.assistants.retrieve( + assistant_id="your-assistant-id", +) + +# Create the AzureAssistantAgent instance using the client and the assistant definition +agent = AzureAssistantAgent( + client=client, + definition=definition, arguments=KernelArguments(topic="Dog", length="3"), ) ``` @@ -131,9 +138,9 @@ agent = await OpenAIAssistantAgent.retrieve( ::: zone-end -## Agent Definition from a _Prompt Template_ +## Agent Definition from a Prompt Template -The same _Prompt Template Config_ used to create a _Kernel Prompt Function_ can also be leveraged to define an agent. This allows for a unified approach in managing both prompts and agents, promoting consistency and reuse across different components. By externalizing agent definitions from the codebase, this method simplifies the management of multiple agents, making them easier to update and maintain without requiring changes to the underlying logic. This separation also enhances flexibility, enabling developers to modify agent behavior or introduce new agents by simply updating the configuration, rather than adjusting the code itself. 
+The same Prompt Template Config_used to create a Kernel Prompt Function can also be leveraged to define an agent. This allows for a unified approach in managing both prompts and agents, promoting consistency and reuse across different components. By externalizing agent definitions from the codebase, this method simplifies the management of multiple agents, making them easier to update and maintain without requiring changes to the underlying logic. This separation also enhances flexibility, enabling developers to modify agent behavior or introduce new agents by simply updating the configuration, rather than adjusting the code itself. #### YAML Template @@ -193,7 +200,7 @@ data = yaml.safe_load(generate_story_yaml) prompt_template_config = PromptTemplateConfig(**data) agent = ChatCompletionAgent( - kernel=_create_kernel_with_chat_completion(), + service=AzureChatCompletion(), # or other supported AI services prompt_template_config=prompt_template_config, arguments=KernelArguments(topic="Dog", length="3"), ) @@ -250,10 +257,8 @@ await foreach (ChatMessageContent response in agent.InvokeAsync(chat, overrideAr ::: zone pivot="programming-language-python" ```python -kernel = Kernel() - agent = ChatCompletionAgent( - kernel=kernel, + service=AzureChatCompletion(), name="StoryTeller", instructions="Tell a story about {{$topic}} that is {{$length}} sentences long.", arguments=KernelArguments(topic="Dog", length="2"), diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index 31a6b0ea..b5dd3fbf 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -248,7 +248,7 @@ await agent.delete_thread(thread_id) ::: zone-end -## Deleting an `OpenAIAssistantAgent` +## Delete an `OpenAIAssistantAgent` Since the assistant's definition is stored remotely, it will persist if not deleted. Deleting an assistant definition may be performed directly with the `AssistantClient`. diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index 851c2749..842a5f16 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -37,7 +37,7 @@ Detailed API documentation related to this discussion is available at: An `AzureAIAgent` is a specialized agent within the Semantic Kernel framework, designed to provide advanced conversational capabilities with seamless tool integration. It automates tool calling, eliminating the need for manual parsing and invocation. The agent also securely manages conversation history using threads, reducing the overhead of maintaining state. Additionally, the `AzureAIAgent` supports a variety of built-in tools, including file retrieval, code execution, and data interaction via Bing, Azure AI Search, Azure Functions, and OpenAPI. -To use an `AzureAIAgent`, an _Azure AI Foundry Project_ must be utilized. The following articles provide an overview of the _Azure AI Foundry_, how to create and configure a project, and the agent service: +To use an `AzureAIAgent`, an Azure AI Foundry Project must be utilized. 
The following articles provide an overview of the Azure AI Foundry, how to create and configure a project, and the agent service: - [What is Azure AI Foundry?](/azure/ai-foundry/what-is-ai-foundry) - [The Azure AI Foundry SDK](/azure/ai-foundry/how-to/develop/sdk-overview) @@ -67,7 +67,7 @@ dotnet add package Azure.Identity ::: zone pivot="programming-language-python" -Install the `semantic-kernel` package with the optional _Azure_ dependencies: +Install the `semantic-kernel` package with the optional Azure dependencies: ```bash pip install semantic-kernel[azure] @@ -83,7 +83,7 @@ pip install semantic-kernel[azure] ## Configuring the AI Project Client -Accessing an `AzureAIAgent` first requires the creation of a project client that is configured for a specific _Foundry Project_, most commonly by providing a connection string ([The Azure AI Foundry SDK: Getting Started with Projects](/azure/ai-foundry/how-to/develop/sdk-overview#get-started-with-projects)). +Accessing an `AzureAIAgent` first requires the creation of a project client that is configured for a specific Foundry Project, most commonly by providing a connection string ([The Azure AI Foundry SDK: Getting Started with Projects](/azure/ai-foundry/how-to/develop/sdk-overview#get-started-with-projects)). ::: zone pivot="programming-language-csharp" @@ -242,7 +242,7 @@ for user_input in USER_INPUTS: ::: zone-end -An agent may also produce a _streamed_ response: +An agent may also produce a streamed response: ::: zone pivot="programming-language-csharp" ```c# @@ -574,7 +574,7 @@ async with ( ::: zone-end -### Retrieving Existing `AzureAIAgent` +### Retrieving an Existing `AzureAIAgent` An existing agent can be retrieved and reused by specifying its assistant ID: @@ -668,4 +668,4 @@ For practical examples of using an `AzureAIAgent`, see our code samples on GitHu ::: zone-end > [!div class="nextstepaction"] -> [Agent Collaboration in `AgentChat`](./agent-chat.md) \ No newline at end of file +> [Agent Collaboration in AgentChat](./agent-chat.md) \ No newline at end of file From 3ec294c4b0fcd70cd838eb28cb7a9009fc462168 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 7 Mar 2025 17:40:18 +0900 Subject: [PATCH 069/117] update next step actions --- semantic-kernel/Frameworks/agent/assistant-agent.md | 4 ++-- semantic-kernel/Frameworks/agent/azure-ai-agent.md | 2 +- semantic-kernel/Frameworks/agent/chat-completion-agent.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index b5dd3fbf..aa888610 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -248,7 +248,7 @@ await agent.delete_thread(thread_id) ::: zone-end -## Delete an `OpenAIAssistantAgent` +## Deleting an `OpenAIAssistantAgent` Since the assistant's definition is stored remotely, it will persist if not deleted. Deleting an assistant definition may be performed directly with the `AssistantClient`. 
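For the Python `AzureAssistantAgent` created earlier, a rough equivalent of this cleanup is sketched below. It assumes the `client` returned by `AzureAssistantAgent.setup_resources()` and the `agent` and `thread_id` values from the earlier snippets; treat the exact calls as illustrative rather than authoritative.

```python
# Hedged cleanup sketch for the Python AzureAssistantAgent (names assumed from
# the earlier snippets): delete the thread first, then the remote assistant
# definition so that neither persists on the service.
await agent.delete_thread(thread_id)
await client.beta.assistants.delete(agent.id)
```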
@@ -289,5 +289,5 @@ For an end-to-end example for a `OpenAIAssistantAgent`, see: > [!div class="nextstepaction"] -> [Agent Collaboration in `AgentChat`](./agent-chat.md) +> [Exploring the Azure AI Agent](./azure-ai-agent.md) diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index 842a5f16..8e9ebf3f 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -668,4 +668,4 @@ For practical examples of using an `AzureAIAgent`, see our code samples on GitHu ::: zone-end > [!div class="nextstepaction"] -> [Agent Collaboration in AgentChat](./agent-chat.md) \ No newline at end of file +> [Agent Collaboration in Agent Chat](./agent-chat.md) \ No newline at end of file diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index fe38eb9a..827f0b35 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -312,4 +312,4 @@ For an end-to-end example for a `ChatCompletionAgent`, see: > [!div class="nextstepaction"] -> [Exploring `OpenAIAssistantAgent`](./assistant-agent.md) +> [Exploring the OpenAI Assistant Agent`](./assistant-agent.md) From beeb9d19f9c6e9ff5133c5d4d3c675b539de2211 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 7 Mar 2025 11:03:26 +0000 Subject: [PATCH 070/117] Add is hybrid supported setting to each connector page --- .../azure-ai-search-connector.md | 6 ++++ .../azure-cosmosdb-mongodb-connector.md | 4 +++ .../azure-cosmosdb-nosql-connector.md | 1 + .../couchbase-connector.md | 1 + .../elasticsearch-connector.md | 5 ++-- .../inmemory-connector.md | 1 + .../mongodb-connector.md | 5 ++++ .../pinecone-connector.md | 1 + .../postgres-connector.md | 1 + .../qdrant-connector.md | 29 +++++++++++++++++++ .../redis-connector.md | 1 + .../sqlite-connector.md | 1 + .../weaviate-connector.md | 28 ++++++++++-------- 13 files changed, 70 insertions(+), 14 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md index a85ee0d4..dc490281 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md @@ -32,8 +32,11 @@ The Azure AI Search Vector Store connector can be used to access and manage data | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | Yes | + ::: zone-end ::: zone pivot="programming-language-python" + | Feature Area | Support | | ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Collection maps to | Azure AI Search Index | @@ -46,8 +49,10 @@ The Azure AI Search Vector Store connector can be used to access and manage data | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | | IsFullTextSearchable supported? 
| Yes | + ::: zone-end ::: zone pivot="programming-language-java" + | Feature Area | Support | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Collection maps to | Azure AI Search Index | @@ -61,6 +66,7 @@ The Azure AI Search Vector Store connector can be used to access and manage data | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | + ::: zone-end ## Limitations diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md index c50ed06f..b0fcf275 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md @@ -32,8 +32,11 @@ The Azure CosmosDB MongoDB Vector Store connector can be used to access and mana | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | No | + ::: zone-end ::: zone pivot="programming-language-python" + | Feature Area | Support | | ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Collection maps to | Azure Cosmos DB MongoDB (vCore) Collection + Index | @@ -46,6 +49,7 @@ The Azure CosmosDB MongoDB Vector Store connector can be used to access and mana | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | No | + ::: zone-end ::: zone pivot="programming-language-java" More info coming soon. diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md index 6e84f353..2136964d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md @@ -32,6 +32,7 @@ The Azure CosmosDB NoSQL Vector Store connector can be used to access and manage | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? 
| Yes | ## Limitations diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/couchbase-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/couchbase-connector.md index 63ba111f..e27ad81a 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/couchbase-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/couchbase-connector.md @@ -34,6 +34,7 @@ following characteristics. | IsFilterable supported? | No | | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | No | ## Getting Started diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/elasticsearch-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/elasticsearch-connector.md index 632ea9f8..6f41b9e3 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/elasticsearch-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/elasticsearch-connector.md @@ -13,6 +13,8 @@ ms.service: semantic-kernel > [!WARNING] > The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. +::: zone pivot="programming-language-csharp" + ## Overview The Elasticsearch Vector Store connector can be used to access and manage data in Elasticsearch. The connector has the following characteristics. @@ -30,8 +32,7 @@ The Elasticsearch Vector Store connector can be used to access and manage data i | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | - -::: zone pivot="programming-language-csharp" +| HybridSearch supported? | No | ## Getting started diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/inmemory-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/inmemory-connector.md index 1a08b629..fd626301 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/inmemory-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/inmemory-connector.md @@ -35,6 +35,7 @@ The connector has the following characteristics. | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | No, since storage is in-memory and data reuse is therefore not possible, custom naming is not applicable. | +| HybridSearch supported? | No | ## Getting started diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md index 8671e1a3..7591b678 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/mongodb-connector.md @@ -32,6 +32,8 @@ The MongoDB Vector Store connector can be used to access and manage data in Mong | IsFilterable supported? | Yes | | IsFullTextSearchable supported? 
| No | | StoragePropertyName supported? | No, use BsonElementAttribute instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | Yes | + ::: zone-end ::: zone pivot="programming-language-python" @@ -47,9 +49,12 @@ The MongoDB Vector Store connector can be used to access and manage data in Mong | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | No | + ::: zone-end ::: zone pivot="programming-language-java" + More info coming soon. + ::: zone-end ::: zone pivot="programming-language-csharp" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 134a29bc..2be8a72d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -32,6 +32,7 @@ The Pinecone Vector Store connector can be used to access and manage data in Pin | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | Yes | +| HybridSearch supported? | No | ## Getting started diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md index 8eb24e45..e253ab5e 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md @@ -32,6 +32,7 @@ The Postgres Vector Store connector can be used to access and manage data in Pos | IsFilterable supported? | No | | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | Yes | +| HybridSearch supported? | No | ## Getting started diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md index 03ee4e4b..45c3babd 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md @@ -17,6 +17,8 @@ ms.service: semantic-kernel The Qdrant Vector Store connector can be used to access and manage data in Qdrant. The connector has the following characteristics. +::: zone pivot="programming-language-csharp" + | Feature Area | Support | |-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| | Collection maps to | Qdrant collection with payload indices for filterable data fields | @@ -30,6 +32,33 @@ The Qdrant Vector Store connector can be used to access and manage data in Qdran | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | Yes | +| HybridSearch supported? 
| Yes | + +::: zone-end +::: zone pivot="programming-language-python" + +| Feature Area | Support | +|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| +| Collection maps to | Qdrant collection with payload indices for filterable data fields | +| Supported key property types |
    • ulong
    • Guid
    | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • *and iterables of each of these types*
    | +| Supported vector property types |
    • list[float]
    | +| Supported index types | Hnsw | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    • ManhattanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes (configurable) | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | Yes | + +::: zone-end +::: zone pivot="programming-language-java" + +## Not Supported + +Not currently supported. + +::: zone-end ::: zone pivot="programming-language-csharp" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md index 72d8ef82..f1436bc9 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/redis-connector.md @@ -32,6 +32,7 @@ The connector has the following characteristics. | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | | StoragePropertyName supported? | **When using Hashes:** Yes
    **When using JSON:** No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | No | ::: zone pivot="programming-language-csharp" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sqlite-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sqlite-connector.md index 0e1d9c25..5f13fcf8 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sqlite-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sqlite-connector.md @@ -32,6 +32,7 @@ The SQLite Vector Store connector can be used to access and manage data in SQLit | IsFilterable supported? | No | | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | Yes | +| HybridSearch supported? | No | ## Limitations diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md index b3efc390..7b30706d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/weaviate-connector.md @@ -19,21 +19,24 @@ The Weaviate Vector Store connector can be used to access and manage data in Wea ::: zone pivot="programming-language-csharp" -| Feature Area | Support | -| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Collection maps to | Weaviate Collection | -| Supported key property types | Guid | +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Weaviate Collection | +| Supported key property types | Guid | | Supported data property types |
    • string
    • byte
    • short
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • DateTimeOffset
    • Guid
    • *and enumerables of each of these types*
    | -| Supported vector property types |
     • ReadOnlyMemory\<float\>
     • ReadOnlyMemory\<double\>
    | -| Supported index types |
    • Hnsw
    • Flat
    • Dynamic
    | -| Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | -| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | Yes | -| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| Supported vector property types |
     • ReadOnlyMemory\<float\>
     • ReadOnlyMemory\<double\>
    | +| Supported index types |
    • Hnsw
    • Flat
    • Dynamic
    | +| Supported distance functions |
    • CosineDistance
    • NegativeDotProductSimilarity
    • EuclideanSquaredDistance
    • Hamming
    • ManhattanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | Yes | + ::: zone-end ::: zone pivot="programming-language-python" + | Feature Area | Support | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Collection maps to | Weaviate Collection | @@ -46,6 +49,7 @@ The Weaviate Vector Store connector can be used to access and manage data in Wea | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | | IsFullTextSearchable supported? | Yes | + ::: zone-end ::: zone pivot="programming-language-java" Coming soon. From c6f9a2dd2d2be4347445662e58e5572c013f5211 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 7 Mar 2025 11:55:20 +0000 Subject: [PATCH 071/117] Add changes page to TOC --- semantic-kernel/support/migration/toc.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/support/migration/toc.yml b/semantic-kernel/support/migration/toc.yml index 1718b55c..9f942a2f 100644 --- a/semantic-kernel/support/migration/toc.yml +++ b/semantic-kernel/support/migration/toc.yml @@ -11,4 +11,6 @@ - name: Kernel Events and Filters Migration href: kernel-events-and-filters-migration.md - name: Agent Framework Release Candidate Migration Guide - href: agent-framework-rc-migration-guide.md \ No newline at end of file + href: agent-framework-rc-migration-guide.md +- name: VectorData changes - March 2025 + href: vectordata-march-2025.md \ No newline at end of file From 74f7a7a3ca53b69dae529314e13e544aa25f665e Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 7 Mar 2025 12:32:22 +0000 Subject: [PATCH 072/117] Rename migration guide and update custom mapper page --- .../how-to/vector-store-custom-mapper.md | 54 +++++++------------ semantic-kernel/support/migration/toc.yml | 4 +- ...arch-2025.md => vectorstore-march-2025.md} | 12 ++--- 3 files changed, 27 insertions(+), 43 deletions(-) rename semantic-kernel/support/migration/{vectordata-march-2025.md => vectorstore-march-2025.md} (94%) diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-custom-mapper.md b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-custom-mapper.md index 81deabad..8cd2a2d5 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-custom-mapper.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-custom-mapper.md @@ -12,6 +12,10 @@ ms.service: semantic-kernel > [!WARNING] > The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. +> [!WARNING] +> Support for custom mappers may be deprecated in future, since filtering and target property selection is not able to target the top level mapped types. + +::: zone pivot="programming-language-csharp" In this how to, we will show how you can replace the default mapper for a vector store record collection with your own mapper. 
@@ -37,19 +41,11 @@ All Vector Store connector implementations allow you to provide a custom mapper. The underlying data stores of each Vector Store connector have different ways of storing data. Therefore what you are mapping to on the storage side may differ for each connector. -::: zone pivot="programming-language-csharp" E.g. if using the Qdrant connector, the storage type is a `PointStruct` class provided by the Qdrant SDK. If using the Redis JSON connector, the storage type is a `string` key and a `JsonNode`, while if using a JSON HashSet connector, the storage type is a `string` key and a `HashEntry` array. -::: zone-end -::: zone pivot="programming-language-python" -::: zone-end -::: zone pivot="programming-language-java" -::: zone-end If you want to do custom mapping, and you want to use multiple connector types, you will therefore need to implement a mapper for each connector type. -::: zone pivot="programming-language-csharp" - ## Creating the data model Our first step is to create a data model. In this case we will not annotate the data model with attributes, since we will provide a separate record definition @@ -204,18 +200,15 @@ When using `IVectorStore` to get `IVectorStoreRecordCollection` object instances the `GetCollection` method. This is because custom mappers differ for each Vector Store type, and would make it impossible to use `IVectorStore` to communicate with any vector store implementation. -It is however possible to provide a factory when constructing a Vector Store implementation. This can be used to customize `IVectorStoreRecordCollection` -instances as they are created. +It is however possible to override the default implementation of `GetCollection` and provide your own custom implementation of the vector store. -Here is an example of such a factory, which checks if `CreateCollection` was called with the product definition and data type, and if so -injects the custom mapper and switches on named vectors mode. +Here is an example where we inherit from the `QdrantVectorStore` and override the `GetCollection` method to do the custom construction. ```csharp -public class QdrantCollectionFactory(VectorStoreRecordDefinition productDefinition) : IQdrantVectorStoreRecordCollectionFactory +private sealed class QdrantCustomVectorStore(QdrantClient qdrantClient, VectorStoreRecordDefinition productDefinition) + : QdrantVectorStore(qdrantClient) { - public IVectorStoreRecordCollection CreateVectorStoreRecordCollection(QdrantClient qdrantClient, string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition) - where TKey : notnull - where TRecord : class + public override IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null) { // If the record definition is the product definition and the record type is the product data // model, inject the custom mapper into the collection options. @@ -233,39 +226,30 @@ public class QdrantCollectionFactory(VectorStoreRecordDefinition productDefiniti return customCollection!; } - // Otherwise, just create a standard collection with the default mapper. - var collection = new QdrantVectorStoreRecordCollection( - qdrantClient, - name, - new() - { - VectorStoreRecordDefinition = vectorStoreRecordDefinition - }) as IVectorStoreRecordCollection; - return collection!; + // Otherwise, just create a standard collection. 
+ return base.GetCollection(name, vectorStoreRecordDefinition); } } ``` -To use the collection factory, pass it to the Vector Store when constructing it, or when registering it with the dependency injection container. +To use the replacement vector store, register it with your dependency injection container or use just use it directly as you would a regular `QdrantVectorStore`. ```csharp // When registering with the dependency injection container on the kernel builder. -kernelBuilder.AddQdrantVectorStore( - "localhost", - options: new() +kernelBuilder.Services.AddTransient( + (sp) => { - VectorStoreCollectionFactory = new QdrantCollectionFactory(productDefinition) + return new QdrantCustomVectorStore( + new QdrantClient("localhost"), + productDefinition); }); ``` ```csharp // When constructing the Vector Store instance directly. -var vectorStore = new QdrantVectorStore( +var vectorStore = new QdrantCustomVectorStore( new QdrantClient("localhost"), - new() - { - VectorStoreCollectionFactory = new QdrantCollectionFactory(productDefinition) - }); + productDefinition); ``` Now you can use the vector store as normal to get a collection. diff --git a/semantic-kernel/support/migration/toc.yml b/semantic-kernel/support/migration/toc.yml index 9f942a2f..aa17fe21 100644 --- a/semantic-kernel/support/migration/toc.yml +++ b/semantic-kernel/support/migration/toc.yml @@ -12,5 +12,5 @@ href: kernel-events-and-filters-migration.md - name: Agent Framework Release Candidate Migration Guide href: agent-framework-rc-migration-guide.md -- name: VectorData changes - March 2025 - href: vectordata-march-2025.md \ No newline at end of file +- name: Vector Store changes - March 2025 + href: vectorstore-march-2025.md \ No newline at end of file diff --git a/semantic-kernel/support/migration/vectordata-march-2025.md b/semantic-kernel/support/migration/vectorstore-march-2025.md similarity index 94% rename from semantic-kernel/support/migration/vectordata-march-2025.md rename to semantic-kernel/support/migration/vectorstore-march-2025.md index 31eed51a..18e0c56c 100644 --- a/semantic-kernel/support/migration/vectordata-march-2025.md +++ b/semantic-kernel/support/migration/vectorstore-march-2025.md @@ -1,6 +1,6 @@ --- -title: VectorData changes - March 2025 -description: Describes the changes included in the March 2025 VectorData release and how to migrate +title: Vector Store changes - March 2025 +description: Describes the changes included in the March 2025 Vector Store release and how to migrate zone_pivot_groups: programming-languages author: westey-m ms.topic: conceptual @@ -10,7 +10,7 @@ ms.service: semantic-kernel --- ::: zone pivot="programming-language-csharp" -# VectorData changes - March 2025 +# Vector Store changes - March 2025 ## Linq based filtering @@ -75,7 +75,7 @@ LINQ based filtering and new property selectors metioned above. If you are currently constructing the options class without providing the name of the options class there will be no change. E.g. `VectorizedSearchAsync(embedding, new() { Top = 5 })`. -On the other hand if you are specifying the options type, you will need to add the record type as a +On the other hand if you are using `new` with the type name, you will need to add the record type as a generic parameter. 
```csharp @@ -127,10 +127,10 @@ If you were passing these options in the past, you will need to remove these wit ```csharp // Before -collection.DeleteAsync("mykey", new DeleteRecordOptions(), cancellationToken ); +collection.DeleteAsync("mykey", new DeleteRecordOptions(), cancellationToken); // After -collection.DeleteAsync("mykey", cancellationToken ); +collection.DeleteAsync("mykey", cancellationToken); ``` ::: zone-end From 0aa0b6127b1dd685cf0e11681e0ed84a5c35ffb2 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 7 Mar 2025 16:53:15 +0000 Subject: [PATCH 073/117] Remove java pieces for qdrant, since it's not supported --- .../qdrant-connector.md | 31 +++++++++++++++---- 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md index 45c3babd..bd6adebc 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md @@ -232,6 +232,8 @@ For more details on this concept see the [serialization documentation](./../seri ::: zone pivot="programming-language-java" ::: zone-end +::: zone pivot="programming-language-csharp" + ### Qdrant vector modes Qdrant supports two modes for vector storage and the Qdrant Connector with default mapper supports both modes. @@ -242,8 +244,6 @@ The default mode is *single unnamed vector*. With this option a collection may only contain a single vector and it will be unnamed in the storage model in Qdrant. Here is an example of how an object is represented in Qdrant when using *single unnamed vector* mode: -::: zone pivot="programming-language-csharp" - ```csharp new Hotel { @@ -265,6 +265,16 @@ new Hotel ::: zone-end ::: zone pivot="programming-language-python" +### Qdrant vector modes + +Qdrant supports two modes for vector storage and the Qdrant Connector with default mapper supports both modes. +The default mode is *single unnamed vector*. + +#### Single unnamed vector + +With this option a collection may only contain a single vector and it will be unnamed in the storage model in Qdrant. +Here is an example of how an object is represented in Qdrant when using *single unnamed vector* mode: + ```python Hotel( hotel_id = 1, @@ -283,18 +293,18 @@ PointStruct( vector=[0.9, 0.1, 0.1, 0.1], ) ``` + ::: zone-end ::: zone pivot="programming-language-java" ::: zone-end +::: zone pivot="programming-language-csharp" #### Named vectors If using the named vectors mode, it means that each point in a collection may contain more than one vector, and each will be named. Here is an example of how an object is represented in Qdrant when using *named vectors* mode: -::: zone pivot="programming-language-csharp" - ```csharp new Hotel { @@ -320,6 +330,11 @@ new Hotel ::: zone-end ::: zone pivot="programming-language-python" +#### Named vectors + +If using the named vectors mode, it means that each point in a collection may contain more than one vector, and each will be named. 
+Here is an example of how an object is represented in Qdrant when using *named vectors* mode: + ```python Hotel( hotel_id = 1, @@ -347,11 +362,11 @@ PointStruct( ::: zone pivot="programming-language-java" ::: zone-end +::: zone pivot="programming-language-csharp" + To enable named vectors mode, pass this as an option when constructing a Vector Store or collection. The same options can also be passed to any of the provided dependency injection container extension methods. -::: zone pivot="programming-language-csharp" - ```csharp using Microsoft.SemanticKernel.Connectors.Qdrant; using Qdrant.Client; @@ -369,6 +384,9 @@ var collection = new QdrantVectorStoreRecordCollection( ::: zone-end ::: zone pivot="programming-language-python" +To enable named vectors mode, pass this as an option when constructing a Vector Store or collection. +The same options can also be passed to any of the provided dependency injection container extension methods. + In python the default value for `named_vectors` is True, but you can also disable this as shown below. ```python @@ -380,6 +398,7 @@ collection = QdrantCollection( named_vectors=False, ) ``` + ::: zone-end ::: zone pivot="programming-language-java" ::: zone-end From 7809f243267289015b7f2f1a42b167dda30b3f0b Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 7 Mar 2025 17:29:27 +0000 Subject: [PATCH 074/117] Improve build your own instructions --- .../how-to/build-your-own-connector.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md index 6cab6486..9f6faeaf 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md @@ -432,13 +432,14 @@ client so that it may send the entire set in one request. ## Recommended common patterns and pratices +1. Keep `IVectorStore` and `IVectorStoreRecordCollection` implementations unsealed with virtual methods, so that developers can inherit and override if needed. 1. Always use options classes for optional settings with smart defaults. 1. Keep required parameters on the main signature and move optional parameters to options. Here is an example of an `IVectorStoreRecordCollection` constructor following this pattern. ```csharp -public sealed class MyDBVectorStoreRecordCollection : IVectorStoreRecordCollection +public class MyDBVectorStoreRecordCollection : IVectorStoreRecordCollection { public MyDBVectorStoreRecordCollection(MyDBClient myDBClient, string collectionName, MyDBVectorStoreRecordCollectionOptions? options = default) { @@ -447,13 +448,18 @@ public sealed class MyDBVectorStoreRecordCollection : IVectorStoreRecor ... } -public sealed class MyDBVectorStoreRecordCollectionOptions +public class MyDBVectorStoreRecordCollectionOptions { public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; public IVectorStoreRecordMapper? MyDbRecordCustomMapper { get; init; } = null; } ``` +## SDK Changes + +Please also see the following articles for a history of changes to the SDK and therefore implementation requirements: + +1. 
[Vector Store Changes March 2025](../../../support/migration/vectorstore-march-2025.md) ::: zone-end ::: zone pivot="programming-language-python" From 9fd01ca21f260e216ccc74b02ac0ead9899d0c4f Mon Sep 17 00:00:00 2001 From: Chris <66376200+crickman@users.noreply.github.com> Date: Fri, 7 Mar 2025 14:06:11 -0800 Subject: [PATCH 075/117] Update semantic-kernel/Frameworks/agent/agent-templates.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/agent/agent-templates.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index 910d2454..b8d0a4ca 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -140,8 +140,7 @@ agent = AzureAssistantAgent( ## Agent Definition from a Prompt Template -The same Prompt Template Config_used to create a Kernel Prompt Function can also be leveraged to define an agent. This allows for a unified approach in managing both prompts and agents, promoting consistency and reuse across different components. By externalizing agent definitions from the codebase, this method simplifies the management of multiple agents, making them easier to update and maintain without requiring changes to the underlying logic. This separation also enhances flexibility, enabling developers to modify agent behavior or introduce new agents by simply updating the configuration, rather than adjusting the code itself. - +The same Prompt Template Config used to create a Kernel Prompt Function can also be leveraged to define an agent. This allows for a unified approach in managing both prompts and agents, promoting consistency and reuse across different components. By externalizing agent definitions from the codebase, this method simplifies the management of multiple agents, making them easier to update and maintain without requiring changes to the underlying logic. This separation also enhances flexibility, enabling developers to modify agent behavior or introduce new agents by simply updating the configuration, rather than adjusting the code itself. #### YAML Template ```yaml From 34768c3af3acd2b755923e78d6478ee91a901133 Mon Sep 17 00:00:00 2001 From: Chris <66376200+crickman@users.noreply.github.com> Date: Fri, 7 Mar 2025 14:06:18 -0800 Subject: [PATCH 076/117] Update semantic-kernel/Frameworks/agent/chat-completion-agent.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/agent/chat-completion-agent.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 827f0b35..378c9550 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -255,8 +255,7 @@ agent = ChatCompletionAgent(...) 
chat = ChatHistory() # Add the user message -chat.add_user_message(user_input)) - +chat.add_user_message(user_input) # Generate the agent response response = await agent.get_response(chat) # response is a `ChatMessageContent` object From 6cd0488e232306767a941b6dca2ac54c6cc64458 Mon Sep 17 00:00:00 2001 From: Chris Rickman Date: Fri, 7 Mar 2025 14:08:39 -0800 Subject: [PATCH 077/117] Update example namespaces --- .../agent/examples/example-assistant-code.md | 2 -- .../agent/examples/example-assistant-search.md | 10 ++++------ 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index aadcba51..0ce0dfcc 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -608,11 +608,9 @@ using Azure.Identity; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents.OpenAI; using Microsoft.SemanticKernel.ChatCompletion; -using OpenAI; using OpenAI.Assistants; using OpenAI.Files; using System; -using System.ClientModel; using System.Collections.Generic; using System.Diagnostics; using System.IO; diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index ca565932..81bcc550 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -580,21 +580,19 @@ Try using these suggested inputs: ::: zone pivot="programming-language-csharp" ```csharp -using System; -using System.ClientModel; -using System.Collections.Generic; -using System.Linq; -using System.Threading.Tasks; using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.Agents.OpenAI; using Microsoft.SemanticKernel.ChatCompletion; -using OpenAI; using OpenAI.Assistants; using OpenAI.Files; using OpenAI.VectorStores; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; namespace AgentsSample; From 21a0f24ade2d3deaa657513924b791322b11ce62 Mon Sep 17 00:00:00 2001 From: Chris <66376200+crickman@users.noreply.github.com> Date: Fri, 7 Mar 2025 14:20:07 -0800 Subject: [PATCH 078/117] Update semantic-kernel/Frameworks/agent/chat-completion-agent.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/Frameworks/agent/chat-completion-agent.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 378c9550..c8d3b6d7 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -311,4 +311,4 @@ For an end-to-end example for a `ChatCompletionAgent`, see: > [!div class="nextstepaction"] -> [Exploring the OpenAI Assistant Agent`](./assistant-agent.md) +> [Exploring the OpenAI Assistant Agent](./assistant-agent.md) From 96197c27a4f8f303f2c01e99ff5fefeecc4206e6 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey Date: Mon, 10 Mar 2025 12:57:12 -0700 Subject: [PATCH 079/117] agent update --- semantic-kernel/Frameworks/agent/index.md | 5 +++++ semantic-kernel/index.yml | 2 +- semantic-kernel/media/agentSKdocs.png | Bin 0 -> 18819 bytes 
semantic-kernel/media/agentSKdocs2.png | Bin 0 -> 17970 bytes semantic-kernel/media/agentSKdocs3.png | Bin 0 -> 14846 bytes semantic-kernel/media/agentSKdocs4.png | Bin 0 -> 19523 bytes 6 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 semantic-kernel/media/agentSKdocs.png create mode 100644 semantic-kernel/media/agentSKdocs2.png create mode 100644 semantic-kernel/media/agentSKdocs3.png create mode 100644 semantic-kernel/media/agentSKdocs4.png diff --git a/semantic-kernel/Frameworks/agent/index.md b/semantic-kernel/Frameworks/agent/index.md index 749207bb..165fee79 100644 --- a/semantic-kernel/Frameworks/agent/index.md +++ b/semantic-kernel/Frameworks/agent/index.md @@ -17,6 +17,11 @@ The Semantic Kernel Agent Framework provides a platform within the Semantic Kern ## What is an AI agent? +![alt text](../../media/agentSKdocs.png) +![alt text](../../media/agentSKdocs2.png) +![alt text](../../media/agentSKdocs3.png) +![alt text](../../media/agentSKdocs4.png) + An **AI agent** is a software entity designed to perform tasks autonomously or semi-autonomously by recieving input, processing information, and taking actions to achieve specific goals. Agents can send and receive messages, generating responses using a combination of models, tools, human inputs, or other customizable components. diff --git a/semantic-kernel/index.yml b/semantic-kernel/index.yml index 3f94c798..b8a4c02f 100644 --- a/semantic-kernel/index.yml +++ b/semantic-kernel/index.yml @@ -51,7 +51,7 @@ productDirectory: links: - url: /semantic-kernel/concepts/enterprise-readiness/observability/index text: Observability - - url: /semantic-kernel/concepts/enterprise-readiness/SECURITY + - url: /semantic-kernel/support/SECURITY.md text: Security - url: /semantic-kernel/concepts/enterprise-readiness/filters text: Filters diff --git a/semantic-kernel/media/agentSKdocs.png b/semantic-kernel/media/agentSKdocs.png new file mode 100644 index 0000000000000000000000000000000000000000..e90d9fa45d0d47573bb96dd96f1c2188eb0c7927 GIT binary patch literal 18819 zcmV*MKx4m&P)Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!vFvd!vV){sAK>DNf=2)K~#8N?Y!r6 zT-TZItN-Bm+x>9s*m9H=Y*`-L$`Tc+EK*|5nP3KU026X<01b4`$cahOvSll0i2z7~ z$mpCSn1fuuTWjr&Kw6&hnHkUZoO5>7Q>#I{W?aAb>1Vxr?RWPrxBQb{KX}>ky&t^V zGVMomTC;yRr@8t^b6bx6XkJU#Pv*C|e>%S<{nPoa{A_+Jx6Nc|NoKK|9IK)z5hAy zNcQ*VA9?TA1;@B`(FuOE}`I$;vIgv>Ro=e=1<(V_C0P}`vG^X|ByR2e8e5= zKjO{}A9C0F5BbHq_xa_zKl6*MclqVoKXCV&-*flsH~H1d*ZI}*6Wp`x2=^>)=hutd z_|4)LezUk)>bHxU-n(}}{jz)iPb|j&JGW!n_vasd?+1%scJu%C|Mu7aTzDe;`-|U7-@5!=ez@i% zezf5$ezNs6KifILZF@$!J;%o#2ZP*kD9oL?5$?*5@r(R8zbr^_cR_+*6(#v~aZ2i6 ziqrh2B*kw_Qv9wY#l58|?ki1le`%5jN)tR(8t38C7!Q|3d89bRqeXrm&37{`cZg{R z&hY4-Z+LY3Cp@y{&pf>0Z6028f`?ai^6>Ij9$D7Rqf48Zwye?p=>HCd_`$-rr+t6P z@7=eqe2*Wl`+^^D>E@?9`uW+OQEuDs=e7f3Za*C3j@&qRsm*`j$#t*s z#Oh9-T+zal%bR#=c_U9RZ*WXs`rl-;zIE9jmwtcc`{^GF@LSLDlU*bHEGNKi2P6Fa zaGcw7liZ${*zB zby^nS`5Hwp)ued2CdG`JI5VrGys8QEO6e#w^Lu$I=SyDL@gC1@e3NI_9^si)tvtJ; ziRYvms~%nYPj+MXgQf4?dh6;B-9OrNlAr7tP@^WoZHE*5T!K$aO~4-quj;Sh7pJ+W zC@tMD;4ATy;kTuk?}RVhFX2}x@P!_i@gv-y;pr;H{m-iKUl0Y*rg>SLW@c@QS+yx< zYvatRi87}$!0gfyUd=nj%zdBo($;r)VZ(8rU){#@E1P(6Wg{=HXmG#$&k{*LSoQI* zez@-Qw1}IZ?s0S5fsl-vpNptb;op&$`EKV=z!#aH=I%n_{EV{k7L7+wMu9KfU!rcj z0)DBo@yhHk&+v#EKhph`s`~}}>I~D>0z9XF171`MFhiT-6;Xh?6m#lQ%&ki@uQtYl znh5hNz057{XZFFbcxC6Gd3n=oytKBR7gsg&(uziAtZZ<89=bm@;a zeZx<74)9aq`ti8QOL4mjd;+?vyM)H!7fd=|asMRv$=nz473cqz`=v5GEZ~=?W%i5B zS7iSaBL6jMo~}ysOjVj^tI|BDy8p!~?tfJ+z+7F5dAbw}bV(NJk}R%`vA80F5a`ROSAl+ZB 
[GIT binary patch data omitted: base85-encoded content for the new image files added by this patch, including semantic-kernel/media/agentSKdocs2.png (GIT binary patch, literal 17970) and semantic-kernel/media/agentSKdocs4.png (GIT binary patch, literal 19523)]

Date: Tue, 11 Mar 2025 11:19:10 +0900
Subject: [PATCH 080/117] Update Python API links for agents (#489)

---
 semantic-kernel/Frameworks/agent/agent-chat.md            | 4 ++--
 semantic-kernel/Frameworks/agent/assistant-agent.md       | 4 ++--
 semantic-kernel/Frameworks/agent/azure-ai-agent.md        | 4 ++--
 semantic-kernel/Frameworks/agent/chat-completion-agent.md | 3 +--
 4 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/semantic-kernel/Frameworks/agent/agent-chat.md b/semantic-kernel/Frameworks/agent/agent-chat.md
index 5ada4e7c..34d1a18a 100644
--- a/semantic-kernel/Frameworks/agent/agent-chat.md
+++ b/semantic-kernel/Frameworks/agent/agent-chat.md
@@ -24,8 +24,8 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`agent_chat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_chat)
-- [`agent_group_chat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_group_chat)
+- [`AgentChat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_chat.agentchat)
+- [`AgentGroupChat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_group_chat.agentgroupchat)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md
index aa888610..c6d8ca77 100644
--- a/semantic-kernel/Frameworks/agent/assistant-agent.md
+++ b/semantic-kernel/Frameworks/agent/assistant-agent.md
@@ -22,8 +22,8 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`azure_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent)
-- [`open_ai_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent)
+- [`AzureAssistantAgent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent.azureassistantagent)
+- [`OpenAIAssistantAgent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent.openaiassistantagent)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
index 8e9ebf3f..bb70855c 100644
--- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md
+++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
@@ -17,13 +17,13 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-csharp"
 
-- [`OpenAIAssistantAgent`](/dotnet/api/microsoft.semantickernel.agents.azureai)
+- [`AzureAIAgent`](/dotnet/api/microsoft.semantickernel.agents.azureai)
 
 ::: zone-end
 
 ::: zone pivot="programming-language-python"
 
-> Updated Semantic Kernel Python API Docs are coming soon.
+- [`AzureAIAgent`](python/api/semantic-kernel/semantic_kernel.agents.azure_ai.azure_ai_agent.azureaiagent)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
index c8d3b6d7..957dc3aa 100644
--- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md
+++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
@@ -25,8 +25,7 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`chat_completion_agent`](/python/api/semantic-kernel/semantic_kernel.agents.chat_completion.chat_completion_agent)
-- [`chat_completion_client_base`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.chat_completion_client_base)
+- [`ChatCompletionAgent`](/python/api/semantic-kernel/semantic_kernel.agents.chat_completion.chat_completion_agent.chatcompletionagent)
 
 ::: zone-end
 

From 6b644946d6f52c5ba4ba368eb911ad9af7f42031 Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Tue, 11 Mar 2025 12:28:40 +0900
Subject: [PATCH 081/117] Add missing forward slash (#491)

---
 semantic-kernel/Frameworks/agent/azure-ai-agent.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
index bb70855c..6d6eabd7 100644
--- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md
+++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
@@ -23,7 +23,7 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`AzureAIAgent`](python/api/semantic-kernel/semantic_kernel.agents.azure_ai.azure_ai_agent.azureaiagent)
+- [`AzureAIAgent`](/python/api/semantic-kernel/semantic_kernel.agents.azure_ai.azure_ai_agent.azureaiagent)
 
 ::: zone-end
 

From 7c6ef5d0e08e8d5da5a191eede11318361929ca7 Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Tue, 11 Mar 2025 12:31:03 +0900
Subject: [PATCH 082/117] Update Python API links for agents (#489) (#490)

* Update Python API links for agents (#489)

* Add missing forward slash (#491)
---
 semantic-kernel/Frameworks/agent/agent-chat.md            | 4 ++--
 semantic-kernel/Frameworks/agent/assistant-agent.md       | 4 ++--
 semantic-kernel/Frameworks/agent/azure-ai-agent.md        | 4 ++--
 semantic-kernel/Frameworks/agent/chat-completion-agent.md | 3 +--
 4 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/semantic-kernel/Frameworks/agent/agent-chat.md b/semantic-kernel/Frameworks/agent/agent-chat.md
index 5ada4e7c..34d1a18a 100644
--- a/semantic-kernel/Frameworks/agent/agent-chat.md
+++ b/semantic-kernel/Frameworks/agent/agent-chat.md
@@ -24,8 +24,8 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`agent_chat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_chat)
-- [`agent_group_chat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_group_chat)
+- [`AgentChat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_chat.agentchat)
+- [`AgentGroupChat`](/python/api/semantic-kernel/semantic_kernel.agents.group_chat.agent_group_chat.agentgroupchat)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md
index aa888610..c6d8ca77 100644
--- a/semantic-kernel/Frameworks/agent/assistant-agent.md
+++ b/semantic-kernel/Frameworks/agent/assistant-agent.md
@@ -22,8 +22,8 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`azure_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent)
-- [`open_ai_assistant_agent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent)
+- [`AzureAssistantAgent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.azure_assistant_agent.azureassistantagent)
+- [`OpenAIAssistantAgent`](/python/api/semantic-kernel/semantic_kernel.agents.open_ai.open_ai_assistant_agent.openaiassistantagent)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
index 8e9ebf3f..6d6eabd7 100644
--- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md
+++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md
@@ -17,13 +17,13 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-csharp"
 
-- [`OpenAIAssistantAgent`](/dotnet/api/microsoft.semantickernel.agents.azureai)
+- [`AzureAIAgent`](/dotnet/api/microsoft.semantickernel.agents.azureai)
 
 ::: zone-end
 
 ::: zone pivot="programming-language-python"
 
-> Updated Semantic Kernel Python API Docs are coming soon.
+- [`AzureAIAgent`](/python/api/semantic-kernel/semantic_kernel.agents.azure_ai.azure_ai_agent.azureaiagent)
 
 ::: zone-end
 
diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
index c8d3b6d7..957dc3aa 100644
--- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md
+++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
@@ -25,8 +25,7 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone pivot="programming-language-python"
 
-- [`chat_completion_agent`](/python/api/semantic-kernel/semantic_kernel.agents.chat_completion.chat_completion_agent)
-- [`chat_completion_client_base`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.chat_completion_client_base)
+- [`ChatCompletionAgent`](/python/api/semantic-kernel/semantic_kernel.agents.chat_completion.chat_completion_agent.chatcompletionagent)
 
 ::: zone-end
 

From f96f665654452ebfab0b46b2769e62eacc9e8a16 Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Tue, 11 Mar 2025 15:52:09 +0900
Subject: [PATCH 083/117] Update Python Chat Completion Agent sample code (#492)

* Update Python Chat Completion Agent sample code

* Updates
---
 .../Frameworks/agent/chat-completion-agent.md | 37 +++++++++++++------
 1 file changed, 26 insertions(+), 11 deletions(-)

diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
index 957dc3aa..6ec5ff91 100644
--- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md
+++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md
@@ -36,7 +36,7 @@ Detailed API documentation related to this discussion is available at:
 
 ::: zone-end
 
-## Chat Completion in _Semantic Kernel_
+## Chat Completion in Semantic Kernel
 
 [_Chat Completion_](../../concepts/ai-services/chat-completion/index.md) is fundamentally a protocol for a chat-based interaction with an AI model where the chat-history maintained and presented to the model with each request. _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) offer a unified framework for integrating the chat-completion capabilities of various AI models.
 
@@ -48,7 +48,7 @@ For .NET, _chat-completion_ AI Services are based on the [`IChatCompletionServic For .NET, some of AI services that support models with chat-completion include: -Model|_Semantic Kernel_ AI Service +Model|Semantic Kernel AI Service --|-- Azure OpenAI|[`Microsoft.SemanticKernel.Connectors.AzureOpenAI`](/dotnet/api/microsoft.semantickernel.connectors.azureopenai) Gemini|[`Microsoft.SemanticKernel.Connectors.Google`](/dotnet/api/microsoft.semantickernel.connectors.google) @@ -61,8 +61,8 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic ::: zone pivot="programming-language-python" -- [`azure_chat_completion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion) -- [`open_ai_chat_completion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion) +- [`AzureChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion.azurechatcompletion) +- [`OpenAIChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion.openaichatcompletion) ::: zone-end @@ -106,7 +106,7 @@ pip install semantic-kernel ## Creating a `ChatCompletionAgent` -A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. +A `ChatCompletionAgent` is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating a `ChatCompletionAgent` starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. ::: zone pivot="programming-language-csharp" ```csharp @@ -129,20 +129,35 @@ ChatCompletionAgent agent = ::: zone-end ::: zone pivot="programming-language-python" +There are two ways to create a `ChatCompletionAgent`: + +### 1. By providing the chat completion service directly: + ```python -# Define the Kernel +# Create the agent by directly providing the chat completion service +agent = ChatCompletionAgent( + service=AzureChatCompletion(), # your chat completion service instance + name="", + instructions="", +) +``` +### 2. By creating a Kernel first, adding the service to it, then providing the kernel: + +```python +# Define the kernel kernel = Kernel() -# Add the AzureChatCompletion AI Service to the Kernel +# Add the chat completion service to the kernel kernel.add_service(AzureChatCompletion()) -# Create the agent +# Create the agent using the kernel agent = ChatCompletionAgent( kernel=kernel, name="", instructions="", ) ``` +The first method is useful when you already have a chat completion service ready. The second method is beneficial when you need a kernel that manages multiple services or additional functionalities. ::: zone-end ::: zone pivot="programming-language-java" @@ -154,9 +169,9 @@ agent = ChatCompletionAgent( ## AI Service Selection -No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a _service-selector_. 
A _service-selector_ identifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. +No different from using Semantic Kernel [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a service-selector. A service-selector identifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. -> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` +> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no service-selector is provided, the same default logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` ::: zone pivot="programming-language-csharp" ```csharp @@ -220,7 +235,7 @@ agent = ChatCompletionAgent( ::: zone pivot="programming-language-csharp" -Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). +Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a Chat Completion [AI service](../../concepts/ai-services/index.md). ```csharp // Define agent From b96f06b9e5f214518d9bcb8e6030bced11a3baf4 Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:05:59 +0900 Subject: [PATCH 084/117] Merge main to live - agent framework updates. (#493) * Update Python API links for agents (#489) * Add missing forward slash (#491) * Update Python Chat Completion Agent sample code (#492) * Update Python Chat Completion Agent sample code * Updates --- .../Frameworks/agent/chat-completion-agent.md | 37 +++++++++++++------ 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 957dc3aa..6ec5ff91 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -36,7 +36,7 @@ Detailed API documentation related to this discussion is available at: ::: zone-end -## Chat Completion in _Semantic Kernel_ +## Chat Completion in Semantic Kernel [_Chat Completion_](../../concepts/ai-services/chat-completion/index.md) is fundamentally a protocol for a chat-based interaction with an AI model where the chat-history maintained and presented to the model with each request. _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) offer a unified framework for integrating the chat-completion capabilities of various AI models. 
@@ -48,7 +48,7 @@ For .NET, _chat-completion_ AI Services are based on the [`IChatCompletionServic For .NET, some of AI services that support models with chat-completion include: -Model|_Semantic Kernel_ AI Service +Model|Semantic Kernel AI Service --|-- Azure OpenAI|[`Microsoft.SemanticKernel.Connectors.AzureOpenAI`](/dotnet/api/microsoft.semantickernel.connectors.azureopenai) Gemini|[`Microsoft.SemanticKernel.Connectors.Google`](/dotnet/api/microsoft.semantickernel.connectors.google) @@ -61,8 +61,8 @@ Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantic ::: zone pivot="programming-language-python" -- [`azure_chat_completion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion) -- [`open_ai_chat_completion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion) +- [`AzureChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion.azurechatcompletion) +- [`OpenAIChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion.openaichatcompletion) ::: zone-end @@ -106,7 +106,7 @@ pip install semantic-kernel ## Creating a `ChatCompletionAgent` -A _chat completion agent_ is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating an _chat completion agent_ starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. +A `ChatCompletionAgent` is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating a `ChatCompletionAgent` starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. ::: zone pivot="programming-language-csharp" ```csharp @@ -129,20 +129,35 @@ ChatCompletionAgent agent = ::: zone-end ::: zone pivot="programming-language-python" +There are two ways to create a `ChatCompletionAgent`: + +### 1. By providing the chat completion service directly: + ```python -# Define the Kernel +# Create the agent by directly providing the chat completion service +agent = ChatCompletionAgent( + service=AzureChatCompletion(), # your chat completion service instance + name="", + instructions="", +) +``` +### 2. By creating a Kernel first, adding the service to it, then providing the kernel: + +```python +# Define the kernel kernel = Kernel() -# Add the AzureChatCompletion AI Service to the Kernel +# Add the chat completion service to the kernel kernel.add_service(AzureChatCompletion()) -# Create the agent +# Create the agent using the kernel agent = ChatCompletionAgent( kernel=kernel, name="", instructions="", ) ``` +The first method is useful when you already have a chat completion service ready. The second method is beneficial when you need a kernel that manages multiple services or additional functionalities. ::: zone-end ::: zone pivot="programming-language-java" @@ -154,9 +169,9 @@ agent = ChatCompletionAgent( ## AI Service Selection -No different from using _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a _service-selector_. 
A _service-selector_ identifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. +No different from using Semantic Kernel [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a service-selector. A service-selector identifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. -> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no _service-selector_ is provided, the same _default_ logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` +> Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no service-selector is provided, the same default logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` ::: zone pivot="programming-language-csharp" ```csharp @@ -220,7 +235,7 @@ agent = ChatCompletionAgent( ::: zone pivot="programming-language-csharp" -Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a _Chat Completion_ [AI service](../../concepts/ai-services/index.md). +Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a Chat Completion [AI service](../../concepts/ai-services/index.md). ```csharp // Define agent From 28f8c3ecfe85177854d0df074d5d8ed953ab7e08 Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Tue, 11 Mar 2025 12:00:41 +0000 Subject: [PATCH 085/117] Apply suggestions from code review Co-authored-by: Dmytro Struk <13853051+dmytrostruk@users.noreply.github.com> --- .../concepts/vector-store-connectors/hybrid-search.md | 2 +- semantic-kernel/support/migration/vectorstore-march-2025.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md index a11df0af..f962d91c 100644 --- a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md @@ -21,7 +21,7 @@ Currently the type of hybrid search supported is based on a vector search, plus are returned. Sparse vector based hybrid search is not currently supported. To execute a hybrid search, your database schema needs to have a vector field and a string field with full text search capabilities enabled. -If you are creating a collection using the semantic kernel vector storage connectors, make sure to enable the `IsFullTextSearchable` option +If you are creating a collection using the Semantic Kernel vector storage connectors, make sure to enable the `IsFullTextSearchable` option on the string field that you want to target for the keyword search. 
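In the Python connectors, the analogous switch is the `is_full_text_searchable` flag on a data field definition. The sketch below shows a record model with one keyword-searchable string field next to a vector field; the class and field names and the dimension count are illustrative assumptions rather than anything required by the hybrid search API.

```python
# Hedged sketch: `description` is flagged as full text searchable so the keyword
# part of a hybrid search can target it, alongside the `description_embedding` vector.
from dataclasses import dataclass
from typing import Annotated

from semantic_kernel.data import (
    VectorStoreRecordDataField,
    VectorStoreRecordKeyField,
    VectorStoreRecordVectorField,
    vectorstoremodel,
)


@vectorstoremodel
@dataclass
class Hotel:
    hotel_id: Annotated[str, VectorStoreRecordKeyField()]
    description: Annotated[str, VectorStoreRecordDataField(is_full_text_searchable=True)]
    description_embedding: Annotated[
        list[float] | None, VectorStoreRecordVectorField(dimensions=1536)
    ] = None
```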
> [!TIP] diff --git a/semantic-kernel/support/migration/vectorstore-march-2025.md b/semantic-kernel/support/migration/vectorstore-march-2025.md index 18e0c56c..dececefe 100644 --- a/semantic-kernel/support/migration/vectorstore-march-2025.md +++ b/semantic-kernel/support/migration/vectorstore-march-2025.md @@ -12,7 +12,7 @@ ms.service: semantic-kernel # Vector Store changes - March 2025 -## Linq based filtering +## LINQ based filtering When doing vector searches it is possible to create a filter (in addition to the vector similarity) that act on data properties to constrain the list of records matched. @@ -125,7 +125,6 @@ These parameters were all optional and the options classes did not contain any o If you were passing these options in the past, you will need to remove these with this update. ```csharp - // Before collection.DeleteAsync("mykey", new DeleteRecordOptions(), cancellationToken); From 37966851d46d21b27b16c7ecfb1d0a1e50bc23fd Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:05:04 +0100 Subject: [PATCH 086/117] added faiss page and updated pinecone --- .../out-of-the-box-connectors/faiss.md | 121 ++++++++++++++ .../out-of-the-box-connectors/index.md | 8 +- .../pinecone-connector.md | 154 ++++++++++++++++-- 3 files changed, 264 insertions(+), 19 deletions(-) create mode 100644 semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md new file mode 100644 index 00000000..21ddf37c --- /dev/null +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md @@ -0,0 +1,121 @@ +--- +title: Using the Semantic Kernel Faiss Vector Store connector (Preview) +description: Contains information on how to use a Semantic Kernel Vector store connector to access and manipulate data in an in-memory Faiss vector store. +zone_pivot_groups: programming-languages +author: eavanvalkenburg +ms.topic: conceptual +ms.author: edvan +ms.date: 03/13/2025 +ms.service: semantic-kernel +--- +# Using the Faiss connector (Preview) + +> [!WARNING] +> The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. + +::: zone pivot="programming-language-csharp" + +## Not supported at this time + +::: zone-end +::: zone pivot="programming-language-python" + +## Overview + +The Faiss Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses no external database and stores data in memory and vectors in a Faiss Index. +This Vector Store is useful for prototyping scenarios or where high-speed in-memory operations are required. + +The connector has the following characteristics. + +| Feature Area | Support | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | In-memory and Faiss indexes dictionary | +| Supported key property types | Any that is allowed to be a dict key, see the python documentation for details [here](https://docs.python.org/3/library/stdtypes.html#typesmapping) | +| Supported data property types | Any type | +| Supported vector property types |

  • list[float \| int]
  • numpy array
  • | +| Supported index types | Flat (see [custom indexes](#custom-indexes)) | +| Supported distance functions |
  • Dot Product Similarity
  • Euclidean Squared Distance
  • | +| Supports multiple vectors in a record | Yes | +| is_filterable supported? | Yes | +| is_full_text_searchable supported? | Yes | + +## Getting started + +Add the Semantic Kernel package to your project. + +```cmd +pip install semantic-kernel[faiss] +``` + +In the snippets below, it is assumed that you have a data model class defined named 'DataModel'. + +```python +from semantic_kernel.connectors.memory.faiss import FaissStore + +vector_store = FaissStore() +vector_collection = vector_store.get_collection("collection_name", DataModel) +``` + +It is possible to construct a direct reference to a named collection. + +```python +from semantic_kernel.connectors.memory.faiss import FaissCollection + +vector_collection = FaissCollection("collection_name", DataModel) +``` + +## Custom indexes + +The Faiss connector is limited to the Flat index type. + +Given the complexity of Faiss indexes, you are free to create your own index(es), including building the faiss-gpu package and using indexes from that. When doing this, any metrics defined on a vector field is ignored. If you have multiple vectors in your datamodel, you can pass in custom indexes only for the ones you want and let the built-in indexes be created, with a flat index and the metric defined in the model. + +Important to note, if the index requires training, then make sure to do that as well, whenever we use the index, a check is done on the `is_trained` attribute of the index. + +The index is always available (custom or built-in) in the `indexes` property of the collection. You can use this to get the index and do any operations you want on it, so you can do training afterwards, just make sure to do that before you want to use any CRUD against it. + +To pass in your custom index, use either: + +```python + +import faiss + +from semantic_kernel.connectors.memory.faiss import FaissCollection + +index = faiss.IndexHNSW(d=768, M=16, efConstruction=200) # or some other index +vector_collection = FaissCollection( + collection_name="collection_name", + data_model_type=DataModel, + indexes={"vector_field_name": index} +) +``` + +or: + +```python + +import faiss + +from semantic_kernel.connectors.memory.faiss import FaissCollection + +index = faiss.IndexHNSW(d=768, M=16, efConstruction=200) # or some other index +vector_collection = FaissCollection( + collection_name="collection_name", + data_model_type=DataModel, +) +await vector_collection.create_collection( + indexes={"vector_field_name": index} +) +# or when you have only one vector field: +await vector_collection.create_collection( + index=index +) + +``` + +::: zone-end +::: zone pivot="programming-language-java" + +## Not supported at this time + +::: zone-end diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index ef0c44cc..8a419e3b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -52,15 +52,15 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| Faiss | Planned | | | +| [Faiss](./faiss.md) | ✅ | ✅ | Microsoft Semantic Kernel Project 
| | [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | Planned | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| SQL Server | Planned | ✅ | Microsoft Semantic Kernel Project | -| SQLite | Planned | ✅ | Microsoft Semantic Kernel Project | +| SQL Server | Planned | | Microsoft Semantic Kernel Project | +| SQLite | Planned | | Microsoft Semantic Kernel Project | | [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 2be8a72d..e2f6b80f 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -19,20 +19,20 @@ ms.service: semantic-kernel The Pinecone Vector Store connector can be used to access and manage data in Pinecone. The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | Pinecone serverless Index | -| Supported key property types | string | -| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • decimal
    • *enumerables of type* string
    | -| Supported vector property types | ReadOnlyMemory\ | -| Supported index types | PGA (Pinecone Graph Algorithm) | -| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | -| Supported filter clauses |
    • EqualTo
    | -| Supports multiple vectors in a record | No | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | No | -| StoragePropertyName supported? | Yes | -| HybridSearch supported? | No | +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| Collection maps to | Pinecone serverless Index | +| Supported key property types | string | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • decimal
    • *enumerables of type* string
    | +| Supported vector property types | ReadOnlyMemory\ | +| Supported index types | PGA (Pinecone Graph Algorithm) | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | +| Supported filter clauses |
    • EqualTo
    | +| Supports multiple vectors in a record | No | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +| StoragePropertyName supported? | Yes | +| HybridSearch supported? | No | ## Getting started @@ -175,7 +175,131 @@ public class Hotel ::: zone-end ::: zone pivot="programming-language-python" -The Pinecone connector is not yet available in Python. +## Overview + +The Pinecone Vector Store connector can be used to access and manage data in Pinecone. The connector has the following characteristics. + +| Feature Area | Support | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Pinecone serverless Index | +| Supported key property types | string | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and iterables of each of these types*
    | +| Supported vector property types |
  • list[float \| int]
  • numpy array
  • | +| Supported index types | PGA (Pinecone Graph Algorithm) | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | +| Supported filter clauses |
    • EqualTo
    • AnyTagEqualTo
    | +| Supports multiple vectors in a record | No | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | No | +| Integrated Embeddings supported? | Yes, see [here](#integrated-embeddings) | +| GRPC Supported? | Yes, see [here](#grpc-support) | + +## Getting started + +Add the Pinecone Vector Store connector extra to your project. + +```bash +pip install semantic-kernel[pinecone] +``` + +You can then create a PineconeStore instance and use it to create a collection. +This will read the Pinecone API key from the environment variable `PINECONE_API_KEY`. + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeStore + +store = PineconeStore() +collection = store.get_collection(collection_name="collection_name", data_model=DataModel) +``` + +It is possible to construct a direct reference to a named collection. + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection(collection_name="collection_name", data_model=DataModel) +``` + +You can also create your own Pinecone client and pass it into the constructor. +The client needs to be either `PineconeAsyncio` or `PineconeGRCP` (see [GRCP Support](#grpc-support)). + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeStore, PineconeCollection +from pinecone import PineconeAsyncio + +client = PineconeAsyncio(api_key="your_api_key") +store = PineconeStore(client=client) +collection = store.get_collection(collection_name="collection_name", data_model=DataModel) +``` + +### GRPC support + +We also support two options on the collection constructor, the first is to enable GRPC support: + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection(collection_name="collection_name", data_model=DataModel, use_grpc=True) +``` + +Or with your own client: + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeStore +from pinecone.grcp import PineconeGRPC + +client = PineconeGRPC(api_key="your_api_key") +store = PineconeStore(client=client) +collection = store.get_collection(collection_name="collection_name", data_model=DataModel) +``` + +### Integrated Embeddings + +The second is to use the integrated embeddings of Pinecone, this will check for a environment variable for the model to use for that, see [Pinecone docs](https://docs.pinecone.io/guides/indexes/create-an-index) and then the `Use integrated embeddings` sections. + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection(collection_name="collection_name", data_model=DataModel) +``` + +Alternatively, when not settings the environment variable, you can pass the embed settings into the constructor: + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection(collection_name="collection_name", data_model=DataModel, embed_settings={"model": "multilingual-e5-large"}) +``` + +This can include other details about the vector setup, like metric and field mapping. +You can also pass the embed settings into the `create_collection` method, this will override the default settings set during initialization. 
+ +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection(collection_name="collection_name", data_model=DataModel) +await collection.create_collection(embed_settings={"model": "multilingual-e5-large"}) +``` + +> Important: GRCP and Integrated embeddings cannot be used together. + +## Index Namespace + +The Vector Store abstraction does not support a multi tiered record grouping mechanism. Collections in the abstraction map to a Pinecone serverless index +and no second level exists in the abstraction. Pinecone does support a second level of grouping called namespaces. + +By default the Pinecone connector will pass `''` as the namespace for all operations. However it is possible to pass a single namespace to the +Pinecone collection when constructing it and use this instead for all operations. + +```python +from semantic_kernel.connectors.memory.pinecone import PineconeCollection + +collection = PineconeCollection( + collection_name="collection_name", + data_model=DataModel, + namespace="seasidehotels" +) +``` ::: zone-end ::: zone pivot="programming-language-java" From 8dde11ed21572ea89dbb7adf595dd4722cd18953 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:10:23 +0100 Subject: [PATCH 087/117] toc and some edits --- .../vector-store-connectors/out-of-the-box-connectors/TOC.yml | 2 ++ .../out-of-the-box-connectors/{faiss.md => faiss-connector.md} | 2 +- .../vector-store-connectors/out-of-the-box-connectors/index.md | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) rename semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/{faiss.md => faiss-connector.md} (96%) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml index de9c5f57..3d1bc626 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml @@ -12,6 +12,8 @@ href: couchbase-connector.md - name: Elasticsearch connector href: elasticsearch-connector.md +- name: FAISS connector + href: faiss-connector.md - name: In-memory connector href: inmemory-connector.md - name: JDBC connector diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md similarity index 96% rename from semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md rename to semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md index 21ddf37c..de606f87 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md @@ -22,7 +22,7 @@ ms.service: semantic-kernel ## Overview -The Faiss Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses no external database and stores data in memory and vectors in a Faiss Index. +The Faiss Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses no external database and stores data in memory and vectors in a Faiss Index. It uses the [`InMemoryVectorCollection`](./inmemory-connector.md) for the other parts of the records, while using the Faiss indexes for search. 
This Vector Store is useful for prototyping scenarios or where high-speed in-memory operations are required. The connector has the following characteristics. diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 8a419e3b..8d6dca7d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -52,7 +52,7 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| [Faiss](./faiss.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | From b78eb1549b8e2811c252f6f006c0cc2c8898ba85 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:15:57 +0100 Subject: [PATCH 088/117] added faiss link --- .../vector-store-connectors/out-of-the-box-connectors/TOC.yml | 2 +- .../out-of-the-box-connectors/faiss-connector.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml index 3d1bc626..161e8b58 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml @@ -12,7 +12,7 @@ href: couchbase-connector.md - name: Elasticsearch connector href: elasticsearch-connector.md -- name: FAISS connector +- name: Faiss connector href: faiss-connector.md - name: In-memory connector href: inmemory-connector.md diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md index de606f87..fe077489 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md @@ -22,7 +22,7 @@ ms.service: semantic-kernel ## Overview -The Faiss Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses no external database and stores data in memory and vectors in a Faiss Index. It uses the [`InMemoryVectorCollection`](./inmemory-connector.md) for the other parts of the records, while using the Faiss indexes for search. +The [Faiss](https://github.com/facebookresearch/faiss) Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses no external database and stores data in memory and vectors in a Faiss Index. It uses the [`InMemoryVectorCollection`](./inmemory-connector.md) for the other parts of the records, while using the Faiss indexes for search. 
This Vector Store is useful for prototyping scenarios or where high-speed in-memory operations are required. The connector has the following characteristics. From 5aba9e0214761a35c79497ad56d5a4082ae65334 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:18:14 +0100 Subject: [PATCH 089/117] fix GRPC abbr --- .../out-of-the-box-connectors/pinecone-connector.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index e2f6b80f..11bd0404 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -221,7 +221,7 @@ collection = PineconeCollection(collection_name="collection_name", data_model=Da ``` You can also create your own Pinecone client and pass it into the constructor. -The client needs to be either `PineconeAsyncio` or `PineconeGRCP` (see [GRCP Support](#grpc-support)). +The client needs to be either `PineconeAsyncio` or `PineconeGRPC` (see [GRPC Support](#grpc-support)). ```python from semantic_kernel.connectors.memory.pinecone import PineconeStore, PineconeCollection @@ -246,7 +246,7 @@ Or with your own client: ```python from semantic_kernel.connectors.memory.pinecone import PineconeStore -from pinecone.grcp import PineconeGRPC +from pinecone.grpc import PineconeGRPC client = PineconeGRPC(api_key="your_api_key") store = PineconeStore(client=client) @@ -281,7 +281,7 @@ collection = PineconeCollection(collection_name="collection_name", data_model=Da await collection.create_collection(embed_settings={"model": "multilingual-e5-large"}) ``` -> Important: GRCP and Integrated embeddings cannot be used together. +> Important: GRPC and Integrated embeddings cannot be used together. ## Index Namespace From 92351738c7f5bde71e1264ed8374eb2b37f9c798 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:20:49 +0100 Subject: [PATCH 090/117] added support matrix for dotnet --- .../out-of-the-box-connectors/pinecone-connector.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 11bd0404..05426ac3 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -33,6 +33,8 @@ The Pinecone Vector Store connector can be used to access and manage data in Pin | IsFullTextSearchable supported? | No | | StoragePropertyName supported? | Yes | | HybridSearch supported? | No | +| Integrated Embeddings supported? | No | +| GRPC Supported? 
| No | ## Getting started From 9c494530df7cb50f608d6dc1db1e46c4cb0c3142 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 12:23:04 +0100 Subject: [PATCH 091/117] removed grpc from dotnet --- .../out-of-the-box-connectors/pinecone-connector.md | 1 - 1 file changed, 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 05426ac3..0df8817d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -34,7 +34,6 @@ The Pinecone Vector Store connector can be used to access and manage data in Pin | Supported vector property types | ReadOnlyMemory\ | | StoragePropertyName supported? | Yes | | HybridSearch supported? | No | | Integrated Embeddings supported? | No | -| GRPC Supported? | No | From 5f4148ac4a6f985a0809f7e56d79cf5a551024de Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 14:58:02 +0100 Subject: [PATCH 092/117] clarified embedding setup --- .../out-of-the-box-connectors/pinecone-connector.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 0df8817d..1a2a6fe2 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -256,7 +256,9 @@ collection = store.get_collection(collection_name="collection_name", data_model= ### Integrated Embeddings -The second is to use the integrated embeddings of Pinecone, this will check for a environment variable for the model to use for that, see [Pinecone docs](https://docs.pinecone.io/guides/indexes/create-an-index) and then the `Use integrated embeddings` sections. +The second is to use the integrated embeddings of Pinecone. This will check for an environment variable called `PINECONE_EMBED_MODEL` with the model name, or you can pass in an `embed_settings` dict, which can contain just the model key or the full settings for the embedding model. In the former case, the other settings are derived from the data model definition. + +See [Pinecone docs](https://docs.pinecone.io/guides/indexes/create-an-index) and then the `Use integrated embeddings` sections. 
```python from semantic_kernel.connectors.memory.pinecone import PineconeCollection From 6136683f94c055dab1c785aef2300e118e21142b Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Thu, 13 Mar 2025 15:43:06 +0100 Subject: [PATCH 093/117] updated all pages --- .../azure-ai-search-connector.md | 4 +- .../azure-cosmosdb-mongodb-connector.md | 2 +- .../azure-cosmosdb-nosql-connector.md | 181 ++++++++++++++++-- .../chroma-connector.md | 24 +-- .../faiss-connector.md | 4 +- .../pinecone-connector.md | 2 +- 6 files changed, 183 insertions(+), 34 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md index dc490281..869374b0 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-ai-search-connector.md @@ -42,9 +42,9 @@ The Azure AI Search Vector Store connector can be used to access and manage data | Collection maps to | Azure AI Search Index | | Supported key property types | string | | Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and iterables of each of these types*
    | -| Supported vector property types | list[float], list[int], ndarray | +| Supported vector property types |
    • list[float]
    • list[int]
    • numpy array
    | | Supported index types |
    • Hnsw
    • Flat
    | -| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    • Hamming
    | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    • Hamming
    | | Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | | Supports multiple vectors in a record | Yes | | IsFilterable supported? | Yes | diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md index b0fcf275..6af3c29d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-mongodb-connector.md @@ -42,7 +42,7 @@ The Azure CosmosDB MongoDB Vector Store connector can be used to access and mana | Collection maps to | Azure Cosmos DB MongoDB (vCore) Collection + Index | | Supported key property types | string | | Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and iterables of each of these types*
    | -| Supported vector property types |
    • list[float]
    • list[int]
  • ndarray
  • | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | | Supported index types |
    • Hnsw
    • IvfFlat
    | | Supported distance functions |
    • CosineDistance
    • DotProductSimilarity
    • EuclideanDistance
    | | Supported filter clauses |
    • EqualTo
    • AnyTagsEqualTo
    | diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md index 2136964d..4b9807ce 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/azure-cosmosdb-nosql-connector.md @@ -19,20 +19,20 @@ ms.service: semantic-kernel The Azure CosmosDB NoSQL Vector Store connector can be used to access and manage data in Azure CosmosDB NoSQL. The connector has the following characteristics. -| Feature Area | Support | -|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Collection maps to | Azure Cosmos DB NoSQL Container | -| Supported key property types |
    • string
    • AzureCosmosDBNoSQLCompositeKey
    | -| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and enumerables of each of these types*
    | -| Supported vector property types |
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    | -| Supported index types |
    • Flat
    • QuantizedFlat
    • DiskAnn
    | -| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    | -| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | -| Supports multiple vectors in a record | Yes | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | Yes | -| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | -| HybridSearch supported? | Yes | +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure Cosmos DB NoSQL Container | +| Supported key property types |
    • string
    • AzureCosmosDBNoSQLCompositeKey
    | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and enumerables of each of these types*
    | +| Supported vector property types |
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    • ReadOnlyMemory\
    | +| Supported index types |
    • Flat
    • QuantizedFlat
    • DiskAnn
    | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | +| StoragePropertyName supported? | No, use `JsonSerializerOptions` and `JsonPropertyNameAttribute` instead. [See here for more info.](#data-mapping) | +| HybridSearch supported? | Yes | ## Limitations @@ -248,9 +248,158 @@ var record = await collection.GetAsync(new AzureCosmosDBNoSQLCompositeKey("hotel ::: zone-end ::: zone pivot="programming-language-python" -## Coming soon +## Overview -More info coming soon. +The Azure CosmosDB NoSQL Vector Store connector can be used to access and manage data in Azure CosmosDB NoSQL. The connector has the following characteristics. + +| Feature Area | Support | +| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Collection maps to | Azure Cosmos DB NoSQL Container | +| Supported key property types |
    • string
    • AzureCosmosDBNoSQLCompositeKey
    | +| Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • bool
    • DateTimeOffset
    • *and iterables of each of these types*
    | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | +| Supported index types |
    • Flat
    • QuantizedFlat
    • DiskAnn
    | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | Yes | +| is_filterable supported? | Yes | +| is_full_text_searchable supported? | Yes | +| HybridSearch supported? | No | + +## Getting started + +Add the Azure extra package to your project. + +```bash +pip install semantic-kernel[azure] +``` + +Next you can create a Azure CosmosDB NoSQL Vector Store instance directly. This reads certain environment variables to configure the connection to Azure CosmosDB NoSQL: + +- AZURE_COSMOS_DB_NO_SQL_URL +- AZURE_COSMOS_DB_NO_SQL_DATABASE_NAME + +And optionally: + +- AZURE_COSMOS_DB_NO_SQL_KEY + +When this is not set, a `AsyncDefaultAzureCredential` is used to authenticate. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLStore + +vector_store = AzureCosmosDBNoSQLStore() +``` + +You can also supply these values in the constructor: + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLStore + +vector_store = AzureCosmosDBNoSQLStore( + url="https://.documents.azure.com:443/", + key="", + database_name="" +) +``` + +And you can pass in a CosmosClient instance, just make sure it is a async client. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLStore +from azure.cosmos.aio import CosmosClient + +client = CosmosClient( + url="https://.documents.azure.com:443/", + credential="" or AsyncDefaultAzureCredential() +) +vector_store = AzureCosmosDBNoSQLStore( + client=client, + database_name="" +) +``` + +The next step needs a data model, a variable called Hotels is used in the example below. + +With a store, you can get a collection: + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLStore + +vector_store = AzureCosmosDBNoSQLStore() +collection = vector_store.get_collection(collection_name="skhotels", data_model=Hotel) +``` + +It is possible to construct a direct reference to a named collection, this uses the same environment variables as above. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLCollection + +collection = AzureCosmosDBNoSQLCollection( + collection_name="skhotels", + data_model_type=Hotel, +) +``` + +## Using partition key + +In the Azure Cosmos DB for NoSQL connector, the partition key property defaults to the key property - `id`. You can also supply a value for the partition key in the constructor. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLCollection + +collection = AzureCosmosDBNoSQLCollection( + collection_name="skhotels", + data_model_type=Hotel, + partition_key="hotel_name" +) +``` + +This can be a more complex key, when using the `PartitionKey` object: + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLCollection +from azure.cosmos import PartitionKey + +partition_key = PartitionKey(path="/hotel_name") +collection = AzureCosmosDBNoSQLCollection( + collection_name="skhotels", + data_model_type=Hotel, + partition_key=partition_key +) +``` + +The `AzureCosmosDBNoSQLVectorStoreRecordCollection` class supports two key types: `string` and `AzureCosmosDBNoSQLCompositeKey`. The `AzureCosmosDBNoSQLCompositeKey` consists of `key` and `partition_key`. + +If the partition key property is not set (and the default key property is used), `string` keys can be used for operations with database records. 
However, if a partition key property is specified, it is recommended to use `AzureCosmosDBNoSQLCompositeKey` to provide both the key and partition key values to the `get` and `delete` methods. + +```python +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLCollection +from semantic_kernel.connectors.memory.azure_cosmos_db import AzureCosmosDBNoSQLCompositeKey +from semantic_kernel.data import VectorStoreRecordDataField + +@vectorstoremodel +class data_model_type: + id: Annotated[str, VectorStoreRecordKeyField] + product_type: Annotated[str, VectorStoreRecordDataField()] + ... + +collection = store.get_collection( + collection_name=collection_name, + data_model=data_model_type, + partition_key=PartitionKey(path="/product_type"), +) + +# when there is data in the collection +composite_key = AzureCosmosDBNoSQLCompositeKey( + key='key value', partition_key='partition key value' +) +# get a record, with the partition key +record = await collection.get(composite_key) + +# or delete +await collection.delete(composite_key) +``` ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md index 17dc15cc..9cfc7ee8 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/chroma-connector.md @@ -28,18 +28,18 @@ Not supported. The Chroma Vector Store connector can be used to access and manage data in Chroma. The connector has the following characteristics. -| Feature Area | Support | -| ------------------------------------- | ------------------------------------------------------------------------------------------------- | -| Collection maps to | Chroma collection | -| Supported key property types | string | -| Supported data property types | All types that are supported by System.Text.Json (either built-in or by using a custom converter) | -| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | -| Supported index types |
    • HNSW
    | -| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | -| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | -| Supports multiple vectors in a record | No | -| IsFilterable supported? | Yes | -| IsFullTextSearchable supported? | Yes | +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------------ | +| Collection maps to | Chroma collection | +| Supported key property types | string | +| Supported data property types | All types | +| Supported vector property types |
    • list[float]
    • list[int]
    • ndarray
    | +| Supported index types |
    • HNSW
    | +| Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | +| Supported filter clauses |
    • AnyTagEqualTo
    • EqualTo
    | +| Supports multiple vectors in a record | No | +| IsFilterable supported? | Yes | +| IsFullTextSearchable supported? | Yes | ## Limitations diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md index fe077489..7fc2c4a2 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/faiss-connector.md @@ -32,9 +32,9 @@ The connector has the following characteristics. | Collection maps to | In-memory and Faiss indexes dictionary | | Supported key property types | Any that is allowed to be a dict key, see the python documentation for details [here](https://docs.python.org/3/library/stdtypes.html#typesmapping) | | Supported data property types | Any type | -| Supported vector property types |
  • list[float \| int]
  • numpy array
  • | +| Supported vector property types |
    • list[float]
    • list[int]
    • numpy array
    | | Supported index types | Flat (see [custom indexes](#custom-indexes)) | -| Supported distance functions |
  • Dot Product Similarity
  • Euclidean Squared Distance
  • | +| Supported distance functions |
    • Dot Product Similarity
    • Euclidean Squared Distance
    | | Supports multiple vectors in a record | Yes | | is_filterable supported? | Yes | | is_full_text_searchable supported? | Yes | diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md index 1a2a6fe2..cbd94d3b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/pinecone-connector.md @@ -185,7 +185,7 @@ The Pinecone Vector Store connector can be used to access and manage data in Pin | Collection maps to | Pinecone serverless Index | | Supported key property types | string | | Supported data property types |
    • string
    • int
    • long
    • double
    • float
    • decimal
    • bool
    • DateTime
    • *and iterables of each of these types*
    | -| Supported vector property types |
  • list[float \| int]
  • numpy array
  • | +| Supported vector property types |
    • list[float]
    • list[int]
    • numpy array
    | | Supported index types | PGA (Pinecone Graph Algorithm) | | Supported distance functions |
    • CosineSimilarity
    • DotProductSimilarity
    • EuclideanSquaredDistance
    | | Supported filter clauses |
    • EqualTo
    • AnyTagEqualTo
    | From 354c5d92a7c20458babac20d83098ffe25a69192 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Fri, 21 Mar 2025 14:31:57 +0100 Subject: [PATCH 094/117] SQL Server Connector --- .../out-of-the-box-connectors/TOC.yml | 2 + .../out-of-the-box-connectors/index.md | 34 ++--- .../sql-connector.md | 121 ++++++++++++++++++ 3 files changed, 140 insertions(+), 17 deletions(-) create mode 100644 semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml index 161e8b58..4822cb9d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml @@ -28,6 +28,8 @@ href: qdrant-connector.md - name: Redis connector href: redis-connector.md +- name: SQL Server connector + href: sql-connector.md - name: SQLite connector href: sqlite-connector.md - name: Volatile (in-memory) connector diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 8d6dca7d..317cc1ec 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -45,23 +45,23 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma ::: zone-end ::: zone pivot="programming-language-python" -| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | -| ------------------------------------------------------------------ | :-----: | :---------------------------: | :-------------------------------: | -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| SQL Server | Planned | | Microsoft Semantic Kernel Project | -| SQLite | Planned | | Microsoft Semantic Kernel Project | -| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------------------------ | :-----: | :----------------------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB MongoDB 
(vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | +| [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [SQL Server](./sql-connector.md) | ✅ | [pyodbc](https://pypi.org/project/pyodbc/) | Microsoft Semantic Kernel Project | +| SQLite | Planned | | Microsoft Semantic Kernel Project | +| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md new file mode 100644 index 00000000..cab80687 --- /dev/null +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md @@ -0,0 +1,121 @@ +--- +title: Using the Semantic Kernel SQL Server Vector Store connector (Preview) +description: Contains information on how to use a Semantic Kernel Vector store connector to access and manipulate data in SQL Server. +zone_pivot_groups: programming-languages +author: eavanvalkenburg +ms.topic: conceptual +ms.author: edvan +ms.date: 03/21/2024 +ms.service: semantic-kernel +--- +# Using the SQL Server Vector Store connector (Preview) + +> [!WARNING] +> The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. + +::: zone pivot="programming-language-csharp" + +## Coming soon + +More info coming soon. + +::: zone-end +::: zone pivot="programming-language-python" + +## Overview + +The [SQL Server](https://learn.microsoft.com/sql) Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses Azure SQL as a vector store. Once SQL Server on-prem supports vectors it can also be used with that. + +The connector has the following characteristics. + +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------- | +| Collection maps to | Table dictionary | +| Supported key property types |
    • str
    • int
    | +| Supported data property types | Any type | +| Supported vector property types |
    • list[float]
    • numpy array
    | +| Supported index types |
    • Flat
    | +| Supported distance functions |
    • Cosine Distance
    • Dot Product Similarity
    • Euclidean Distance
    | +| Supports multiple vectors in a record | Yes | +| is_filterable supported? | Yes | +| is_full_text_searchable supported? | No | + +## Getting started + +Add the Semantic Kernel package to your project. + +```cmd +pip install semantic-kernel[sql] +``` + +The SQL Server connector uses the [pyodbc](https://pypi.org/project/pyodbc/) package to connect to SQL Server. The extra will install the package, but you will need to install the ODBC driver for SQL Server separately, this differs by platform, see the [Azure SQL Documentation](azure/azure-sql/database/azure-sql-python-quickstart?view=azuresql&tabs=windows%2Csql-inter) for details. + +In order for the store and collection to work, it needs a connection string, this can be passed to the constructor or be set in the environment variable `SQL_SERVER_CONNECTION_STRING`. In order to properly deal with vectors, the `LongAsMax=yes` option will be added if not found. It also can use both username/password or integrated security, for the latter, the `DefaultAzureCredential` is used. + +In the snippets below, it is assumed that you have a data model class defined named 'DataModel'. + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerStore + +vector_store = SqlServerStore() + +# OR + +vector_store = SqlServerStore(connection_string="Driver={ODBC Driver 18 for SQL Server};Server=server_name;Database=database_name;UID=user;PWD=password;LongAsMax=yes;") + +vector_collection = vector_store.get_collection("dbo.table_name", DataModel) +``` + +It is possible to construct a direct reference to a named collection. + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerCollection + +vector_collection = SqlServerCollection("dbo.table_name", DataModel) +``` + +> Note: The collection name can be specified as a simple string (e.g. `table_name`) or as a fully qualified name (e.g. `dbo.table_name`). The latter is recommended to avoid ambiguity, if no schema is specified, the default schema (`dbo`) will be used. + +When you have specific requirements for the connection, you can also pass in a `pyodbc.Connection` object to the `SqlServerStore` constructor. This allows you to use a custom connection string or other connection options: + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerStore +import pyodbc + +# Create a connection to the SQL Server database +connection = pyodbc.connect("Driver={ODBC Driver 18 for SQL Server};Server=server_name;Database=database_name;UID=user;PWD=password;LongAsMax=yes;") +# Create a SqlServerStore with the connection +vector_store = SqlServerStore(connection=connection) +``` + +You will have to make sure to close the connection yourself, as the store or collection will not do that for you. + +## Custom create queries + +The SQL Server connector is limited to the Flat index type. + +The `create_collection` method on the `SqlServerCollection` allows you to pass in a single or multiple custom queries to create the collection. The queries are executed in the order they are passed in, no results are returned. + +If this is done, there is no guarantee that the other methods still work as expected. The connector is not aware of the custom queries and will not validate them. 
+
+If the `DataModel` has `id`, `content`, and `vector` as fields, you could, for instance, create the table like this in order to also create an index on the content field:
+
+```python
+from semantic_kernel.connectors.memory.sql_server import SqlServerCollection
+
+# Create the collection with custom queries: one creates the table, the other adds an index on the content field.
+async with SqlServerCollection("dbo.table_name", DataModel) as collection:
+    await collection.create_collection(
+        queries=[
+            "CREATE TABLE dbo.table_name (id INT PRIMARY KEY, content NVARCHAR(3000) NULL, vector VECTOR(1536) NULL);",
+            "CREATE INDEX idx_content ON dbo.table_name (content);",
+        ]
+    )
+```
+
+::: zone-end
+::: zone pivot="programming-language-java"
+
+## Coming soon
+
+More info coming soon.
+
+::: zone-end

From 6bb9aff2d911becbcf51682b650335e24c8c5223 Mon Sep 17 00:00:00 2001
From: eavanvalkenburg
Date: Fri, 21 Mar 2025 14:47:11 +0100
Subject: [PATCH 095/117] bash

---
 .../out-of-the-box-connectors/sql-connector.md     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
index cab80687..bdb2ad30 100644
--- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
+++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
@@ -44,7 +44,7 @@ The connector has the following characteristics.
 
 Add the Semantic Kernel package to your project.
 
-```cmd
+```bash
 pip install semantic-kernel[sql]
 ```
 

From 72a98fe621997f46b6ca7426211b34d7e99c75ac Mon Sep 17 00:00:00 2001
From: eavanvalkenburg
Date: Fri, 21 Mar 2025 14:58:43 +0100
Subject: [PATCH 096/117] azure links updated

---
 .../out-of-the-box-connectors/sql-connector.md     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
index bdb2ad30..ef59a1c9 100644
--- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
+++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md
@@ -24,7 +24,7 @@ More info coming soon.
 
 ## Overview
 
-The [SQL Server](https://learn.microsoft.com/sql) Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses Azure SQL as a vector store. Once SQL Server on-prem supports vectors it can also be used with that.
+The [SQL Server](/sql) Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses Azure SQL as a vector store. Once SQL Server on-premises supports vectors, the connector can be used with it as well.
 
 The connector has the following characteristics.
 
@@ -48,7 +48,7 @@ Add the Semantic Kernel package to your project.
 
 pip install semantic-kernel[sql]
 ```
 
-The SQL Server connector uses the [pyodbc](https://pypi.org/project/pyodbc/) package to connect to SQL Server. The extra will install the package, but you will need to install the ODBC driver for SQL Server separately, this differs by platform, see the [Azure SQL Documentation](azure/azure-sql/database/azure-sql-python-quickstart?view=azuresql&tabs=windows%2Csql-inter) for details.
+The SQL Server connector uses the [pyodbc](https://pypi.org/project/pyodbc/) package to connect to SQL Server. 
The extra will install the package, but you will need to install the ODBC driver for SQL Server separately. The driver differs by platform; see the [Azure SQL Documentation](/azure/azure-sql/database/azure-sql-python-quickstart) for details.

From 5bfc1f5938db065e4e2c43bd47331ed6d90e4593 Mon Sep 17 00:00:00 2001
From: westey <164392973+westey-m@users.noreply.github.com>
Date: Tue, 25 Mar 2025 00:02:14 +0000
Subject: [PATCH 097/117] Python / .Net: Common Agent Invocation API migration
 guide and docs update (#501)

* Python: update python RC agent migration doc based on thread api (#499)

* update python RC agent migration doc based on thread api

* revert c# change

* PR feedback

* Cleanup

* .Net: Update migration guide and Docs for Common Agents API (#500)

* Update migration guide

* Fix formatting issue.

* Update architecture page and samples in individual docs pages

* Small tweak to messaging

* Address pr comments

* Use different example so that it works for Python too.

* Python agent framework docs updates (#502)

* Update semantic-kernel/support/migration/agent-framework-rc-migration-guide.md

Co-authored-by: westey <164392973+westey-m@users.noreply.github.com>

* Update semantic-kernel/Frameworks/agent/examples/example-assistant-code.md

Co-authored-by: westey <164392973+westey-m@users.noreply.github.com>

---------

Co-authored-by: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
---
 .../Frameworks/agent/agent-architecture.md    |  23 +-
 .../Frameworks/agent/agent-streaming.md       |  62 +-
 .../Frameworks/agent/agent-templates.md       |   5 +-
 .../Frameworks/agent/assistant-agent.md       |  62 +-
 .../Frameworks/agent/azure-ai-agent.md        |  49 +-
 .../Frameworks/agent/chat-completion-agent.md |  58 +-
 .../agent/examples/example-assistant-code.md  |  66 +-
 .../examples/example-assistant-search.md      |  12 +-
 .../agent/examples/example-chat-agent.md      |  42 +-
 .../agent-framework-rc-migration-guide.md     | 568 +++++++++++++++++-
 10 files changed, 778 insertions(+), 169 deletions(-)

diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md
index 67c99849..589f2e5b 100644
--- a/semantic-kernel/Frameworks/agent/agent-architecture.md
+++ b/semantic-kernel/Frameworks/agent/agent-architecture.md
@@ -53,6 +53,7 @@ Agents can either be invoked directly to perform tasks or orchestrated within an
 
 #### Deep Dive:
 
+- [`AzureAIAgent`](./azure-ai-agent.md)
 - [`ChatCompletionAgent`](./chat-completion-agent.md)
 - [`OpenAIAssistantAgent`](./assistant-agent.md)
 
@@ -61,10 +62,23 @@ Agents can either be invoked directly to perform tasks or orchestrated within an
 ## Agent Exensibility
 -->
 
+## Agent Thread
+
+The abstract `AgentThread` class serves as the core abstraction for threads or conversation state.
+It abstracts away the different ways in which conversation state may be managed for different agents.
+
+Stateful agent services often store conversation state in the service, and you can interact with it via an id.
+Other agents may require the entire chat history to be passed to the agent on each invocation, in which
+case the conversation state is managed locally in the application. 
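+
+As a rough sketch (C#, assuming an existing `agent` instance), this is what working with a thread typically looks like: omit the thread (or pass `null`) to start a new conversation, then reuse the thread returned on the response to continue it.
+
+```csharp
+// 'agent' can be any agent type, for example ChatCompletionAgent, AzureAIAgent or OpenAIAssistantAgent.
+AgentThread? thread = null;
+
+// Invoke the agent; when no thread is passed, a new one is created and returned with the response.
+await foreach (AgentResponseItem<ChatMessageContent> response in
+    agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, "Hi"), thread))
+{
+    Console.WriteLine(response);
+
+    // Reuse the returned thread so later invocations continue the same conversation.
+    thread = response.Thread;
+}
+```
+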
+ +Stateful agents typically only work with a matching `AgentThread` implementation, while other types of agents could work with more than one `AgentThread` type. +For example, `AzureAIAgent` requires a matching `AzureAIAgentThread`. +This is because the Azure AI Agent service stores conversations in the service, and requires specific service calls to create a thread and update it. +If a different agent thread type was used with `AzureAIAgent`, no thread would be created in the Azure AI Agent service and invoke calls would fail. ## Agent Chat -The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. +The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. This structure facilitates more complex, multi-agent scenarios where different agents can work together, share information, and dynamically respond to evolving conversations, making it an ideal solution for advanced use cases such as customer support, multi-faceted task management, or collaborative problem-solving environments. @@ -94,7 +108,7 @@ The _Agent Channel_ class enables agents of various types to participate in an [ ::: zone-end -## Agent Alignment with _Semantic Kernel_ Features +## Agent Alignment with Semantic Kernel Features The `Agent Framework` is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework. @@ -103,8 +117,9 @@ The `Agent Framework` is built on the foundational concepts and features that ma At the heart of the Semantic Kernel ecosystem is the [`Kernel`](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The `Kernel` acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. -The [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. 
- These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. +The [`AzureAIAgent`](./azure-ai-agent.md), [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. + +These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. #### Related API's: diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index 7030413c..70c6f2d8 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -58,42 +58,49 @@ The `Agent Framework` supports _streamed_ responses when using [`AgentChat`](./a ### Streamed response from `ChatCompletionAgent` -When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. +When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` in the `AgentThread` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. ::: zone pivot="programming-language-csharp" ```csharp // Define agent ChatCompletionAgent agent = ...; -// Create a ChatHistory object to maintain the conversation state. -ChatHistory chat = []; +ChatHistoryAgentThread agentThread = new(); -// Add a user message to the conversation -chat.Add(new ChatMessageContent(AuthorRole.User, "")); +// Create a user message +var message = ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(chat)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { // Process streamed response(s)... } + +// It's also possible to read the messages that were added to the ChatHistoryAgentThread. +await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) +{ + // Process messages... +} ``` ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread + # Define agent agent = ChatCompletionAgent(...) -# Create a ChatHistory object to maintain the conversation state. -chat = ChatHistory() - -# Add a user message to the conversation -chat.add_message(ChatMessageContent(AuthorRole.USER, "")) +# Create a thread object to maintain the conversation state. +# If no thread is provided one will be created and returned with +# the initial response. 
+thread: ChatHistoryAgentThread = None # Generate the streamed agent response(s) -async for response in agent.invoke_stream(chat) +async for response in agent.invoke_stream(messages="user input", thread=thread) { # Process streamed response(s)... + thread = response.thread } ``` ::: zone-end @@ -106,7 +113,7 @@ async for response in agent.invoke_stream(chat) ### Streamed response from `OpenAIAssistantAgent` -When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), an optional `ChatHistory` can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. +When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), the assistant maintains the conversation state as a remote thread. It is possible to read the messages from the remote thread if required. ::: zone pivot="programming-language-csharp" ```csharp @@ -114,36 +121,43 @@ When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-a OpenAIAssistantAgent agent = ...; // Create a thread for the agent conversation. -string threadId = await agent.CreateThreadAsync(); +OpenAIAssistantAgentThread agentThread = new(assistantClient); -// Add a user message to the conversation -chat.Add(threadId, new ChatMessageContent(AuthorRole.User, "")); +// Cerate a user message +var message = new ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(threadId)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { // Process streamed response(s)... } +// It's possible to read the messages from the remote thread. +await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) +{ + // Process messages... +} + // Delete the thread when it is no longer needed -await agent.DeleteThreadAsync(threadId); +await agentThread.DeleteAsync(); ``` ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent # Define agent -agent = OpenAIAssistantAgent(...) +agent = OpenAIAssistantAgent(...) # or = AzureAssistantAgent(...) # Create a thread for the agent conversation. -thread_id = await agent.create_thread() - -# Add user message to the conversation -await agent.add_chat_message(message="") +# If no thread is provided one will be created and returned with +# the initial response. +thread: AssistantAgentThread = None # Generate the streamed agent response(s) -async for response in agent.invoke_stream(thread_id=thread_id): +async for response in agent.invoke_stream(messages="user input", thread=thread): # Process streamed response(s)... + thread = response.thread ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index b8d0a4ca..dd4a5b6d 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -235,9 +235,6 @@ ChatCompletionAgent agent = } }; -// Create a ChatHistory object to maintain the conversation state. 
-ChatHistory chat = []; - KernelArguments overrideArguments = new() { @@ -246,7 +243,7 @@ KernelArguments overrideArguments = }); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(chat, overrideArguments)) +await foreach (ChatMessageContent response in agent.InvokeAsync([], options: new() { KernelArguments = overrideArguments })) { // Process agent response(s)... } diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index c6d8ca77..bf7873b7 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -97,7 +97,7 @@ OpenAIAssistantAgent agent = new(assistant, client); ::: zone pivot="programming-language-python" ```python -from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent # Set up the client and model using Azure OpenAI Resources client, model = AzureAssistantAgent.setup_resources() @@ -194,50 +194,78 @@ agent = AzureAssistantAgent( ## Using an `OpenAIAssistantAgent` -As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier which must be specified when calling the agent/ +As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier. The specifics of the _Assistant API thread_ is abstracted away via the `OpenAIAssistantAgentThread` class, which is an implementation of `AgentThread`. + +The `OpenAIAssistantAgent` currently only supports threads of type `OpenAIAssistantAgentThread`. + +You can invoke the `OpenAIAssistantAgent` without specifying an `AgentThread`, to start a new thread and a new `AgentThread` will be returned as part of the response. ::: zone pivot="programming-language-csharp" ```csharp + // Define agent OpenAIAssistantAgent agent = ...; +AgentThread? agentThread = null; + +// Generate the agent response(s) +await foreach (AgentResponseItem response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""))) +{ + // Process agent response(s)... + agentThread = response.Thread; +} + +// Delete the thread if no longer needed +if (agentThread is not null) +{ + await agentThread.DeleteAsync(); +} +``` -// Create a thread for the agent conversation. -string threadId = await agent.CreateThreadAsync(); +You can also invoke the `OpenAIAssistantAgent` with an `AgentThread` that you created. -// Add a user message to the conversation -chat.Add(threadId, new ChatMessageContent(AuthorRole.User, "")); +```csharp +// Define agent +OpenAIAssistantAgent agent = ...; + +// Create a thread with some custom metadata. +AgentThread agentThread = new OpenAIAssistantAgentThread(this.AssistantClient, metadata: myMetadata); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(threadId)) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""), agentThread)) { // Process agent response(s)... 
} // Delete the thread when it is no longer needed -await agent.DeleteThreadAsync(threadId); +await agentThread.DeleteAsync(); +``` + +You can also create an `OpenAIAssistantAgentThread` that resumes an earlier conversation by id. + +```csharp +// Create a thread with an existing thread id. +AgentThread agentThread = new OpenAIAssistantAgentThread(this.AssistantClient, "existing-thread-id"); ``` + ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent + # Define agent openai_agent = await ... # Create a thread for the agent conversation -thread_id = await agent.create_thread() - -# Add a user message to the conversation -await agent.add_chat_message( - thread_id=thread_id, - message=ChatMessageContent(role=AuthorRole.USER, content=""), -) +thread: AssistantAgentThread = None # Generate the agent response(s) -async for response in agent.invoke(thread_id=thread_id): +async for response in agent.invoke(messages="user input", thread=thread): # process agent response(s)... + thread = response.thread # Delete the thread when it is no longer needed -await agent.delete_thread(thread_id) +await thread.delete() if thread else None ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index 6d6eabd7..71455d48 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -121,6 +121,8 @@ AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" Once the configuration is defined, the client may be created: ```python +from semantic_kernel.agents import AzureAIAgent + async with ( DefaultAzureCredential() as creds, AzureAIAgent.create_client(credential=creds) as client, @@ -162,7 +164,7 @@ AzureAIAgent agent = new(definition, agentsClient); ```python from azure.identity.aio import DefaultAzureCredential -from semantic_kernel.agents.azure_ai import AzureAIAgent, AzureAIAgentSettings +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings ai_agent_settings = AzureAIAgentSettings.create() @@ -194,23 +196,25 @@ async with ( ## Interacting with an `AzureAIAgent` -Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread: +Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread. +The specifics of the _Azure AI Agent thread_ is abstracted away via the `AzureAIAgentThread` class, which is an implementation of `AgentThread`. + +The `AzureAIAgent` currently only supports threads of type `AzureAIAgentThread`. 
::: zone pivot="programming-language-csharp" ```c# -AgentThread thread = await agentsClient.CreateThreadAsync(); +AgentThread agentThread = new AzureAIAgentThread(agentsClient); try { ChatMessageContent message = new(AuthorRole.User, ""); - await agent.AddChatMessageAsync(threadId, message); - await foreach (ChatMessageContent response in agent.InvokeAsync(thread.Id)) + await foreach (ChatMessageContent response in agent.InvokeAsync(message, agentThread)) { Console.WriteLine(response.Content); } } finally { - await this.AgentsClient.DeleteThreadAsync(thread.Id); + await agentThread.DeleteAsync(); await this.AgentsClient.DeleteAgentAsync(agent.Id); } ``` @@ -220,24 +224,40 @@ finally ```python USER_INPUTS = ["Hello", "What's your name?"] -thread = await client.agents.create_thread() +thread: AzureAIAgentThread = AzureAIAgentThread() try: for user_input in USER_INPUTS: - await agent.add_chat_message(thread_id=thread.id, message=user_input) - response = await agent.get_response(thread_id=thread.id) + response = await agent.get_response(messages=user_inputs, thread=thread) print(response) + thread = response.thread finally: - await client.agents.delete_thread(thread.id) + await thread.delete() if thread else None ``` Optionally, an agent may be invoked as: ```python for user_input in USER_INPUTS: - await agent.add_chat_message(thread_id=thread.id, message=user_input) - async for content in agent.invoke(thread_id=thread.id): + async for content in agent.invoke(message=user_input, thread=thread): print(content.content) + thread = response.thread +``` + +You may also pass in a list of messages to the `get_response(...)`, `invoke(...)`, or `invoke_stream(...)` methods: + +```python +USER_INPUTS = ["Hello", "What's your name?"] + +thread: AzureAIAgentThread = AzureAIAgentThread() + +try: + for user_input in USER_INPUTS: + response = await agent.get_response(messages=USER_INPUTS, thread=thread) + print(response) + thread = response.thread +finally: + await thread.delete() if thread else None ``` ::: zone-end @@ -247,8 +267,7 @@ An agent may also produce a streamed response: ::: zone pivot="programming-language-csharp" ```c# ChatMessageContent message = new(AuthorRole.User, ""); -await agent.AddChatMessageAsync(threadId, message); -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { Console.Write(response.Content); } @@ -607,7 +626,7 @@ Agents and their associated threads can be deleted when no longer needed: ::: zone pivot="programming-language-csharp" ```c# -await agentsClient.DeleteThreadAsync(thread.Id); +await agentThread.DeleteAsync(); await agentsClient.DeleteAgentAsync(agent.Id); ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 6ec5ff91..7b53e4cc 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -134,6 +134,8 @@ There are two ways to create a `ChatCompletionAgent`: ### 1. 
By providing the chat completion service directly: ```python +from semantic_kernel.agents import ChatCompletionAgent + # Create the agent by directly providing the chat completion service agent = ChatCompletionAgent( service=AzureChatCompletion(), # your chat completion service instance @@ -237,22 +239,38 @@ agent = ChatCompletionAgent( Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a Chat Completion [AI service](../../concepts/ai-services/index.md). +You can simply invoke the agent with your user message. + ```csharp // Define agent ChatCompletionAgent agent = ...; -// Create a ChatHistory object to maintain the conversation state. -ChatHistory chat = []; +// Generate the agent response(s) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""))) +{ + // Process agent response(s)... +} +``` + +You can also use an `AgentThread` to have a conversation with your agent. +Here we are using a `ChatHistoryAgentThread`. -// Add a user message to the conversation -chat.Add(new ChatMessageContent(AuthorRole.User, "")); +The `ChatHistoryAgentThread` can also take an optional `ChatHistory` +object as input, via its constructor, if resuming a previous conversation. (not shown) + +```csharp +// Define agent +ChatCompletionAgent agent = ...; + +AgentThread thread = new ChatHistoryAgentThread(); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(chat)) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""), thread)) { // Process agent response(s)... } ``` + ::: zone-end ::: zone pivot="programming-language-python" @@ -265,29 +283,24 @@ The easiest is to call and await `get_response`: # Define agent agent = ChatCompletionAgent(...) -# Define the chat history -chat = ChatHistory() +# Define the thread +thread = ChatHistoryAgentThread() -# Add the user message -chat.add_user_message(user_input) # Generate the agent response -response = await agent.get_response(chat) -# response is a `ChatMessageContent` object +response = await agent.get_response(messages="user input", thread=thread) +# response is an `AgentResponseItem[ChatMessageContent]` object ``` -Otherwise, calling the `invoke` method returns an `AsyncIterable` of `ChatMessageContent`. +Otherwise, calling the `invoke` method returns an `AsyncIterable` of `AgentResponseItem[ChatMessageContent]`. ```python # Define agent agent = ChatCompletionAgent(...) -# Define the chat history -chat = ChatHistory() - -# Add the user message -chat.add_user_message(user_input) +# Define the thread +thread = ChatHistoryAgentThread() # Generate the agent response(s) -async for response in agent.invoke(chat): +async for response in agent.invoke(messages="user input", thread=thread): # process agent response(s) ``` @@ -297,14 +310,11 @@ The `ChatCompletionAgent` also supports streaming in which the `invoke_stream` m # Define agent agent = ChatCompletionAgent(...) 
-# Define the chat history -chat = ChatHistory() - -# Add the user message -chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +# Define the thread +thread = ChatHistoryAgentThread() # Generate the agent response(s) -async for response in agent.invoke_stream(chat): +async for response in agent.invoke_stream(messages="user input", thread=thread): # process agent response(s) ``` diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 0ce0dfcc..92f32c25 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -40,7 +40,7 @@ dotnet add package Microsoft.SemanticKernel dotnet add package Microsoft.SemanticKernel.Agents.OpenAI --prerelease ``` -> If managing _NuGet_ packages in _Visual Studio_, ensure `Include prerelease` is checked. +> If managing NuGet packages in Visual Studio, ensure `Include prerelease` is checked. The project file (`.csproj`) should contain the following `PackageReference` definitions: @@ -86,7 +86,7 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingFileReferenceContent ``` @@ -198,7 +198,7 @@ The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. 2. [Agent Definition](#agent-definition) - Create the _OpenAI_Assistant`Agent` with templatized instructions and plug-in. -3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. +3. [The Chat Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. @@ -345,7 +345,7 @@ agent = AzureAssistantAgent( ::: zone-end -### The _Chat_ Loop +### The Chat Loop At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. 
@@ -384,8 +384,7 @@ finally ::: zone pivot="programming-language-python" ```python -print("Creating thread...") -thread_id = await agent.create_thread() +thread: AssistantAgentThread = None try: is_complete: bool = False @@ -395,7 +394,7 @@ try: finally: print("\nCleaning up resources...") [await client.files.delete(file_id) for file_id in file_ids] - await client.beta.threads.delete(thread.id) + await thread.delete() if thread else None await client.beta.assistants.delete(agent.id) ``` ::: zone-end @@ -438,8 +437,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -565,18 +562,31 @@ fileIds.Clear(); ::: zone pivot="programming-language-python" ```python -is_code: bool = False -async for response in agent.invoke(stream(thread_id=thread_id): - if is_code != metadata.get("code"): - print() - is_code = not is_code - - print(f"{response.content}) - - file_ids.extend( - [item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent)] - ) - +is_code = False +last_role = None +async for response in agent.invoke_stream(messages=user_input, thread=thread): + current_is_code = response.metadata.get("code", False) + + if current_is_code: + if not is_code: + print("\n\n```python") + is_code = True + print(response.content, end="", flush=True) + else: + if is_code: + print("\n```") + is_code = False + last_role = None + if hasattr(response, "role") and response.role is not None and last_role != response.role: + print(f"\n# {response.role}: ", end="", flush=True) + last_role = response.role + print(response.content, end="", flush=True) + file_ids.extend([ + item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) + ]) + thread = response.thread +if is_code: + print("```\n") print() await download_response_image(agent, file_ids) @@ -770,7 +780,7 @@ import asyncio import logging import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingFileReferenceContent logging.basicConfig(level=logging.ERROR) @@ -779,7 +789,7 @@ logging.basicConfig(level=logging.ERROR) The following sample demonstrates how to create a simple, OpenAI assistant agent that utilizes the code interpreter to analyze uploaded files. 
-""" +""" # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( @@ -861,8 +871,7 @@ async def main(): definition=definition, ) - print("Creating thread...") - thread = await client.beta.threads.create() + thread: AssistantAgentThread = None try: is_complete: bool = False @@ -876,11 +885,9 @@ async def main(): is_complete = True break - await agent.add_chat_message(thread_id=thread.id, message=user_input) - is_code = False last_role = None - async for response in agent.invoke_stream(thread_id=thread.id): + async for response in agent.invoke_stream(messages=user_input, thread=thread): current_is_code = response.metadata.get("code", False) if current_is_code: @@ -900,6 +907,7 @@ async def main(): file_ids.extend([ item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) ]) + thread = response.thread if is_code: print("```\n") print() @@ -910,7 +918,7 @@ async def main(): finally: print("\nCleaning up resources...") [await client.files.delete(file_id) for file_id in file_ids] - await client.beta.threads.delete(thread.id) + await thread.delete() if thread else None await client.beta.assistants.delete(agent.id) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 81bcc550..77f01209 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -86,7 +86,7 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingAnnotationContent ``` @@ -480,10 +480,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) -) ``` ::: zone-end @@ -545,7 +541,8 @@ if (footnotes.Count > 0) ::: zone pivot="programming-language-python" ```python footnotes: list[StreamingAnnotationContent] = [] -async for response in agent.invoke_stream(thread_id=thread_id): +async for response in agent.invoke_stream(messages=user_input, thread=thread): + thread = response.thread footnotes.extend([item for item in response.items if isinstance(item, StreamingAnnotationContent)]) print(f"{response.content}", end="", flush=True) @@ -796,8 +793,7 @@ async def main(): definition=definition, ) - print("Creating thread...") - thread = await client.beta.threads.create() + thread: AssistantAgentThread = None try: is_complete: bool = False diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index d24a85f8..516d5982 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -75,10 +75,9 @@ import os import sys from datetime import datetime -from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent from 
semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel @@ -200,7 +199,7 @@ The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. 2. [`Agent` Definition](#agent-definition) - Create the `ChatCompletionAgent` with templatized instructions and plug-in. -3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. +3. [The Chat Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. @@ -355,7 +354,7 @@ agent = ChatCompletionAgent( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistory` object to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistoryAgentThread` object to maintain the conversation state and creating an empty loop. ::: zone pivot="programming-language-csharp" ```csharp @@ -370,7 +369,7 @@ do ::: zone pivot="programming-language-python" ```python -history = ChatHistory() +thread: ChatHistoryAgentThread = None is_complete: bool = False while not is_complete: # processing logic here @@ -383,7 +382,7 @@ while not is_complete: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the `ChatHistory` as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. 
::: zone pivot="programming-language-csharp" ```csharp @@ -415,8 +414,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -453,8 +450,9 @@ arguments = KernelArguments( now=datetime.now().strftime("%Y-%m-%d %H:%M") ) -async for response in agent.invoke(history, arguments): +async for response in agent.invoke(messages=user_input, thread=thread, arguments=arguments): print(f"{response.content}") + thread = response.thread ``` ::: zone-end @@ -585,13 +583,10 @@ import os import sys from datetime import datetime -from semantic_kernel.agents import ChatCompletionAgent -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread +from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel # Adjust the sys.path so we can use the GitHubPlugin and GitHubSettings classes @@ -601,11 +596,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from plugins.GithubPlugin.github import GitHubPlugin, GitHubSettings # noqa: E402 -################################################################### -# The following sample demonstrates how to create a simple, # -# ChatCompletionAgent to use a GitHub plugin to interact # -# with the GitHub API. # -################################################################### +""" +The following sample demonstrates how to create a simple, +ChatCompletionAgent to use a GitHub plugin to interact +with the GitHub API. +""" async def main(): @@ -641,7 +636,7 @@ async def main(): arguments=KernelArguments(settings=settings), ) - history = ChatHistory() + thread: ChatHistoryAgentThread = None is_complete: bool = False while not is_complete: user_input = input("User:> ") @@ -652,14 +647,13 @@ async def main(): is_complete = True break - history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) - arguments = KernelArguments( now=datetime.now().strftime("%Y-%m-%d %H:%M") ) - async for response in agent.invoke(history=history, arguments): + async for response in agent.invoke(messages=user_input, thread=thread, arguments=arguments): print(f"{response.content}") + thread = response.thread if __name__ == "__main__": diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index a7eb9cfd..dce391f3 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -15,7 +15,135 @@ As we transition some agents from the experimental stage to the release candidat ::: zone pivot="programming-language-csharp" -## OpenAIAssistantAgent C# Migration Guide +## Common Agent Invocation API + +In version 1.43.0 we are releasing a new common agent invocation API, that will allow all agent types to be invoked via a common API. 
+
+To enable this new API, we are introducing the concept of an `AgentThread`, which represents a conversation thread and abstracts away the different thread management requirements of different agent types. For some agent types it will also, in future, allow different thread implementations to be used with the same agent.
+
+The common `Invoke` methods that we are introducing allow you to provide the message(s) that you want to pass to the agent and an optional `AgentThread`. If an `AgentThread` is provided, this will continue the conversation already on the `AgentThread`. If no `AgentThread` is provided, a new default thread will be created and returned as part of the response.
+
+It is also possible to manually create an `AgentThread` instance, for example in cases where you may have a thread id from the underlying agent service, and you want to continue that thread. You may also want to customize the options for the thread, e.g. associate tools.
+
+Here is a simple example of how any agent can now be used with agent-agnostic code.
+
+```csharp
+private async Task UseAgentAsync(Agent agent, AgentThread? agentThread = null)
+{
+    // Invoke the agent, and continue the existing thread if provided.
+    var responses = agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, "Hi"), agentThread);
+
+    // Output results.
+    await foreach (AgentResponseItem<ChatMessageContent> response in responses)
+    {
+        Console.WriteLine(response);
+        agentThread = response.Thread;
+    }
+
+    // Delete the thread if required.
+    if (agentThread is not null)
+    {
+        await agentThread.DeleteAsync();
+    }
+}
+```
+
+These changes were applied in:
+
+- [PR #11116](https://github.com/microsoft/semantic-kernel/pull/11116)
+
+### Azure AI Agent Thread Options
+
+The `AzureAIAgent` currently only supports threads of type `AzureAIAgentThread`.
+
+In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually
+construct an instance of an `AzureAIAgentThread`.
+
+`AzureAIAgentThread` supports being created with customized tools and metadata, plus messages to seed the conversation with.
+
+```csharp
+AgentThread thread = new AzureAIAgentThread(
+    agentsClient,
+    messages: seedMessages,
+    toolResources: tools,
+    metadata: metadata);
+```
+
+You can also construct an instance of an `AzureAIAgentThread` that continues an existing conversation.
+
+```csharp
+AgentThread thread = new AzureAIAgentThread(
+    agentsClient,
+    id: "my-existing-thread-id");
+```
+
+### Bedrock Agent Thread Options
+
+The `BedrockAgent` currently only supports threads of type `BedrockAgentThread`.
+
+In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually
+construct an instance of a `BedrockAgentThread`.
+
+```csharp
+AgentThread thread = new BedrockAgentThread(amazonBedrockAgentRuntimeClient);
+```
+
+You can also construct an instance of a `BedrockAgentThread` that continues an existing conversation.
+
+```csharp
+AgentThread thread = new BedrockAgentThread(
+    amazonBedrockAgentRuntimeClient,
+    sessionId: "my-existing-session-id");
+```
+
+### Chat Completion Agent Thread Options
+
+The `ChatCompletionAgent` currently only supports threads of type `ChatHistoryAgentThread`.
+`ChatHistoryAgentThread` uses an in-memory `ChatHistory` object to store the messages on the thread.
+
+In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually
+construct an instance of a `ChatHistoryAgentThread`. 
+ +```csharp +AgentThread thread = new ChatHistoryAgentThread(); +``` + +You can also construct an instance of an `ChatHistoryAgentThread` that continues an existing conversation +by passing in a `ChatHistory` object with the existing messages. + +```csharp +ChatHistory chatHistory = new([new ChatMessageContent(AuthorRole.User, "Hi")]); + +AgentThread thread = new ChatHistoryAgentThread(chatHistory: chatHistory); +``` + +### OpenAI Assistant Thread Options + +The `OpenAIAssistantAgent` currently only supports threads of type `OpenAIAssistantAgentThread`. + +In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually +construct an instance of an `OpenAIAssistantAgentThread`. + +`OpenAIAssistantAgentThread` supports being created with customized tools and metadata, plus messages to seed the conversation with. + +```csharp +AgentThread thread = new OpenAIAssistantAgentThread( + assistantClient, + messages: seedMessages, + codeInterpreterFileIds: fileIds, + vectorStoreId: "my-vector-store", + metadata: metadata); +``` + +You can also construct an instance of an `OpenAIAssistantAgentThread` that continues an existing conversation. + +```csharp +AgentThread thread = new OpenAIAssistantAgentThread( + assistantClient, + id: "my-existing-thread-id"); +``` + +## OpenAIAssistantAgent C# Migration Guide We recently applied a significant shift around the [`OpenAIAssistantAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs) in the _Semantic Kernel Agent Framework_. @@ -104,24 +232,29 @@ await assistantClient.DeleteAssistantAsync(agent.Id); ## 5. Thread Lifecycle ### **Creating a Thread** -Threads are now created directly using `AssistantClient`. + +Threads are now managed via `AssistantAgentThread`. ##### **New Way** -```csharp -AssistantThread thread = await assistantClient.CreateThreadAsync(); -``` -Using a convenience extension method: ```csharp -string threadId = await assistantClient.CreateThreadAsync(messages: [new ChatMessageContent(AuthorRole.User, "")]); +var thread = new AssistantAgentThread(assistantClient); +// Calling CreateAsync is an optional step. +// A thread will be created automatically on first use if CreateAsync was not called. +// Note that CreateAsync is not on the AgentThread base implementation since not all +// agent services support explicit thread creation. +await thread.CreateAsync(); ``` ##### **Old Way (Deprecated)** + Previously, thread management was indirect or agent-bound. ### **Thread Deletion** + ```csharp -await assistantClient.DeleteThreadAsync(thread.Id); +var thread = new AssistantAgentThread(assistantClient, "existing-thread-id"); +await thread.DeleteAsync(); ``` ## 6. File Lifecycle @@ -165,12 +298,18 @@ Deprecated patterns are marked with `[Obsolete]`. To suppress obsolete warnings This migration guide helps you transition smoothly to the new implementation, simplifying client initialization, resource management, and integration with the **Semantic Kernel .NET SDK**. ::: zone-end + ::: zone pivot="programming-language-python" -For developers upgrading to Semantic Kernel Python 1.22.0 or later, the ChatCompletionAgent and OpenAI Assistant abstractions have been updated. +> [!IMPORTANT] +> For developers upgrading to Semantic Kernel Python 1.26.0 or later, significant updates and breaking changes have been introduced to improve our agent framework as we approach GA. 
These changes were applied in: +- [PR #11116](https://github.com/microsoft/semantic-kernel/pull/11116) + +Previous changes were applied in: + - [PR #10666](https://github.com/microsoft/semantic-kernel/pull/10666) - [PR #10667](https://github.com/microsoft/semantic-kernel/pull/10667) - [PR #10701](https://github.com/microsoft/semantic-kernel/pull/10701) @@ -178,6 +317,368 @@ These changes were applied in: This guide provides step-by-step instructions for migrating your Python code from the old implementation to the new implementation. +## Agent Imports + +All agent import paths have been consolidated under `semantic_kernel.agents`. + +#### Updated import style + +```python +from semantic_kernel.agents import ( + AutoGenConversableAgent, + AzureAIAgent, + AzureAssistantAgent, + BedrockAgent, + ChatCompletionAgent, + OpenAIAssistantAgent, +) +``` + +#### Previous import style (deprecated): + +``` +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.agents.autogen import AutoGenConversableAgent +from semantic_kernel.agents.azure_ai import AzureAIAgent +from semantic_kernel.agents.bedrock import BedrockAgent +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +``` + +## Common Agent Invocation API + +As of Semantic Kernel Python 1.26.0 and later, we introduced a new common abstraction to manage threads for all agents. For each agent we now expose a thread class that implements the `AgentThread` base class, allowing context management via methods like `create()` and `delete()`. + +Agent responses `get_response(...)`, `invoke(...)`, `invoke_stream(...)` now return an `AgentResponseItem[ChatMessageContent]`, which has two attributes: + +```python +message: TMessage # Usually ChatMessageContent +thread: AgentThread # Contains the concrete type for the given agent +``` + +### Adding Messages to a Thread + +Messages should be added to a thread via the `messages` argument as part of the agent's `get_response(...)`, `invoke(...)` or `invoke_stream(...)` methods. + +### Azure AI Agent Thread + +An `AzureAIAgentThread` can be created as follows: + +```python +from semantic_kernel.agents import AzureAIAgentThread + +thread = AzureAIAgentThread( + client: AIProjectClient, # required + messages: list[ThreadMessageOptions] | None = None, # optional + metadata: dict[str, str] | None = None, # optional + thread_id: str | None = None, # optional + tool_resources: "ToolResources | None" = None, # optional +) +``` + +Providing a `thread_id` (string) allows you to continue an existing conversation. If omitted, a new thread is created and returned as part of the agent response. + +A complete implementation example: + +```python +import asyncio + +from azure.identity.aio import DefaultAzureCredential + +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread + +USER_INPUTS = [ + "Why is the sky blue?", + "What are we talking about?", +] + +async def main() -> None: + ai_agent_settings = AzureAIAgentSettings.create() + + async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + # 1. Create an agent on the Azure AI agent service + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + name="Assistant", + instructions="Answer the user's questions.", + ) + + # 2. Create a Semantic Kernel agent for the Azure AI agent + agent = AzureAIAgent( + client=client, + definition=agent_definition, + ) + + # 3. 
Create a thread for the agent + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: AzureAIAgentThread = None + + try: + for user_input in USER_INPUTS: + print(f"# User: {user_input}") + # 4. Invoke the agent with the specified message for response + response = await agent.get_response(messages=user_input, thread=thread) + print(f"# {response.content}: {response}") + thread = response.thread + finally: + # 6. Cleanup: Delete the thread and agent + await thread.delete() if thread else None + await client.agents.delete_agent(agent.id) + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Bedrock Agent Thread + +A `BedrockAgent` uses a `BedrockAgentThread` to manage conversation history and context. You may provide a `session_id` to either continue or initiate a fresh conversation context. + +```python +from semantic_kernel.agents import BedrockAgentThread + +thread = BedrockAgentThread( + bedrock_runtime_client: Any, + session_id: str | None = None, +) +``` + +If no `session_id` is provided, a new context is created automatically. + +A complete implementation example: + +```python +import asyncio + +from semantic_kernel.agents import BedrockAgent, BedrockAgentThread + +async def main(): + bedrock_agent = await BedrockAgent.create_and_prepare_agent( + "semantic-kernel-bedrock-agent", + instructions="You are a friendly assistant. You help people find information.", + ) + + # Create a thread for the agent + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: BedrockAgentThread = None + + try: + while True: + user_input = input("User:> ") + if user_input == "exit": + print("\n\nExiting chat...") + break + + # Invoke the agent + # The chat history is maintained in the session + response = await bedrock_agent.get_response( + input_text=user_input, + thread=thread, + ) + print(f"Bedrock agent: {response}") + thread = response.thread + except KeyboardInterrupt: + print("\n\nExiting chat...") + return False + except EOFError: + print("\n\nExiting chat...") + return False + finally: + # Delete the agent + await bedrock_agent.delete_agent() + await thread.delete() if thread else None + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Chat History Agent Thread + +A `ChatCompletionAgent` uses `ChatHistoryAgentThread` to manage conversation history. It can be initialized as follows: + +```python +from semantic_kernel.agents import ChatHistoryAgentThread + +thread = ChatHistoryAgentThread( + chat_history: ChatHistory | None = None, + thread_id: str | None = None +) +``` + +Providing a `thread_id` allows continuing existing conversations. Omitting it creates a new thread. Serialization and rehydration of thread state are supported for persistent conversation contexts. + +A complete implementation example: + +```python +import asyncio + +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + +# Simulate a conversation with the agent +USER_INPUTS = [ + "Hello, I am John Doe.", + "What is your name?", + "What is my name?", +] + + +async def main(): + # 1. Create the agent by specifying the service + agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="Assistant", + instructions="Answer the user's questions.", + ) + + # 2. 
Create a thread to hold the conversation + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: ChatHistoryAgentThread = None + + for user_input in USER_INPUTS: + print(f"# User: {user_input}") + # 3. Invoke the agent for a response + response = await agent.get_response( + messages=user_input, + thread=thread, + ) + print(f"# {response.name}: {response}") + # 4. Store the thread, which allows the agent to + # maintain conversation history across multiple messages. + thread = response.thread + + # 5. Cleanup: Clear the thread + await thread.delete() if thread else None + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### OpenAI Assistant Thread + +The `AzureAssistantAgent` and `OpenAIAssistantAgent` use `AssistantAgentThread` to manage conversation history and context: + +```python +from semantic_kernel.agents import ChatHistoryAgentThread + +thread = AssistantAgentThread( + client: AsyncOpenAI, + thread_id: str | None = None, + messages: Iterable["ThreadCreateMessage"] | NotGiven = NOT_GIVEN, + metadata: dict[str, Any] | NotGiven = NOT_GIVEN, + tool_resources: ToolResources | NotGiven = NOT_GIVEN, +) +``` + +Providing a `thread_id` continues an existing conversation; otherwise, a new thread is created. + +A complete implementation example: + +```python +# Copyright (c) Microsoft. All rights reserved. +import asyncio + +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent + + +# Simulate a conversation with the agent +USER_INPUTS = [ + "Why is the sky blue?", + "What is the speed of light?", + "What have we been talking about?", +] + + +async def main(): + # 1. Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # 2. Create the assistant on the Azure OpenAI service + definition = await client.beta.assistants.create( + model=model, + instructions="Answer questions about the world in one sentence.", + name="Assistant", + ) + + # 3. Create a Semantic Kernel agent for the Azure OpenAI assistant + agent = AzureAssistantAgent( + client=client, + definition=definition, + ) + + # 4. Create a new thread for use with the assistant + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: AssistantAgentThread = None + + try: + for user_input in USER_INPUTS: + print(f"# User: '{user_input}'") + # 6. Invoke the agent for the current thread and print the response + response = await agent.get_response(messages=user_input, thread=thread) + print(f"# {response.name}: {response}") + thread = response.thread + + finally: + # 7. Clean up the resources + await thread.delete() if thread else None + await agent.client.beta.assistants.delete(assistant_id=agent.id) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +## Message Inputs for Agent Invocation + +Previous implementations allowed only a single message input to methods like `get_response(...)`, `invoke(...)`, and `invoke_stream(...)`. We've now updated these methods to support multiple `messages (str | ChatMessageContent | list[str | ChatMessageContent])`. 
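+
+For example, the `messages` argument accepts a mixed list of plain strings and `ChatMessageContent` instances in a single call. A minimal sketch (hypothetical inputs, reusing the `agent` and `thread` variables from the surrounding examples):
+
+```python
+from semantic_kernel.contents import AuthorRole, ChatMessageContent
+
+response = await agent.get_response(
+    messages=[
+        "Here are my meeting notes.",
+        ChatMessageContent(role=AuthorRole.USER, content="Summarize them in two sentences."),
+    ],
+    thread=thread,
+)
+thread = response.thread
+```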
+ +Agent invocation methods need updates as follows: + +### Old Way + +```python +response = await agent.get_response(message="some user input", thread=thread) +``` + +### New Way + +```python +response = await agent.get_response(messages=["some initial inputer", "other input"], thread=thread) +``` + +## `AzureAIAgent` + +In Semantic Kernel Python 1.26.0+, `AzureAIAgent` thread creation is now managed via the `AzureAIAgentThread` object, not directly on the client. + +### Old Way + +```python +thread = await client.agents.create_thread() +``` + +### New Way + +```python +from semantic_kernel.agents import AzureAIAgentThread + +thread = AzureAIAgentThread( + client: AIProjectClient, # required + messages: list[ThreadMessageOptions] | None = None, # optional + metadata: dict[str, str] | None = None, # optional + thread_id: str | None = None, # optional + tool_resources: "ToolResources | None" = None, # optional +) +``` + +If no `thread_id` is provided initially, a new thread is created and returned in the agent response. + ## `ChatCompletionAgent` The `ChatCompletionAgent` has been updated to simplify service configuration, plugin handling, and function calling behaviors. Below are the key changes you should consider when migrating. @@ -189,6 +690,9 @@ You can now specify the service directly as part of the agent constructor: #### New Way ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + agent = ChatCompletionAgent( service=AzureChatCompletion(), name="", @@ -203,6 +707,9 @@ Note: If both a kernel and a service are provided, the service will take precede Previously, you would first add a service to a kernel and then pass the kernel to the agent: ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + kernel = Kernel() kernel.add_service(AzureChatCompletion()) @@ -220,6 +727,9 @@ Plugins can now be supplied directly through the constructor: #### New Way ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + agent = ChatCompletionAgent( service=AzureChatCompletion(), name="", @@ -233,6 +743,9 @@ agent = ChatCompletionAgent( Plugins previously had to be added to the kernel separately: ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + kernel = Kernel() kernel.add_plugin(SamplePlugin()) @@ -249,22 +762,31 @@ Note: Both approaches are valid, but directly specifying plugins simplifies init You now have two ways to invoke the agent. The new method directly retrieves a single response, while the old method supports streaming. -#### New Way (Single Response) +#### New Way (No Conversation Thread/Context) ```python -chat_history = ChatHistory() -chat_history.add_user_message("") -response = await agent.get_response(chat_history) -# response is of type ChatMessageContent +response = await agent.get_response(messages="user input") +# response is of type AgentResponseItem[ChatMessageContent] ``` +Note: if the next response does not use the returned thread, the conversation will use a new thread and thus will not continue with previous context. 
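+
+A minimal sketch of that behavior (hypothetical inputs; `agent` is any agent type that supports `get_response`):
+
+```python
+# Without a thread, each call starts a fresh conversation, so the second call
+# has no memory of the first.
+first = await agent.get_response(messages="My name is John.")
+second = await agent.get_response(messages="What is my name?")
+
+# Passing the returned thread back in keeps the context.
+first = await agent.get_response(messages="My name is John.")
+second = await agent.get_response(messages="What is my name?", thread=first.thread)
+```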

-#### Old Way (Still Valid)
+#### New Way (Single Response with Context)
+
+```python
+thread = ChatHistoryAgentThread()
+
+for user_input in ["First user input", "Second User Input"]:
+    response = await agent.get_response(messages=user_input, thread=thread)
+    # response is of type AgentResponseItem[ChatMessageContent]
+    thread = response.thread
+```
+
+#### Old Way (No Longer Valid)
 
 ```python
 chat_history = ChatHistory()
 chat_history.add_user_message("")
-async for response in agent.invoke(chat_history):
-    # handle response
+response = agent.get_response(message="user input", chat_history=chat_history)
 ```
 
 ### 4. Controlling Function Calling
@@ -404,8 +926,14 @@ thread_id = await agent.create_thread()
 
 ### New Way
 
 ```python
-thread = await agent.client.beta.threads.create()
-# Use thread.id for the thread_id string
+from semantic_kernel.agents import AssistantAgentThread
+
+thread = AssistantAgentThread(client=agent.client)  # the agent's AsyncOpenAI client is required
+
+async for response in agent.invoke(messages="user input", thread=thread):
+    # handle response
+    print(response)
+    thread = response.thread
 ```
 
 ## 3. Handling Plugins
@@ -580,7 +1108,7 @@ await agent.delete()
 
 ### New Way
 
 ```python
 await client.files.delete(file_id)
-await client.beta.threads.delete(thread.id)
+await thread.delete()
 await client.beta.assistants.delete(agent.id)
 ```

From f69ff6dfb04c451f0b426afb1a5868af75070f16 Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Tue, 25 Mar 2025 09:11:08 +0900
Subject: [PATCH 098/117] Merging main to live. (#503)

* SQL Server Connector

* bash

* azure links updated

* Python / .Net: Common Agent Invocation API migration guide and docs update (#501)

* Python: update python RC agent migration doc based on thread api (#499)

* update python RC agent migration doc based on thread api

* revert c# change

* PR feedback

* Cleanup

* .Net: Update migration guide and Docs for Common Agents API (#500)

* Update migration guide

* Fix formatting issue.

* Update architecture page and samples in individual docs pages

* Small tweak to messaging

* Address pr comments

* Use different example so that it works for Python too.
* Python agent framework docs updates (#502) * Update semantic-kernel/support/migration/agent-framework-rc-migration-guide.md Co-authored-by: westey <164392973+westey-m@users.noreply.github.com> * Update semantic-kernel/Frameworks/agent/examples/example-assistant-code.md Co-authored-by: westey <164392973+westey-m@users.noreply.github.com> --------- Co-authored-by: Evan Mattson <35585003+moonbox3@users.noreply.github.com> --------- Co-authored-by: eavanvalkenburg Co-authored-by: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Co-authored-by: westey <164392973+westey-m@users.noreply.github.com> --- .../Frameworks/agent/agent-architecture.md | 23 +- .../Frameworks/agent/agent-streaming.md | 62 +- .../Frameworks/agent/agent-templates.md | 5 +- .../Frameworks/agent/assistant-agent.md | 62 +- .../Frameworks/agent/azure-ai-agent.md | 49 +- .../Frameworks/agent/chat-completion-agent.md | 58 +- .../agent/examples/example-assistant-code.md | 66 +- .../examples/example-assistant-search.md | 12 +- .../agent/examples/example-chat-agent.md | 42 +- .../out-of-the-box-connectors/TOC.yml | 2 + .../out-of-the-box-connectors/index.md | 34 +- .../sql-connector.md | 121 ++++ .../agent-framework-rc-migration-guide.md | 568 +++++++++++++++++- 13 files changed, 918 insertions(+), 186 deletions(-) create mode 100644 semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index 67c99849..589f2e5b 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -53,6 +53,7 @@ Agents can either be invoked directly to perform tasks or orchestrated within an #### Deep Dive: +- [`AzureAIAgent`](./azure-ai-agent.md) - [`ChatCompletionAgent`](./chat-completion-agent.md) - [`OpenAIAssistantAgent`](./assistant-agent.md) @@ -61,10 +62,23 @@ Agents can either be invoked directly to perform tasks or orchestrated within an ## Agent Exensibility --> +## Agent Thread + +The abstract `AgentThread` class serves as the core abstraction for threads or conversation state. +It abstracts away the different ways in which convesation state may be managed for different agents. + +Stateful agent services often store conversation state in the service, and you can interact with it via an id. +Other agents may require the entire chat history to be passed to the agent on each invocation, in which +case the conversation state is managed locally in the application. + +Stateful agents typically only work with a matching `AgentThread` implementation, while other types of agents could work with more than one `AgentThread` type. +For example, `AzureAIAgent` requires a matching `AzureAIAgentThread`. +This is because the Azure AI Agent service stores conversations in the service, and requires specific service calls to create a thread and update it. +If a different agent thread type was used with `AzureAIAgent`, no thread would be created in the Azure AI Agent service and invoke calls would fail. ## Agent Chat -The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. 
Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. +The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. This structure facilitates more complex, multi-agent scenarios where different agents can work together, share information, and dynamically respond to evolving conversations, making it an ideal solution for advanced use cases such as customer support, multi-faceted task management, or collaborative problem-solving environments. @@ -94,7 +108,7 @@ The _Agent Channel_ class enables agents of various types to participate in an [ ::: zone-end -## Agent Alignment with _Semantic Kernel_ Features +## Agent Alignment with Semantic Kernel Features The `Agent Framework` is built on the foundational concepts and features that many developers have come to know within the _Semantic Kernel_ ecosystem. These core principles serve as the building blocks for the Agent Framework’s design. By leveraging the familiar structure and capabilities of the _Semantic Kernel_, the Agent Framework extends its functionality to enable more advanced, autonomous agent behaviors, while maintaining consistency with the broader _Semantic Kernel_ architecture. This ensures a smooth transition for developers, allowing them to apply their existing knowledge to create intelligent, adaptable agents within the framework. @@ -103,8 +117,9 @@ The `Agent Framework` is built on the foundational concepts and features that ma At the heart of the Semantic Kernel ecosystem is the [`Kernel`](../../concepts/kernel.md), which serves as the core object that drives AI operations and interactions. To create any agent within this framework, a _Kernel instance_ is required as it provides the foundational context and capabilities for the agent’s functionality. The `Kernel` acts as the engine for processing instructions, managing state, and invoking the necessary AI services that power the agent's behavior. -The [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. - These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. +The [`AzureAIAgent`](./azure-ai-agent.md), [`ChatCompletionAgent`](./chat-completion-agent.md) and [`OpenAIAssistantAgent`](./assistant-agent.md) articles provide specific details on how to create each type of agent. + +These resources offer step-by-step instructions and highlight the key configurations needed to tailor the agents to different conversational or task-based applications, demonstrating how the Kernel enables dynamic and intelligent agent behaviors across diverse use cases. 
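+
+For instance, a minimal Python sketch (assuming an Azure OpenAI chat completion service is configured through environment settings) of handing a `Kernel` to an agent:
+
+```python
+from semantic_kernel.agents import ChatCompletionAgent
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.kernel import Kernel
+
+# The Kernel carries the AI service and any plugins; the agent builds on it.
+kernel = Kernel()
+kernel.add_service(AzureChatCompletion())
+
+agent = ChatCompletionAgent(
+    kernel=kernel,
+    name="Assistant",
+    instructions="Answer the user's questions.",
+)
+```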
#### Related API's: diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index 7030413c..70c6f2d8 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -58,42 +58,49 @@ The `Agent Framework` supports _streamed_ responses when using [`AgentChat`](./a ### Streamed response from `ChatCompletionAgent` -When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. +When invoking a streamed response from a [`ChatCompletionAgent`](./chat-completion-agent.md), the `ChatHistory` in the `AgentThread` is updated after the full response is received. Although the response is streamed incrementally, the history records only the complete message. This ensures that the `ChatHistory` reflects fully formed responses for consistency. ::: zone pivot="programming-language-csharp" ```csharp // Define agent ChatCompletionAgent agent = ...; -// Create a ChatHistory object to maintain the conversation state. -ChatHistory chat = []; +ChatHistoryAgentThread agentThread = new(); -// Add a user message to the conversation -chat.Add(new ChatMessageContent(AuthorRole.User, "")); +// Create a user message +var message = ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(chat)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { // Process streamed response(s)... } + +// It's also possible to read the messages that were added to the ChatHistoryAgentThread. +await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) +{ + // Process messages... +} ``` ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread + # Define agent agent = ChatCompletionAgent(...) -# Create a ChatHistory object to maintain the conversation state. -chat = ChatHistory() - -# Add a user message to the conversation -chat.add_message(ChatMessageContent(AuthorRole.USER, "")) +# Create a thread object to maintain the conversation state. +# If no thread is provided one will be created and returned with +# the initial response. +thread: ChatHistoryAgentThread = None # Generate the streamed agent response(s) -async for response in agent.invoke_stream(chat) +async for response in agent.invoke_stream(messages="user input", thread=thread) { # Process streamed response(s)... + thread = response.thread } ``` ::: zone-end @@ -106,7 +113,7 @@ async for response in agent.invoke_stream(chat) ### Streamed response from `OpenAIAssistantAgent` -When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), an optional `ChatHistory` can be provided to capture the complete messages for further analysis if needed. Since the assistant maintains the conversation state as a remote thread, capturing these messages is not always necessary. The decision to store and analyze the full response depends on the specific requirements of the interaction. 
+When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-agent.md), the assistant maintains the conversation state as a remote thread. It is possible to read the messages from the remote thread if required. ::: zone pivot="programming-language-csharp" ```csharp @@ -114,36 +121,43 @@ When invoking a streamed response from an [`OpenAIAssistantAgent`](./assistant-a OpenAIAssistantAgent agent = ...; // Create a thread for the agent conversation. -string threadId = await agent.CreateThreadAsync(); +OpenAIAssistantAgentThread agentThread = new(assistantClient); -// Add a user message to the conversation -chat.Add(threadId, new ChatMessageContent(AuthorRole.User, "")); +// Cerate a user message +var message = new ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(threadId)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { // Process streamed response(s)... } +// It's possible to read the messages from the remote thread. +await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) +{ + // Process messages... +} + // Delete the thread when it is no longer needed -await agent.DeleteThreadAsync(threadId); +await agentThread.DeleteAsync(); ``` ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent # Define agent -agent = OpenAIAssistantAgent(...) +agent = OpenAIAssistantAgent(...) # or = AzureAssistantAgent(...) # Create a thread for the agent conversation. -thread_id = await agent.create_thread() - -# Add user message to the conversation -await agent.add_chat_message(message="") +# If no thread is provided one will be created and returned with +# the initial response. +thread: AssistantAgentThread = None # Generate the streamed agent response(s) -async for response in agent.invoke_stream(thread_id=thread_id): +async for response in agent.invoke_stream(messages="user input", thread=thread): # Process streamed response(s)... + thread = response.thread ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/agent-templates.md b/semantic-kernel/Frameworks/agent/agent-templates.md index b8d0a4ca..dd4a5b6d 100644 --- a/semantic-kernel/Frameworks/agent/agent-templates.md +++ b/semantic-kernel/Frameworks/agent/agent-templates.md @@ -235,9 +235,6 @@ ChatCompletionAgent agent = } }; -// Create a ChatHistory object to maintain the conversation state. -ChatHistory chat = []; - KernelArguments overrideArguments = new() { @@ -246,7 +243,7 @@ KernelArguments overrideArguments = }); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(chat, overrideArguments)) +await foreach (ChatMessageContent response in agent.InvokeAsync([], options: new() { KernelArguments = overrideArguments })) { // Process agent response(s)... 
} diff --git a/semantic-kernel/Frameworks/agent/assistant-agent.md b/semantic-kernel/Frameworks/agent/assistant-agent.md index c6d8ca77..bf7873b7 100644 --- a/semantic-kernel/Frameworks/agent/assistant-agent.md +++ b/semantic-kernel/Frameworks/agent/assistant-agent.md @@ -97,7 +97,7 @@ OpenAIAssistantAgent agent = new(assistant, client); ::: zone pivot="programming-language-python" ```python -from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent # Set up the client and model using Azure OpenAI Resources client, model = AzureAssistantAgent.setup_resources() @@ -194,50 +194,78 @@ agent = AzureAssistantAgent( ## Using an `OpenAIAssistantAgent` -As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier which must be specified when calling the agent/ +As with all aspects of the _Assistant API_, conversations are stored remotely. Each conversation is referred to as a _thread_ and identified by a unique `string` identifier. Interactions with your `OpenAIAssistantAgent` are tied to this specific thread identifier. The specifics of the _Assistant API thread_ is abstracted away via the `OpenAIAssistantAgentThread` class, which is an implementation of `AgentThread`. + +The `OpenAIAssistantAgent` currently only supports threads of type `OpenAIAssistantAgentThread`. + +You can invoke the `OpenAIAssistantAgent` without specifying an `AgentThread`, to start a new thread and a new `AgentThread` will be returned as part of the response. ::: zone pivot="programming-language-csharp" ```csharp + // Define agent OpenAIAssistantAgent agent = ...; +AgentThread? agentThread = null; + +// Generate the agent response(s) +await foreach (AgentResponseItem response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""))) +{ + // Process agent response(s)... + agentThread = response.Thread; +} + +// Delete the thread if no longer needed +if (agentThread is not null) +{ + await agentThread.DeleteAsync(); +} +``` -// Create a thread for the agent conversation. -string threadId = await agent.CreateThreadAsync(); +You can also invoke the `OpenAIAssistantAgent` with an `AgentThread` that you created. -// Add a user message to the conversation -chat.Add(threadId, new ChatMessageContent(AuthorRole.User, "")); +```csharp +// Define agent +OpenAIAssistantAgent agent = ...; + +// Create a thread with some custom metadata. +AgentThread agentThread = new OpenAIAssistantAgentThread(this.AssistantClient, metadata: myMetadata); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(threadId)) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""), agentThread)) { // Process agent response(s)... } // Delete the thread when it is no longer needed -await agent.DeleteThreadAsync(threadId); +await agentThread.DeleteAsync(); +``` + +You can also create an `OpenAIAssistantAgentThread` that resumes an earlier conversation by id. + +```csharp +// Create a thread with an existing thread id. 
+AgentThread agentThread = new OpenAIAssistantAgentThread(this.AssistantClient, "existing-thread-id"); ``` + ::: zone-end ::: zone pivot="programming-language-python" ```python +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent + # Define agent openai_agent = await ... # Create a thread for the agent conversation -thread_id = await agent.create_thread() - -# Add a user message to the conversation -await agent.add_chat_message( - thread_id=thread_id, - message=ChatMessageContent(role=AuthorRole.USER, content=""), -) +thread: AssistantAgentThread = None # Generate the agent response(s) -async for response in agent.invoke(thread_id=thread_id): +async for response in agent.invoke(messages="user input", thread=thread): # process agent response(s)... + thread = response.thread # Delete the thread when it is no longer needed -await agent.delete_thread(thread_id) +await thread.delete() if thread else None ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/azure-ai-agent.md b/semantic-kernel/Frameworks/agent/azure-ai-agent.md index 6d6eabd7..71455d48 100644 --- a/semantic-kernel/Frameworks/agent/azure-ai-agent.md +++ b/semantic-kernel/Frameworks/agent/azure-ai-agent.md @@ -121,6 +121,8 @@ AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME = "" Once the configuration is defined, the client may be created: ```python +from semantic_kernel.agents import AzureAIAgent + async with ( DefaultAzureCredential() as creds, AzureAIAgent.create_client(credential=creds) as client, @@ -162,7 +164,7 @@ AzureAIAgent agent = new(definition, agentsClient); ```python from azure.identity.aio import DefaultAzureCredential -from semantic_kernel.agents.azure_ai import AzureAIAgent, AzureAIAgentSettings +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings ai_agent_settings = AzureAIAgentSettings.create() @@ -194,23 +196,25 @@ async with ( ## Interacting with an `AzureAIAgent` -Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread: +Interaction with the `AzureAIAgent` is straightforward. The agent maintains the conversation history automatically using a thread. +The specifics of the _Azure AI Agent thread_ is abstracted away via the `AzureAIAgentThread` class, which is an implementation of `AgentThread`. + +The `AzureAIAgent` currently only supports threads of type `AzureAIAgentThread`. 
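+
+::: zone pivot="programming-language-python"
+
+An existing service-side thread can also be continued by passing its id when constructing the thread. A minimal sketch based on the `AzureAIAgentThread` constructor options (the thread id below is a placeholder, and `client` is the `AIProjectClient` created earlier):
+
+```python
+from semantic_kernel.agents import AzureAIAgentThread
+
+# Continue an existing conversation by supplying the service thread id.
+thread = AzureAIAgentThread(client=client, thread_id="my-existing-thread-id")
+```
+
+::: zone-end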
::: zone pivot="programming-language-csharp" ```c# -AgentThread thread = await agentsClient.CreateThreadAsync(); +AgentThread agentThread = new AzureAIAgentThread(agentsClient); try { ChatMessageContent message = new(AuthorRole.User, ""); - await agent.AddChatMessageAsync(threadId, message); - await foreach (ChatMessageContent response in agent.InvokeAsync(thread.Id)) + await foreach (ChatMessageContent response in agent.InvokeAsync(message, agentThread)) { Console.WriteLine(response.Content); } } finally { - await this.AgentsClient.DeleteThreadAsync(thread.Id); + await agentThread.DeleteAsync(); await this.AgentsClient.DeleteAgentAsync(agent.Id); } ``` @@ -220,24 +224,40 @@ finally ```python USER_INPUTS = ["Hello", "What's your name?"] -thread = await client.agents.create_thread() +thread: AzureAIAgentThread = AzureAIAgentThread() try: for user_input in USER_INPUTS: - await agent.add_chat_message(thread_id=thread.id, message=user_input) - response = await agent.get_response(thread_id=thread.id) + response = await agent.get_response(messages=user_inputs, thread=thread) print(response) + thread = response.thread finally: - await client.agents.delete_thread(thread.id) + await thread.delete() if thread else None ``` Optionally, an agent may be invoked as: ```python for user_input in USER_INPUTS: - await agent.add_chat_message(thread_id=thread.id, message=user_input) - async for content in agent.invoke(thread_id=thread.id): + async for content in agent.invoke(message=user_input, thread=thread): print(content.content) + thread = response.thread +``` + +You may also pass in a list of messages to the `get_response(...)`, `invoke(...)`, or `invoke_stream(...)` methods: + +```python +USER_INPUTS = ["Hello", "What's your name?"] + +thread: AzureAIAgentThread = AzureAIAgentThread() + +try: + for user_input in USER_INPUTS: + response = await agent.get_response(messages=USER_INPUTS, thread=thread) + print(response) + thread = response.thread +finally: + await thread.delete() if thread else None ``` ::: zone-end @@ -247,8 +267,7 @@ An agent may also produce a streamed response: ::: zone pivot="programming-language-csharp" ```c# ChatMessageContent message = new(AuthorRole.User, ""); -await agent.AddChatMessageAsync(threadId, message); -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { Console.Write(response.Content); } @@ -607,7 +626,7 @@ Agents and their associated threads can be deleted when no longer needed: ::: zone pivot="programming-language-csharp" ```c# -await agentsClient.DeleteThreadAsync(thread.Id); +await agentThread.DeleteAsync(); await agentsClient.DeleteAgentAsync(agent.Id); ``` ::: zone-end diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 6ec5ff91..7b53e4cc 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -134,6 +134,8 @@ There are two ways to create a `ChatCompletionAgent`: ### 1. 
By providing the chat completion service directly: ```python +from semantic_kernel.agents import ChatCompletionAgent + # Create the agent by directly providing the chat completion service agent = ChatCompletionAgent( service=AzureChatCompletion(), # your chat completion service instance @@ -237,22 +239,38 @@ agent = ChatCompletionAgent( Conversing with your `ChatCompletionAgent` is based on a `ChatHistory` instance, no different from interacting with a Chat Completion [AI service](../../concepts/ai-services/index.md). +You can simply invoke the agent with your user message. + ```csharp // Define agent ChatCompletionAgent agent = ...; -// Create a ChatHistory object to maintain the conversation state. -ChatHistory chat = []; +// Generate the agent response(s) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""))) +{ + // Process agent response(s)... +} +``` + +You can also use an `AgentThread` to have a conversation with your agent. +Here we are using a `ChatHistoryAgentThread`. -// Add a user message to the conversation -chat.Add(new ChatMessageContent(AuthorRole.User, "")); +The `ChatHistoryAgentThread` can also take an optional `ChatHistory` +object as input, via its constructor, if resuming a previous conversation. (not shown) + +```csharp +// Define agent +ChatCompletionAgent agent = ...; + +AgentThread thread = new ChatHistoryAgentThread(); // Generate the agent response(s) -await foreach (ChatMessageContent response in agent.InvokeAsync(chat)) +await foreach (ChatMessageContent response in agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, ""), thread)) { // Process agent response(s)... } ``` + ::: zone-end ::: zone pivot="programming-language-python" @@ -265,29 +283,24 @@ The easiest is to call and await `get_response`: # Define agent agent = ChatCompletionAgent(...) -# Define the chat history -chat = ChatHistory() +# Define the thread +thread = ChatHistoryAgentThread() -# Add the user message -chat.add_user_message(user_input) # Generate the agent response -response = await agent.get_response(chat) -# response is a `ChatMessageContent` object +response = await agent.get_response(messages="user input", thread=thread) +# response is an `AgentResponseItem[ChatMessageContent]` object ``` -Otherwise, calling the `invoke` method returns an `AsyncIterable` of `ChatMessageContent`. +Otherwise, calling the `invoke` method returns an `AsyncIterable` of `AgentResponseItem[ChatMessageContent]`. ```python # Define agent agent = ChatCompletionAgent(...) -# Define the chat history -chat = ChatHistory() - -# Add the user message -chat.add_user_message(user_input) +# Define the thread +thread = ChatHistoryAgentThread() # Generate the agent response(s) -async for response in agent.invoke(chat): +async for response in agent.invoke(messages="user input", thread=thread): # process agent response(s) ``` @@ -297,14 +310,11 @@ The `ChatCompletionAgent` also supports streaming in which the `invoke_stream` m # Define agent agent = ChatCompletionAgent(...) 
-# Define the chat history -chat = ChatHistory() - -# Add the user message -chat.add_message(ChatMessageContent(role=AuthorRole.USER, content=input)) +# Define the thread +thread = ChatHistoryAgentThread() # Generate the agent response(s) -async for response in agent.invoke_stream(chat): +async for response in agent.invoke_stream(messages="user input", thread=thread): # process agent response(s) ``` diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 0ce0dfcc..92f32c25 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -40,7 +40,7 @@ dotnet add package Microsoft.SemanticKernel dotnet add package Microsoft.SemanticKernel.Agents.OpenAI --prerelease ``` -> If managing _NuGet_ packages in _Visual Studio_, ensure `Include prerelease` is checked. +> If managing NuGet packages in Visual Studio, ensure `Include prerelease` is checked. The project file (`.csproj`) should contain the following `PackageReference` definitions: @@ -86,7 +86,7 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingFileReferenceContent ``` @@ -198,7 +198,7 @@ The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. 2. [Agent Definition](#agent-definition) - Create the _OpenAI_Assistant`Agent` with templatized instructions and plug-in. -3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. +3. [The Chat Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. @@ -345,7 +345,7 @@ agent = AzureAssistantAgent( ::: zone-end -### The _Chat_ Loop +### The Chat Loop At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. 
@@ -384,8 +384,7 @@ finally ::: zone pivot="programming-language-python" ```python -print("Creating thread...") -thread_id = await agent.create_thread() +thread: AssistantAgentThread = None try: is_complete: bool = False @@ -395,7 +394,7 @@ try: finally: print("\nCleaning up resources...") [await client.files.delete(file_id) for file_id in file_ids] - await client.beta.threads.delete(thread.id) + await thread.delete() if thread else None await client.beta.assistants.delete(agent.id) ``` ::: zone-end @@ -438,8 +437,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -565,18 +562,31 @@ fileIds.Clear(); ::: zone pivot="programming-language-python" ```python -is_code: bool = False -async for response in agent.invoke(stream(thread_id=thread_id): - if is_code != metadata.get("code"): - print() - is_code = not is_code - - print(f"{response.content}) - - file_ids.extend( - [item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent)] - ) - +is_code = False +last_role = None +async for response in agent.invoke_stream(messages=user_input, thread=thread): + current_is_code = response.metadata.get("code", False) + + if current_is_code: + if not is_code: + print("\n\n```python") + is_code = True + print(response.content, end="", flush=True) + else: + if is_code: + print("\n```") + is_code = False + last_role = None + if hasattr(response, "role") and response.role is not None and last_role != response.role: + print(f"\n# {response.role}: ", end="", flush=True) + last_role = response.role + print(response.content, end="", flush=True) + file_ids.extend([ + item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) + ]) + thread = response.thread +if is_code: + print("```\n") print() await download_response_image(agent, file_ids) @@ -770,7 +780,7 @@ import asyncio import logging import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingFileReferenceContent logging.basicConfig(level=logging.ERROR) @@ -779,7 +789,7 @@ logging.basicConfig(level=logging.ERROR) The following sample demonstrates how to create a simple, OpenAI assistant agent that utilizes the code interpreter to analyze uploaded files. 
-""" +""" # Let's form the file paths that we will later pass to the assistant csv_file_path_1 = os.path.join( @@ -861,8 +871,7 @@ async def main(): definition=definition, ) - print("Creating thread...") - thread = await client.beta.threads.create() + thread: AssistantAgentThread = None try: is_complete: bool = False @@ -876,11 +885,9 @@ async def main(): is_complete = True break - await agent.add_chat_message(thread_id=thread.id, message=user_input) - is_code = False last_role = None - async for response in agent.invoke_stream(thread_id=thread.id): + async for response in agent.invoke_stream(messages=user_input, thread=thread): current_is_code = response.metadata.get("code", False) if current_is_code: @@ -900,6 +907,7 @@ async def main(): file_ids.extend([ item.file_id for item in response.items if isinstance(item, StreamingFileReferenceContent) ]) + thread = response.thread if is_code: print("```\n") print() @@ -910,7 +918,7 @@ async def main(): finally: print("\nCleaning up resources...") [await client.files.delete(file_id) for file_id in file_ids] - await client.beta.threads.delete(thread.id) + await thread.delete() if thread else None await client.beta.assistants.delete(agent.id) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 81bcc550..77f01209 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -86,7 +86,7 @@ Start by creating a folder that will hold your script (`.py` file) and the sampl import asyncio import os -from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent from semantic_kernel.contents import StreamingAnnotationContent ``` @@ -480,10 +480,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -await agent.add_chat_message( - thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=user_input) -) ``` ::: zone-end @@ -545,7 +541,8 @@ if (footnotes.Count > 0) ::: zone pivot="programming-language-python" ```python footnotes: list[StreamingAnnotationContent] = [] -async for response in agent.invoke_stream(thread_id=thread_id): +async for response in agent.invoke_stream(messages=user_input, thread=thread): + thread = response.thread footnotes.extend([item for item in response.items if isinstance(item, StreamingAnnotationContent)]) print(f"{response.content}", end="", flush=True) @@ -796,8 +793,7 @@ async def main(): definition=definition, ) - print("Creating thread...") - thread = await client.beta.threads.create() + thread: AssistantAgentThread = None try: is_complete: bool = False diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index d24a85f8..516d5982 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -75,10 +75,9 @@ import os import sys from datetime import datetime -from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent from 
semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel @@ -200,7 +199,7 @@ The coding process for this sample involves: 1. [Setup](#setup) - Initializing settings and the plug-in. 2. [`Agent` Definition](#agent-definition) - Create the `ChatCompletionAgent` with templatized instructions and plug-in. -3. [The _Chat_ Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. +3. [The Chat Loop](#the-chat-loop) - Write the loop that drives user / agent interaction. The full example code is provided in the [Final](#final) section. Refer to that section for the complete implementation. @@ -355,7 +354,7 @@ agent = ChatCompletionAgent( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistory` object to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating a `ChatHistoryAgentThread` object to maintain the conversation state and creating an empty loop. ::: zone pivot="programming-language-csharp" ```csharp @@ -370,7 +369,7 @@ do ::: zone pivot="programming-language-python" ```python -history = ChatHistory() +thread: ChatHistoryAgentThread = None is_complete: bool = False while not is_complete: # processing logic here @@ -383,7 +382,7 @@ while not is_complete: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the `ChatHistory` as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. 
::: zone pivot="programming-language-csharp" ```csharp @@ -415,8 +414,6 @@ if not user_input: if user_input.lower() == "exit": is_complete = True break - -history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) ``` ::: zone-end @@ -453,8 +450,9 @@ arguments = KernelArguments( now=datetime.now().strftime("%Y-%m-%d %H:%M") ) -async for response in agent.invoke(history, arguments): +async for response in agent.invoke(messages=user_input, thread=thread, arguments=arguments): print(f"{response.content}") + thread = response.thread ``` ::: zone-end @@ -585,13 +583,10 @@ import os import sys from datetime import datetime -from semantic_kernel.agents import ChatCompletionAgent -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread +from semantic_kernel.connectors.ai import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions import KernelArguments from semantic_kernel.kernel import Kernel # Adjust the sys.path so we can use the GitHubPlugin and GitHubSettings classes @@ -601,11 +596,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from plugins.GithubPlugin.github import GitHubPlugin, GitHubSettings # noqa: E402 -################################################################### -# The following sample demonstrates how to create a simple, # -# ChatCompletionAgent to use a GitHub plugin to interact # -# with the GitHub API. # -################################################################### +""" +The following sample demonstrates how to create a simple, +ChatCompletionAgent to use a GitHub plugin to interact +with the GitHub API. 
+""" async def main(): @@ -641,7 +636,7 @@ async def main(): arguments=KernelArguments(settings=settings), ) - history = ChatHistory() + thread: ChatHistoryAgentThread = None is_complete: bool = False while not is_complete: user_input = input("User:> ") @@ -652,14 +647,13 @@ async def main(): is_complete = True break - history.add_message(ChatMessageContent(role=AuthorRole.USER, content=user_input)) - arguments = KernelArguments( now=datetime.now().strftime("%Y-%m-%d %H:%M") ) - async for response in agent.invoke(history=history, arguments): + async for response in agent.invoke(messages=user_input, thread=thread, arguments=arguments): print(f"{response.content}") + thread = response.thread if __name__ == "__main__": diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml index 161e8b58..4822cb9d 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/TOC.yml @@ -28,6 +28,8 @@ href: qdrant-connector.md - name: Redis connector href: redis-connector.md +- name: SQL Server connector + href: sql-connector.md - name: SQLite connector href: sqlite-connector.md - name: Volatile (in-memory) connector diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 8d6dca7d..317cc1ec 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -45,23 +45,23 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma ::: zone-end ::: zone pivot="programming-language-python" -| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | -| ------------------------------------------------------------------ | :-----: | :---------------------------: | :-------------------------------: | -| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | -| [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| SQL Server | Planned | | Microsoft Semantic Kernel Project | -| SQLite | Planned | | Microsoft Semantic Kernel Project | -| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Vector Store Connectors | Python | Uses officially supported SDK | Maintainer / Vendor | +| 
------------------------------------------------------------------ | :-----: | :----------------------------------------: | :-------------------------------: | +| [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | +| [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| [SQL Server](./sql-connector.md) | ✅ | [pyodbc](https://pypi.org/project/pyodbc/) | Microsoft Semantic Kernel Project | +| SQLite | Planned | | Microsoft Semantic Kernel Project | +| [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end ::: zone pivot="programming-language-java" diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md new file mode 100644 index 00000000..ef59a1c9 --- /dev/null +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/sql-connector.md @@ -0,0 +1,121 @@ +--- +title: Using the Semantic Kernel SQL Server Vector Store connector (Preview) +description: Contains information on how to use a Semantic Kernel Vector store connector to access and manipulate data in SQL Server. +zone_pivot_groups: programming-languages +author: eavanvalkenburg +ms.topic: conceptual +ms.author: edvan +ms.date: 03/21/2024 +ms.service: semantic-kernel +--- +# Using the SQL Server Vector Store connector (Preview) + +> [!WARNING] +> The Semantic Kernel Vector Store functionality is in preview, and improvements that require breaking changes may still occur in limited circumstances before release. + +::: zone pivot="programming-language-csharp" + +## Coming soon + +More info coming soon. + +::: zone-end +::: zone pivot="programming-language-python" + +## Overview + +The [SQL Server](/sql) Vector Store connector is a Vector Store implementation provided by Semantic Kernel that uses Azure SQL as a vector store. Once SQL Server on-prem supports vectors it can also be used with that. + +The connector has the following characteristics. + +| Feature Area | Support | +| ------------------------------------- | ------------------------------------------------------------------------------------------- | +| Collection maps to | Table dictionary | +| Supported key property types |
str, int | +| Supported data property types | Any type | +| Supported vector property types | list[float], numpy array | +| Supported index types | Flat | +| Supported distance functions | Cosine Distance, Dot Product Similarity, Euclidean Distance
    | +| Supports multiple vectors in a record | Yes | +| is_filterable supported? | Yes | +| is_full_text_searchable supported? | No | + +## Getting started + +Add the Semantic Kernel package to your project. + +```bash +pip install semantic-kernel[sql] +``` + +The SQL Server connector uses the [pyodbc](https://pypi.org/project/pyodbc/) package to connect to SQL Server. The extra will install the package, but you will need to install the ODBC driver for SQL Server separately, this differs by platform, see the [Azure SQL Documentation](/azure/azure-sql/database/azure-sql-python-quickstart) for details. + +In order for the store and collection to work, it needs a connection string, this can be passed to the constructor or be set in the environment variable `SQL_SERVER_CONNECTION_STRING`. In order to properly deal with vectors, the `LongAsMax=yes` option will be added if not found. It also can use both username/password or integrated security, for the latter, the `DefaultAzureCredential` is used. + +In the snippets below, it is assumed that you have a data model class defined named 'DataModel'. + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerStore + +vector_store = SqlServerStore() + +# OR + +vector_store = SqlServerStore(connection_string="Driver={ODBC Driver 18 for SQL Server};Server=server_name;Database=database_name;UID=user;PWD=password;LongAsMax=yes;") + +vector_collection = vector_store.get_collection("dbo.table_name", DataModel) +``` + +It is possible to construct a direct reference to a named collection. + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerCollection + +vector_collection = SqlServerCollection("dbo.table_name", DataModel) +``` + +> Note: The collection name can be specified as a simple string (e.g. `table_name`) or as a fully qualified name (e.g. `dbo.table_name`). The latter is recommended to avoid ambiguity, if no schema is specified, the default schema (`dbo`) will be used. + +When you have specific requirements for the connection, you can also pass in a `pyodbc.Connection` object to the `SqlServerStore` constructor. This allows you to use a custom connection string or other connection options: + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerStore +import pyodbc + +# Create a connection to the SQL Server database +connection = pyodbc.connect("Driver={ODBC Driver 18 for SQL Server};Server=server_name;Database=database_name;UID=user;PWD=password;LongAsMax=yes;") +# Create a SqlServerStore with the connection +vector_store = SqlServerStore(connection=connection) +``` + +You will have to make sure to close the connection yourself, as the store or collection will not do that for you. + +## Custom create queries + +The SQL Server connector is limited to the Flat index type. + +The `create_collection` method on the `SqlServerCollection` allows you to pass in a single or multiple custom queries to create the collection. The queries are executed in the order they are passed in, no results are returned. + +If this is done, there is no guarantee that the other methods still work as expected. The connector is not aware of the custom queries and will not validate them. 
+ +If the `DataModel` has `id`, `content`, and `vector` as fields, then for instance you could create the table like this in order to also create a index on the content field: + +```python +from semantic_kernel.connectors.memory.sql_server import SqlServerCollection + +# Create a collection with a custom query +async with SqlServerCollection("dbo.table_name", DataModel) as collection: + collection.create_collection( + queries=["CREATE TABLE dbo.table_name (id INT PRIMARY KEY, content NVARCHAR(3000) NULL, vector VECTOR(1536) NULL ) PRIMARY KEY (id);", + "CREATE INDEX idx_content ON dbo.table_name (content);"] + ) +``` + +::: zone-end +::: zone pivot="programming-language-java" + +## Coming soon + +More info coming soon. + +::: zone-end diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index a7eb9cfd..dce391f3 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -15,7 +15,135 @@ As we transition some agents from the experimental stage to the release candidat ::: zone pivot="programming-language-csharp" -## OpenAIAssistantAgent C# Migration Guide +## Common Agent Invocation API + +In version 1.43.0 we are releasing a new common agent invocation API, that will allow all agent types to be invoked via a common API. + +To enable this new API we are introducing the concept of an `AgentThread`, which represents a conversation thread and abstracts away the different thread management requirements of different agent types. For some agent types it will also, in future, allow different thread imlementations to be used with the same agent. + +The common `Invoke` methods that we are introducing allow you to provide the message(s) that you want to pass to the agent and an optional `AgentThread`. If an `AgentThread` is provided, this will continue the conversation already on the `AgentThread`. If no `AgentThread` is provided, a new default thread will be created and returned as part of the response. + +It is also possible to manually create an `AgentThread` instance, for example in cases where you may have a thread id from the underlying agent service, and you want to continue that thread. You may also want to customize the options for the thread, e.g. associate tools. + +Here is a simple example of how any agent can now be used with agent agnostic code. + +```csharp +private async Task UseAgentAsync(Agent agent, AgentThread? agentThread = null) +{ + // Invoke the agent, and continue the existing thread if provided. + var responses = agent.InvokeAsync(new ChatMessageContent(AuthorRole.User, "Hi"), agentThread); + + // Output results. + await foreach (AgentResponseItem response in responses) + { + Console.WriteLine(response); + agentThread = response.Thread; + } + + // Delete the thread if required. + if (agentThread is not null) + { + await agentThread.DeleteAsync(); + } +} +``` + +These changes were applied in: + +- [PR #11116](https://github.com/microsoft/semantic-kernel/pull/11116) + +### Azure AI Agent Thread Options + +The `AzureAIAgent` currently only supports threads of type `AzureAIAgentThread`. + +In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually +construct an instance of an `AzureAIAgentThread`. 
+ +`AzureAIAgentThread` supports being created with customized tools and metadata, plus messages to seed the conversation with. + +```csharp +AgentThread thread = new AzureAIAgentThread( + agentsClient, + messages: seedMessages, + toolResources: tools, + metadata: metadata); +``` + +You can also construct an instance of an `AzureAIAgentThread` that continues an existing conversation. + +```csharp +AgentThread thread = new AzureAIAgentThread( + agentsClient, + id: "my-existing-thread-id"); +``` + +### Bedrock Agent Thread Options + +The `BedrockAgent` currently only supports threads of type `BedrockAgentThread`. + +In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually +construct an instance of an `BedrockAgentThread`. + +```csharp +AgentThread thread = new BedrockAgentThread(amazonBedrockAgentRuntimeClient); +``` + +You can also construct an instance of an `BedrockAgentThread` that continues an existing conversation. + +```csharp +AgentThread thread = new BedrockAgentThread( + amazonBedrockAgentRuntimeClient, + sessionId: "my-existing-session-id"); +``` + +### Chat Completion Agent Thread Options + +The `ChatCompletionAgent` currently only supports threads of type `ChatHistoryAgentThread`. +`ChatHistoryAgentThread` uses an in-memory `ChatHistory` object to store the messages on the thread. + +In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually +construct an instance of an `ChatHistoryAgentThread`. + +```csharp +AgentThread thread = new ChatHistoryAgentThread(); +``` + +You can also construct an instance of an `ChatHistoryAgentThread` that continues an existing conversation +by passing in a `ChatHistory` object with the existing messages. + +```csharp +ChatHistory chatHistory = new([new ChatMessageContent(AuthorRole.User, "Hi")]); + +AgentThread thread = new ChatHistoryAgentThread(chatHistory: chatHistory); +``` + +### OpenAI Assistant Thread Options + +The `OpenAIAssistantAgent` currently only supports threads of type `OpenAIAssistantAgentThread`. + +In addition to allowing a thread to be created for you automatically on agent invocation, you can also manually +construct an instance of an `OpenAIAssistantAgentThread`. + +`OpenAIAssistantAgentThread` supports being created with customized tools and metadata, plus messages to seed the conversation with. + +```csharp +AgentThread thread = new OpenAIAssistantAgentThread( + assistantClient, + messages: seedMessages, + codeInterpreterFileIds: fileIds, + vectorStoreId: "my-vector-store", + metadata: metadata); +``` + +You can also construct an instance of an `OpenAIAssistantAgentThread` that continues an existing conversation. + +```csharp +AgentThread thread = new OpenAIAssistantAgentThread( + assistantClient, + id: "my-existing-thread-id"); +``` + +## OpenAIAssistantAgent C# Migration Guide We recently applied a significant shift around the [`OpenAIAssistantAgent`](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs) in the _Semantic Kernel Agent Framework_. @@ -104,24 +232,29 @@ await assistantClient.DeleteAssistantAsync(agent.Id); ## 5. Thread Lifecycle ### **Creating a Thread** -Threads are now created directly using `AssistantClient`. + +Threads are now managed via `AssistantAgentThread`. 
##### **New Way** -```csharp -AssistantThread thread = await assistantClient.CreateThreadAsync(); -``` -Using a convenience extension method: ```csharp -string threadId = await assistantClient.CreateThreadAsync(messages: [new ChatMessageContent(AuthorRole.User, "")]); +var thread = new AssistantAgentThread(assistantClient); +// Calling CreateAsync is an optional step. +// A thread will be created automatically on first use if CreateAsync was not called. +// Note that CreateAsync is not on the AgentThread base implementation since not all +// agent services support explicit thread creation. +await thread.CreateAsync(); ``` ##### **Old Way (Deprecated)** + Previously, thread management was indirect or agent-bound. ### **Thread Deletion** + ```csharp -await assistantClient.DeleteThreadAsync(thread.Id); +var thread = new AssistantAgentThread(assistantClient, "existing-thread-id"); +await thread.DeleteAsync(); ``` ## 6. File Lifecycle @@ -165,12 +298,18 @@ Deprecated patterns are marked with `[Obsolete]`. To suppress obsolete warnings This migration guide helps you transition smoothly to the new implementation, simplifying client initialization, resource management, and integration with the **Semantic Kernel .NET SDK**. ::: zone-end + ::: zone pivot="programming-language-python" -For developers upgrading to Semantic Kernel Python 1.22.0 or later, the ChatCompletionAgent and OpenAI Assistant abstractions have been updated. +> [!IMPORTANT] +> For developers upgrading to Semantic Kernel Python 1.26.0 or later, significant updates and breaking changes have been introduced to improve our agent framework as we approach GA. These changes were applied in: +- [PR #11116](https://github.com/microsoft/semantic-kernel/pull/11116) + +Previous changes were applied in: + - [PR #10666](https://github.com/microsoft/semantic-kernel/pull/10666) - [PR #10667](https://github.com/microsoft/semantic-kernel/pull/10667) - [PR #10701](https://github.com/microsoft/semantic-kernel/pull/10701) @@ -178,6 +317,368 @@ These changes were applied in: This guide provides step-by-step instructions for migrating your Python code from the old implementation to the new implementation. +## Agent Imports + +All agent import paths have been consolidated under `semantic_kernel.agents`. + +#### Updated import style + +```python +from semantic_kernel.agents import ( + AutoGenConversableAgent, + AzureAIAgent, + AzureAssistantAgent, + BedrockAgent, + ChatCompletionAgent, + OpenAIAssistantAgent, +) +``` + +#### Previous import style (deprecated): + +``` +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.agents.autogen import AutoGenConversableAgent +from semantic_kernel.agents.azure_ai import AzureAIAgent +from semantic_kernel.agents.bedrock import BedrockAgent +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +``` + +## Common Agent Invocation API + +As of Semantic Kernel Python 1.26.0 and later, we introduced a new common abstraction to manage threads for all agents. For each agent we now expose a thread class that implements the `AgentThread` base class, allowing context management via methods like `create()` and `delete()`. 
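+
+To make the new pattern concrete, here is a minimal, agent-agnostic sketch (an illustration rather than an official sample), assuming `Agent` and `AgentThread` can be imported from `semantic_kernel.agents`:
+
+```python
+from semantic_kernel.agents import Agent, AgentThread
+
+
+async def use_agent(agent: Agent, thread: AgentThread | None = None) -> None:
+    # Invoke the agent; if no thread is supplied, a new one is created
+    # and returned on each response item.
+    async for response in agent.invoke(messages="Hi", thread=thread):
+        print(response)
+        thread = response.thread
+
+    # Clean up the thread once the conversation is no longer needed.
+    if thread is not None:
+        await thread.delete()
+```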
+ +Agent responses `get_response(...)`, `invoke(...)`, `invoke_stream(...)` now return an `AgentResponseItem[ChatMessageContent]`, which has two attributes: + +```python +message: TMessage # Usually ChatMessageContent +thread: AgentThread # Contains the concrete type for the given agent +``` + +### Adding Messages to a Thread + +Messages should be added to a thread via the `messages` argument as part of the agent's `get_response(...)`, `invoke(...)` or `invoke_stream(...)` methods. + +### Azure AI Agent Thread + +An `AzureAIAgentThread` can be created as follows: + +```python +from semantic_kernel.agents import AzureAIAgentThread + +thread = AzureAIAgentThread( + client: AIProjectClient, # required + messages: list[ThreadMessageOptions] | None = None, # optional + metadata: dict[str, str] | None = None, # optional + thread_id: str | None = None, # optional + tool_resources: "ToolResources | None" = None, # optional +) +``` + +Providing a `thread_id` (string) allows you to continue an existing conversation. If omitted, a new thread is created and returned as part of the agent response. + +A complete implementation example: + +```python +import asyncio + +from azure.identity.aio import DefaultAzureCredential + +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread + +USER_INPUTS = [ + "Why is the sky blue?", + "What are we talking about?", +] + +async def main() -> None: + ai_agent_settings = AzureAIAgentSettings.create() + + async with ( + DefaultAzureCredential() as creds, + AzureAIAgent.create_client(credential=creds) as client, + ): + # 1. Create an agent on the Azure AI agent service + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + name="Assistant", + instructions="Answer the user's questions.", + ) + + # 2. Create a Semantic Kernel agent for the Azure AI agent + agent = AzureAIAgent( + client=client, + definition=agent_definition, + ) + + # 3. Create a thread for the agent + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: AzureAIAgentThread = None + + try: + for user_input in USER_INPUTS: + print(f"# User: {user_input}") + # 4. Invoke the agent with the specified message for response + response = await agent.get_response(messages=user_input, thread=thread) + print(f"# {response.content}: {response}") + thread = response.thread + finally: + # 6. Cleanup: Delete the thread and agent + await thread.delete() if thread else None + await client.agents.delete_agent(agent.id) + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Bedrock Agent Thread + +A `BedrockAgent` uses a `BedrockAgentThread` to manage conversation history and context. You may provide a `session_id` to either continue or initiate a fresh conversation context. + +```python +from semantic_kernel.agents import BedrockAgentThread + +thread = BedrockAgentThread( + bedrock_runtime_client: Any, + session_id: str | None = None, +) +``` + +If no `session_id` is provided, a new context is created automatically. + +A complete implementation example: + +```python +import asyncio + +from semantic_kernel.agents import BedrockAgent, BedrockAgentThread + +async def main(): + bedrock_agent = await BedrockAgent.create_and_prepare_agent( + "semantic-kernel-bedrock-agent", + instructions="You are a friendly assistant. 
You help people find information.", + ) + + # Create a thread for the agent + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: BedrockAgentThread = None + + try: + while True: + user_input = input("User:> ") + if user_input == "exit": + print("\n\nExiting chat...") + break + + # Invoke the agent + # The chat history is maintained in the session + response = await bedrock_agent.get_response( + input_text=user_input, + thread=thread, + ) + print(f"Bedrock agent: {response}") + thread = response.thread + except KeyboardInterrupt: + print("\n\nExiting chat...") + return False + except EOFError: + print("\n\nExiting chat...") + return False + finally: + # Delete the agent + await bedrock_agent.delete_agent() + await thread.delete() if thread else None + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Chat History Agent Thread + +A `ChatCompletionAgent` uses `ChatHistoryAgentThread` to manage conversation history. It can be initialized as follows: + +```python +from semantic_kernel.agents import ChatHistoryAgentThread + +thread = ChatHistoryAgentThread( + chat_history: ChatHistory | None = None, + thread_id: str | None = None +) +``` + +Providing a `thread_id` allows continuing existing conversations. Omitting it creates a new thread. Serialization and rehydration of thread state are supported for persistent conversation contexts. + +A complete implementation example: + +```python +import asyncio + +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + +# Simulate a conversation with the agent +USER_INPUTS = [ + "Hello, I am John Doe.", + "What is your name?", + "What is my name?", +] + + +async def main(): + # 1. Create the agent by specifying the service + agent = ChatCompletionAgent( + service=AzureChatCompletion(), + name="Assistant", + instructions="Answer the user's questions.", + ) + + # 2. Create a thread to hold the conversation + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: ChatHistoryAgentThread = None + + for user_input in USER_INPUTS: + print(f"# User: {user_input}") + # 3. Invoke the agent for a response + response = await agent.get_response( + messages=user_input, + thread=thread, + ) + print(f"# {response.name}: {response}") + # 4. Store the thread, which allows the agent to + # maintain conversation history across multiple messages. + thread = response.thread + + # 5. Cleanup: Clear the thread + await thread.delete() if thread else None + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### OpenAI Assistant Thread + +The `AzureAssistantAgent` and `OpenAIAssistantAgent` use `AssistantAgentThread` to manage conversation history and context: + +```python +from semantic_kernel.agents import ChatHistoryAgentThread + +thread = AssistantAgentThread( + client: AsyncOpenAI, + thread_id: str | None = None, + messages: Iterable["ThreadCreateMessage"] | NotGiven = NOT_GIVEN, + metadata: dict[str, Any] | NotGiven = NOT_GIVEN, + tool_resources: ToolResources | NotGiven = NOT_GIVEN, +) +``` + +Providing a `thread_id` continues an existing conversation; otherwise, a new thread is created. + +A complete implementation example: + +```python +# Copyright (c) Microsoft. All rights reserved. 
+import asyncio + +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent + + +# Simulate a conversation with the agent +USER_INPUTS = [ + "Why is the sky blue?", + "What is the speed of light?", + "What have we been talking about?", +] + + +async def main(): + # 1. Create the client using Azure OpenAI resources and configuration + client, model = AzureAssistantAgent.setup_resources() + + # 2. Create the assistant on the Azure OpenAI service + definition = await client.beta.assistants.create( + model=model, + instructions="Answer questions about the world in one sentence.", + name="Assistant", + ) + + # 3. Create a Semantic Kernel agent for the Azure OpenAI assistant + agent = AzureAssistantAgent( + client=client, + definition=definition, + ) + + # 4. Create a new thread for use with the assistant + # If no thread is provided, a new thread will be + # created and returned with the initial response + thread: AssistantAgentThread = None + + try: + for user_input in USER_INPUTS: + print(f"# User: '{user_input}'") + # 6. Invoke the agent for the current thread and print the response + response = await agent.get_response(messages=user_input, thread=thread) + print(f"# {response.name}: {response}") + thread = response.thread + + finally: + # 7. Clean up the resources + await thread.delete() if thread else None + await agent.client.beta.assistants.delete(assistant_id=agent.id) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +## Message Inputs for Agent Invocation + +Previous implementations allowed only a single message input to methods like `get_response(...)`, `invoke(...)`, and `invoke_stream(...)`. We've now updated these methods to support multiple `messages (str | ChatMessageContent | list[str | ChatMessageContent])`. + +Agent invocation methods need updates as follows: + +### Old Way + +```python +response = await agent.get_response(message="some user input", thread=thread) +``` + +### New Way + +```python +response = await agent.get_response(messages=["some initial inputer", "other input"], thread=thread) +``` + +## `AzureAIAgent` + +In Semantic Kernel Python 1.26.0+, `AzureAIAgent` thread creation is now managed via the `AzureAIAgentThread` object, not directly on the client. + +### Old Way + +```python +thread = await client.agents.create_thread() +``` + +### New Way + +```python +from semantic_kernel.agents import AzureAIAgentThread + +thread = AzureAIAgentThread( + client: AIProjectClient, # required + messages: list[ThreadMessageOptions] | None = None, # optional + metadata: dict[str, str] | None = None, # optional + thread_id: str | None = None, # optional + tool_resources: "ToolResources | None" = None, # optional +) +``` + +If no `thread_id` is provided initially, a new thread is created and returned in the agent response. + ## `ChatCompletionAgent` The `ChatCompletionAgent` has been updated to simplify service configuration, plugin handling, and function calling behaviors. Below are the key changes you should consider when migrating. 
@@ -189,6 +690,9 @@ You can now specify the service directly as part of the agent constructor: #### New Way ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + agent = ChatCompletionAgent( service=AzureChatCompletion(), name="", @@ -203,6 +707,9 @@ Note: If both a kernel and a service are provided, the service will take precede Previously, you would first add a service to a kernel and then pass the kernel to the agent: ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + kernel = Kernel() kernel.add_service(AzureChatCompletion()) @@ -220,6 +727,9 @@ Plugins can now be supplied directly through the constructor: #### New Way ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + agent = ChatCompletionAgent( service=AzureChatCompletion(), name="", @@ -233,6 +743,9 @@ agent = ChatCompletionAgent( Plugins previously had to be added to the kernel separately: ```python +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion + kernel = Kernel() kernel.add_plugin(SamplePlugin()) @@ -249,22 +762,31 @@ Note: Both approaches are valid, but directly specifying plugins simplifies init You now have two ways to invoke the agent. The new method directly retrieves a single response, while the old method supports streaming. -#### New Way (Single Response) +#### New Way (No Conversation Thread/Context) ```python -chat_history = ChatHistory() -chat_history.add_user_message("") -response = await agent.get_response(chat_history) -# response is of type ChatMessageContent +response = await agent.get_response(messages="user input") +# response is of type AgentResponseItem[ChatMessageContent] ``` +Note: if the next response does not use the returned thread, the conversation will use a new thread and thus will not continue with previous context. -#### Old Way (Still Valid) +#### New Way (Single Response with Context) + +```python +thread = ChatHistoryAgentThread() + +for user_input in ["First user input", "Second User Input"]: + response = await agent.get_response(messages=user_input, thread=thread) + # response is of type AgentResponseItem[ChatMessageContent] + thread = response.thread +``` + +#### Old Way (No Longer Valid) ```python chat_history = ChatHistory() chat_history.add_user_message("") -async for response in agent.invoke(chat_history): - # handle response +response = agent.get_response(message="user input", chat_history=chat_history) ``` ### 4. Controlling Function Calling @@ -404,8 +926,14 @@ thread_id = await agent.create_thread() ### New Way ```python -thread = await agent.client.beta.threads.create() -# Use thread.id for the thread_id string +from semantic_kernel.agents AssistantAgentThread + +thread = AssistantAgentThread() + +async for response in agent.invoke(messages="user input", thread=thread) + # handle response + print(response) + thread = response.thread ``` ## 3. 
Handling Plugins @@ -580,7 +1108,7 @@ await agent.delete() ### New Way ```python await client.files.delete(file_id) -await client.beta.threads.delete(thread.id) +await thread.delete() await client.beta.assistants.delete(agent.id) ``` From 879a1230daeba206d0239deeef82b1017d79ed64 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Tue, 25 Mar 2025 13:10:39 +0900 Subject: [PATCH 099/117] Python Agent thread updates --- .../Frameworks/agent/agent-architecture.md | 2 +- .../Frameworks/agent/agent-streaming.md | 59 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index 589f2e5b..000c0713 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -74,7 +74,7 @@ case the conversation state is managed locally in the application. Stateful agents typically only work with a matching `AgentThread` implementation, while other types of agents could work with more than one `AgentThread` type. For example, `AzureAIAgent` requires a matching `AzureAIAgentThread`. This is because the Azure AI Agent service stores conversations in the service, and requires specific service calls to create a thread and update it. -If a different agent thread type was used with `AzureAIAgent`, no thread would be created in the Azure AI Agent service and invoke calls would fail. +If a different agent thread type is used with the `AzureAIAgent`, we fail fast due to an unexpected thread type and raise an exception to alert the caller. ## Agent Chat diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index 70c6f2d8..c689158f 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -138,6 +138,34 @@ await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) // Process messages... } +// Delete the thread when it is no longer needed +await agentThread.DeleteAsync(); +``` + +To create a thread using an existing `threadId`, pass it to the constructor of `OpenAIAssistantAgentThread`: + +```csharp +// Define agent +OpenAIAssistantAgent agent = ...; + +// Create a thread for the agent conversation. +OpenAIAssistantAgentThread agentThread = new(assistantClient, threadId); + +// Cerate a user message +var message = new ChatMessageContent(AuthorRole.User, ""); + +// Generate the streamed agent response(s) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) +{ + // Process streamed response(s)... +} + +// It's possible to read the messages from the remote thread. +await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) +{ + // Process messages... +} + // Delete the thread when it is no longer needed await agentThread.DeleteAsync(); ``` @@ -146,6 +174,7 @@ await agentThread.DeleteAsync(); ::: zone pivot="programming-language-python" ```python from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent + # Define agent agent = OpenAIAssistantAgent(...) # or = AzureAssistantAgent(...) @@ -158,7 +187,37 @@ thread: AssistantAgentThread = None async for response in agent.invoke_stream(messages="user input", thread=thread): # Process streamed response(s)... 
thread = response.thread + +# Read the messages from the remote thread +async for response in thread.get_messages(): + # Process messages + +# Delete the thread +await thread.delete() +``` + +To create a thread using an existing `thread_id`, pass it to the constructor of `AssistantAgentThread`: + +```python +from semantic_kernel.agents import AssistantAgentThread, AzureAssistantAgent, OpenAIAssistantAgent + +# Define agent +agent = OpenAIAssistantAgent(...) # or = AzureAssistantAgent(...) + +# Create a thread for the agent conversation. +# If no thread is provided one will be created and returned with +# the initial response. +thread = AssistantAgentThread(client=client, thread_id="your-existing-thread-id") + +# Generate the streamed agent response(s) +async for response in agent.invoke_stream(messages="user input", thread=thread): + # Process streamed response(s)... + thread = response.thread + +# Delete the thread +await thread.delete() ``` + ::: zone-end ::: zone pivot="programming-language-java" From 633722ea152674a60ca0200911300c6d8e7abe18 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Tue, 25 Mar 2025 13:13:38 +0900 Subject: [PATCH 100/117] Fix c# id property --- semantic-kernel/Frameworks/agent/agent-streaming.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index c689158f..8f331aa9 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -142,14 +142,14 @@ await foreach (ChatMessageContent response in agentThread.GetMessagesAsync()) await agentThread.DeleteAsync(); ``` -To create a thread using an existing `threadId`, pass it to the constructor of `OpenAIAssistantAgentThread`: +To create a thread using an existing `Id`, pass it to the constructor of `OpenAIAssistantAgentThread`: ```csharp // Define agent OpenAIAssistantAgent agent = ...; // Create a thread for the agent conversation. 
-OpenAIAssistantAgentThread agentThread = new(assistantClient, threadId); +OpenAIAssistantAgentThread agentThread = new(assistantClient, "your-existing-thread-id"); // Cerate a user message var message = new ChatMessageContent(AuthorRole.User, ""); From 400f8835fcd9cb71cd03af2cd4f66cdfc6c7f8d5 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Tue, 25 Mar 2025 17:13:11 +0900 Subject: [PATCH 101/117] Version update --- semantic-kernel/Frameworks/agent/agent-architecture.md | 2 +- .../support/migration/agent-framework-rc-migration-guide.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index 000c0713..8027b20f 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -98,7 +98,7 @@ The _Agent Channel_ class enables agents of various types to participate in an [ ::: zone pivot="programming-language-python" -- [`agent_channel](/python/api/semantic-kernel/semantic_kernel.agents.channels.agent_channel) +- [`agent_channel`](/python/api/semantic-kernel/semantic_kernel.agents.channels.agent_channel) ::: zone-end diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index dce391f3..93305c32 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -302,7 +302,7 @@ This migration guide helps you transition smoothly to the new implementation, si ::: zone pivot="programming-language-python" > [!IMPORTANT] -> For developers upgrading to Semantic Kernel Python 1.26.0 or later, significant updates and breaking changes have been introduced to improve our agent framework as we approach GA. +> For developers upgrading to Semantic Kernel Python 1.26.1 or later, significant updates and breaking changes have been introduced to improve our agent framework as we approach GA. These changes were applied in: From 73a750868ec6a52cd7bc10a21bedfde1ae23f12e Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Tue, 25 Mar 2025 11:53:17 +0000 Subject: [PATCH 102/117] Update how to guides for agents with new common invoke pattern (#506) --- .../agent/examples/example-assistant-code.md | 22 +++++++++---------- .../examples/example-assistant-search.md | 22 +++++++++---------- .../agent/examples/example-chat-agent.md | 12 +++++----- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md index 92f32c25..edd70215 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-code.md @@ -347,14 +347,14 @@ agent = AzureAssistantAgent( ### The Chat Loop -At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an `AgentThread` to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. 
::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Creating thread..."); -AssistantThread thread = await assistantClient.CreateThreadAsync(); +AssistantAgentThread agentThread = new(); Console.WriteLine("Ready!"); @@ -373,7 +373,7 @@ finally Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - assistantClient.DeleteThreadAsync(thread.Id), + agentThread.DeleteAsync(), assistantClient.DeleteAssistantAsync(assistant.Id), fileClient.DeleteFileAsync(fileDataCountryList.Id), fileClient.DeleteFileAsync(fileDataCountryDetail.Id), @@ -405,7 +405,7 @@ finally: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the _Assistant Thread_ as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. ::: zone pivot="programming-language-csharp" ```csharp @@ -422,7 +422,7 @@ if (input.Trim().Equals("EXIT", StringComparison.OrdinalIgnoreCase)) break; } -await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); +var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); ``` @@ -533,12 +533,12 @@ async def download_response_image(agent, file_ids: list[str]): ::: zone-end -To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. +To generate an `Agent` response to user input, invoke the agent by providing the message and the `AgentThread`. In this example, we choose a streamed response and capture any generated _File References_ for download and review at the end of the response cycle. It's important to note that generated code is identified by the presence of a _Metadata_ key in the response message, distinguishing it from the conversational reply. ::: zone pivot="programming-language-csharp" ```csharp bool isCode = false; -await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) +await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { if (isCode != (response.Metadata?.ContainsKey(OpenAIAssistantAgent.CodeInterpreterMetadataKey) ?? false)) { @@ -668,7 +668,7 @@ public static class Program // Create the conversation thread Console.WriteLine("Creating thread..."); - AssistantThread thread = await assistantClient.CreateThreadAsync(); + AssistantAgentThread agentThread = new(); Console.WriteLine("Ready!"); @@ -691,12 +691,12 @@ public static class Program break; } - await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); + var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); bool isCode = false; - await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(thread.Id)) + await foreach (StreamingChatMessageContent response in agent.InvokeStreamingAsync(message, agentThread)) { if (isCode != (response.Metadata?.ContainsKey(OpenAIAssistantAgent.CodeInterpreterMetadataKey) ?? 
false)) { @@ -724,7 +724,7 @@ public static class Program Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - assistantClient.DeleteThreadAsync(thread.Id), + agentThread.DeleteAsync(), assistantClient.DeleteAssistantAsync(assistant.Id), fileClient.DeleteFileAsync(fileDataCountryList.Id), fileClient.DeleteFileAsync(fileDataCountryDetail.Id), diff --git a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md index 77f01209..0b355dd6 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md +++ b/semantic-kernel/Frameworks/agent/examples/example-assistant-search.md @@ -390,14 +390,14 @@ agent = AzureAssistantAgent( ### The _Chat_ Loop -At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an _Assistant Thread_ to maintain the conversation state and creating an empty loop. +At last, we are able to coordinate the interaction between the user and the `Agent`. Start by creating an `AgentThread` to maintain the conversation state and creating an empty loop. Let's also ensure the resources are removed at the end of execution to minimize unnecessary charges. ::: zone pivot="programming-language-csharp" ```csharp Console.WriteLine("Creating thread..."); -AssistantThread thread = await assistantClient.CreateThreadAsync(); +OpenAIAssistantAgent agentThread = new(); Console.WriteLine("Ready!"); @@ -415,7 +415,7 @@ finally Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - assistantClient.DeleteThreadAsync(thread.Id), + agentThread.DeleteAsync(); assistantClient.DeleteAssistantAsync(assistant.Id), storeClient.DeleteVectorStoreAsync(storeId), ..fileReferences.Select(fileReference => fileClient.DeleteFileAsync(fileReference.Key)) @@ -449,7 +449,7 @@ finally: ::: zone-end -Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. Valid input will be added to the _Assistant Thread_ as a _User_ message. +Now let's capture user input within the previous loop. In this case, empty input will be ignored and the term `EXIT` will signal that the conversation is completed. ::: zone pivot="programming-language-csharp" ```csharp @@ -466,7 +466,7 @@ if (input.Trim().Equals("EXIT", StringComparison.OrdinalIgnoreCase)) break; } -await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); +var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); ``` ::: zone-end @@ -510,12 +510,12 @@ private static string ReplaceUnicodeBrackets(this string content) => ::: zone-end -To generate an `Agent` response to user input, invoke the agent by specifying the _Assistant Thread_. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. +To generate an `Agent` response to user input, invoke the agent by specifying the message and agent thread. In this example, we choose a streamed response and capture any associated _Citation Annotations_ for display at the end of the response cycle. Note each streamed chunk is being reformatted using the previous helper method. 
::: zone pivot="programming-language-csharp" ```csharp List footnotes = []; -await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(thread.Id)) +await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(message, agentThread)) { // Capture annotations for footnotes footnotes.AddRange(chunk.Items.OfType()); @@ -654,7 +654,7 @@ public static class Program // Create the conversation thread Console.WriteLine("Creating thread..."); - AssistantThread thread = await assistantClient.CreateThreadAsync(); + AssistantAgentThread agentThread = new(); Console.WriteLine("Ready!"); @@ -676,11 +676,11 @@ public static class Program break; } - await agent.AddChatMessageAsync(thread.Id, new ChatMessageContent(AuthorRole.User, input)); + var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); List footnotes = []; - await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(thread.Id)) + await foreach (StreamingChatMessageContent chunk in agent.InvokeStreamingAsync(message, agentThread)) { // Capture annotations for footnotes footnotes.AddRange(chunk.Items.OfType()); @@ -708,7 +708,7 @@ public static class Program Console.WriteLine("Cleaning-up..."); await Task.WhenAll( [ - assistantClient.DeleteThreadAsync(thread.Id), + agentThread.DeleteAsync(), assistantClient.DeleteAssistantAsync(assistant.Id), storeClient.DeleteVectorStoreAsync(storeId), ..fileReferences.Select(fileReference => fileClient.DeleteFileAsync(fileReference.Key)) diff --git a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md index 516d5982..4481e9fc 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md +++ b/semantic-kernel/Frameworks/agent/examples/example-chat-agent.md @@ -358,7 +358,7 @@ At last, we are able to coordinate the interaction between the user and the `Age ::: zone pivot="programming-language-csharp" ```csharp -ChatHistory history = []; +ChatHistoryAgentThread agentThread = new(); bool isComplete = false; do { @@ -399,7 +399,7 @@ if (input.Trim().Equals("EXIT", StringComparison.OrdinalIgnoreCase)) break; } -history.Add(new ChatMessageContent(AuthorRole.User, input)); +var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); ``` @@ -435,7 +435,7 @@ KernelArguments arguments = { { "now", $"{now.ToShortDateString()} {now.ToShortTimeString()}" } }; -await foreach (ChatMessageContent response in agent.InvokeAsync(history, arguments)) +await foreach (ChatMessageContent response in agent.InvokeAsync(message, agentThread, options: new() { KernelArguments = arguments })) { Console.WriteLine($"{response.Content}"); } @@ -537,7 +537,7 @@ public static class Program Console.WriteLine("Ready!"); - ChatHistory history = []; + ChatHistoryAgentThread agentThread = new(); bool isComplete = false; do { @@ -554,7 +554,7 @@ public static class Program break; } - history.Add(new ChatMessageContent(AuthorRole.User, input)); + var message = new ChatMessageContent(AuthorRole.User, input); Console.WriteLine(); @@ -564,7 +564,7 @@ public static class Program { { "now", $"{now.ToShortDateString()} {now.ToShortTimeString()}" } }; - await foreach (ChatMessageContent response in agent.InvokeAsync(history, arguments)) + await foreach (ChatMessageContent response in agent.InvokeAsync(message, agentThread, options: new() { KernelArguments = arguments })) { // Display response. 
Console.WriteLine($"{response.Content}"); From 5d88a670db7669795f482c904f884603aedb0d9d Mon Sep 17 00:00:00 2001 From: SergeyMenshykh Date: Fri, 28 Mar 2025 20:09:24 +0000 Subject: [PATCH 103/117] add note oneOf and anyOf --- .../concepts/plugins/adding-openapi-plugins.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/semantic-kernel/concepts/plugins/adding-openapi-plugins.md b/semantic-kernel/concepts/plugins/adding-openapi-plugins.md index f9f2b402..4f4560ab 100644 --- a/semantic-kernel/concepts/plugins/adding-openapi-plugins.md +++ b/semantic-kernel/concepts/plugins/adding-openapi-plugins.md @@ -345,6 +345,18 @@ To handle payloads with non-unique property names, consider the following altern If payloads schemas use any of the `oneOf`, `anyOf`, `allOf` composite keywords or recursive references, consider disabling dynamic payload construction and allow the LLM to create the payload based on its schema, as explained in the [The payload parameter](./adding-openapi-plugins.md#the-payload-parameter) section. +#### Note on the `oneOf` and `anyOf` Keywords +The `anyOf` and `oneOf` keywords assume that a payload can be composed of properties defined by multiple schemas. +The `anyOf` keyword allows a payload to include properties defined in one or more schemas, while `oneOf` restricts the payload to contain properties from only one schema among the many provided. +For more information, you can refer to the [Swagger documentation on oneOf and anyOf](https://swagger.io/docs/specification/v3_0/data-models/oneof-anyof-allof-not/). + +With both `anyOf` and `oneOf` keywords, which offer alternatives to the payload structure, it's impossible to predict which alternative a caller will choose +when invoking operations that define payloads with these keywords. For example, it is not possible to determine in advance whether a caller will invoke an operation with a Dog or Cat object, or with an object composed of some or perhaps all properties from the PetByAge and PetByType schemas +described in the examples for `anyOf` and `oneOf` in the [Swagger documentation](https://swagger.io/docs/specification/v3_0/data-models/oneof-anyof-allof-not/). +As a result, because there's no set of parameters known in advance that Semantic Kernel can use to create the a plugin function with for such operations, Semantic Kernel creates a function with only one [payload](./adding-openapi-plugins.md#the-payload-parameter) parameter +having a schema from the operation describing a multitude of possible alternatives, offloading the payload creation to the operation caller: LLM or calling code +that must have all the context to know which one of the available alternatives to invoke the function with. + ### Payload namespacing Payload namespacing helps prevent naming conflicts that can occur due to non-unique property names in OpenAPI plugin payloads. 
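+
+To tie the payload guidance above together, here is a minimal, hypothetical sketch of importing an OpenAPI plugin with dynamic payload construction disabled and payload namespacing enabled. The plugin name, document path, and the exact execution parameter names (`enable_dynamic_payload`, `enable_payload_namespacing`) are assumptions for illustration rather than verified API details:
+
+```python
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.openapi_plugin import OpenAPIFunctionExecutionParameters
+
+kernel = Kernel()
+
+# With dynamic payload construction disabled, each operation exposes a single
+# payload parameter and the caller (LLM or calling code) builds the body from
+# the schema, which is the behavior the oneOf/anyOf note above relies on.
+plugin = kernel.add_plugin_from_openapi(
+    plugin_name="pets",                   # assumed plugin name
+    openapi_document_path="./pets.yaml",  # assumed path to the OpenAPI document
+    execution_settings=OpenAPIFunctionExecutionParameters(
+        enable_dynamic_payload=False,     # let the caller construct the payload
+        enable_payload_namespacing=True,  # prefix properties to avoid name clashes
+    ),
+)
+```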
From 2d4d04b331a12dad73bef512937e6e59d96aef2a Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Mon, 31 Mar 2025 11:39:48 +0100 Subject: [PATCH 104/117] Update docs to indicate NEON postgres support (#510) --- .../out-of-the-box-connectors/index.md | 24 ++++++++++--------- .../postgres-connector.md | 4 +++- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 317cc1ec..11721759 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -22,24 +22,25 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma ::: zone pivot="programming-language-csharp" -| Vector Store Connectors | C# | Uses officially supported SDK | Maintainer / Vendor | -| ------------------------------------------------------------------ | :------------------------: | :---------------------------: | :-------------------------------: | +| Vector Store Connectors | C# | Uses officially supported SDK | Maintainer / Vendor | +| ------------------------------------------------------------------ | :--------------------------: | :----------------------------: | :-------------------------------: | | [Azure AI Search](./azure-ai-search-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Couchbase](./couchbase-connector.md) | ✅ | ✅ | Couchbase | | [Elasticsearch](./elasticsearch-connector.md) | ✅ | ✅ | Elastic | -| Chroma | Planned | | | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | -| Milvus | Planned | | | +| Chroma | Planned | | | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| Milvus | Planned | | | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Neon |Use [Postgres](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | | [Pinecone](./pinecone-connector.md) | ✅ | ❌ | Microsoft Semantic Kernel Project | | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Sql Server | Planned | | | +| Sql Server | Planned | | | | [SQLite](./sqlite-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | +| [Volatile (In-Memory)](./volatile-connector.md) | Deprecated (use In-Memory) | N/A | Microsoft Semantic Kernel Project | | [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end @@ -51,16 +52,17 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [Cosmos DB MongoDB (vCore)](./azure-cosmosdb-mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Cosmos DB No SQL](./azure-cosmosdb-nosql-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Chroma](./chroma-connector.md) | ✅ | ✅ | Microsoft Semantic 
Kernel Project | -| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | +| [Elasticsearch](./elasticsearch-connector.md) | Planned | | | | [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | +| [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | +| Neon |Use [Postgres](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | | [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Redis](./redis-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| [SQL Server](./sql-connector.md) | ✅ | [pyodbc](https://pypi.org/project/pyodbc/) | Microsoft Semantic Kernel Project | -| SQLite | Planned | | Microsoft Semantic Kernel Project | +| [SQL Server](./sql-connector.md) | ✅ | [pyodbc](https://pypi.org/project/pyodbc/) | Microsoft Semantic Kernel Project | +| SQLite | Planned | | Microsoft Semantic Kernel Project | | [Weaviate](./weaviate-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | ::: zone-end diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md index e253ab5e..2dadacac 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md @@ -17,7 +17,9 @@ ms.service: semantic-kernel ## Overview -The Postgres Vector Store connector can be used to access and manage data in Postgres. The connector has the following characteristics. +The Postgres Vector Store connector can be used to access and manage data in Postgres and also supports [Neon Serverless Postgres](https://neon.tech/). + +The connector has the following characteristics. | Feature Area | Support | |-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------| From 878e0971c159f9bf1e7f89ad824d761af316cc12 Mon Sep 17 00:00:00 2001 From: Ben Thomas Date: Wed, 2 Apr 2025 15:25:44 -0700 Subject: [PATCH 105/117] =?UTF-8?q?Fixing=20a=20few=20typos=20and=20removi?= =?UTF-8?q?ng=20incomplete=20list=20of=20ChatCompletionServ=E2=80=A6=20(#5?= =?UTF-8?q?09)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixing a few typos and removing incomplete list of ChatCompletionServices. 
* Fixing typo --------- Co-authored-by: Ben Thomas --- .../Frameworks/agent/chat-completion-agent.md | 81 ++++++++----------- 1 file changed, 34 insertions(+), 47 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/chat-completion-agent.md b/semantic-kernel/Frameworks/agent/chat-completion-agent.md index 7b53e4cc..1cce9c01 100644 --- a/semantic-kernel/Frameworks/agent/chat-completion-agent.md +++ b/semantic-kernel/Frameworks/agent/chat-completion-agent.md @@ -16,6 +16,7 @@ ms.service: semantic-kernel Detailed API documentation related to this discussion is available at: ::: zone pivot="programming-language-csharp" + - [`ChatCompletionAgent`](/dotnet/api/microsoft.semantickernel.agents.chatcompletionagent) - [`Microsoft.SemanticKernel.Agents`](/dotnet/api/microsoft.semantickernel.agents) - [`IChatCompletionService`](/dotnet/api/microsoft.semantickernel.chatcompletion.ichatcompletionservice) @@ -35,47 +36,15 @@ Detailed API documentation related to this discussion is available at: ::: zone-end - ## Chat Completion in Semantic Kernel -[_Chat Completion_](../../concepts/ai-services/chat-completion/index.md) is fundamentally a protocol for a chat-based interaction with an AI model where the chat-history maintained and presented to the model with each request. _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) offer a unified framework for integrating the chat-completion capabilities of various AI models. - -A _chat completion agent_ can leverage any of these [AI services](../../concepts/ai-services/index.md) to generate responses, whether directed to a user or another agent. - -::: zone pivot="programming-language-csharp" - -For .NET, _chat-completion_ AI Services are based on the [`IChatCompletionService`](/dotnet/api/microsoft.semantickernel.chatcompletion.ichatcompletionservice) interface. - -For .NET, some of AI services that support models with chat-completion include: - -Model|Semantic Kernel AI Service ---|-- -Azure OpenAI|[`Microsoft.SemanticKernel.Connectors.AzureOpenAI`](/dotnet/api/microsoft.semantickernel.connectors.azureopenai) -Gemini|[`Microsoft.SemanticKernel.Connectors.Google`](/dotnet/api/microsoft.semantickernel.connectors.google) -HuggingFace|[`Microsoft.SemanticKernel.Connectors.HuggingFace`](/dotnet/api/microsoft.semantickernel.connectors.huggingface) -Mistral|[`Microsoft.SemanticKernel.Connectors.MistralAI`](/dotnet/api/microsoft.semantickernel.connectors.mistralai) -OpenAI|[`Microsoft.SemanticKernel.Connectors.OpenAI`](/dotnet/api/microsoft.semantickernel.connectors.openai) -Onnx|[`Microsoft.SemanticKernel.Connectors.Onnx`](/dotnet/api/microsoft.semantickernel.connectors.onnx) - -::: zone-end - -::: zone pivot="programming-language-python" - -- [`AzureChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion.azurechatcompletion) -- [`OpenAIChatCompletion`](/python/api/semantic-kernel/semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion.openaichatcompletion) - -::: zone-end - -::: zone pivot="programming-language-java" - -> Agents are currently unavailable in Java. - -::: zone-end +[_Chat Completion_](../../concepts/ai-services/chat-completion/index.md) is fundamentally a protocol for a chat-based interaction with an AI model where the chat-history is maintained and presented to the model with each request. _Semantic Kernel_ [AI services](../../concepts/ai-services/index.md) offer a unified framework for integrating the chat-completion capabilities of various AI models. 
+A _chat completion agent_ can leverage any of these [AI services](../../concepts/ai-services/chat-completion/index.md) to generate responses, whether directed to a user or another agent. ## Preparing Your Development Environment -To proceed with developing an `AzureAIAgent`, configure your development environment with the appropriate packages. +To proceed with developing an `ChatCompletionAgent`, configure your development environment with the appropriate packages. ::: zone pivot="programming-language-csharp" @@ -103,12 +72,12 @@ pip install semantic-kernel ::: zone-end - ## Creating a `ChatCompletionAgent` A `ChatCompletionAgent` is fundamentally based on an [AI services](../../concepts/ai-services/index.md). As such, creating a `ChatCompletionAgent` starts with creating a [`Kernel`](../../concepts/kernel.md) instance that contains one or more chat-completion services and then instantiating the agent with a reference to that [`Kernel`](../../concepts/kernel.md) instance. ::: zone pivot="programming-language-csharp" + ```csharp // Initialize a Kernel with a chat-completion service IKernelBuilder builder = Kernel.CreateBuilder(); @@ -126,12 +95,13 @@ ChatCompletionAgent agent = Kernel = kernel }; ``` + ::: zone-end ::: zone pivot="programming-language-python" There are two ways to create a `ChatCompletionAgent`: -### 1. By providing the chat completion service directly: +### 1. By providing the chat completion service directly ```python from semantic_kernel.agents import ChatCompletionAgent @@ -143,7 +113,8 @@ agent = ChatCompletionAgent( instructions="", ) ``` -### 2. By creating a Kernel first, adding the service to it, then providing the kernel: + +### 2. By creating a Kernel first, adding the service to it, then providing the kernel ```python # Define the kernel @@ -159,6 +130,7 @@ agent = ChatCompletionAgent( instructions="", ) ``` + The first method is useful when you already have a chat completion service ready. The second method is beneficial when you need a kernel that manages multiple services or additional functionalities. ::: zone-end @@ -168,7 +140,6 @@ The first method is useful when you already have a chat completion service ready ::: zone-end - ## AI Service Selection No different from using Semantic Kernel [AI services](../../concepts/ai-services/index.md) directly, a `ChatCompletionAgent` supports the specification of a service-selector. A service-selector identifies which [AI service](../../concepts/ai-services/index.md) to target when the [`Kernel`](../../concepts/kernel.md) contains more than one. @@ -176,6 +147,7 @@ No different from using Semantic Kernel [AI services](../../concepts/ai-services > Note: If multiple [AI services](../../concepts/ai-services/index.md) are present and no service-selector is provided, the same default logic is applied for the agent that you'd find when using an [AI services](../../concepts/ai-services/index.md) outside of the `Agent Framework` ::: zone pivot="programming-language-csharp" + ```csharp IKernelBuilder builder = Kernel.CreateBuilder(); @@ -199,9 +171,11 @@ ChatCompletionAgent agent = }); }; ``` + ::: zone-end ::: zone pivot="programming-language-python" + ```python from semantic_kernel.connectors.ai.open_ai import ( AzureChatCompletion, @@ -225,6 +199,7 @@ agent = ChatCompletionAgent( arguments=KernelArguments(settings=settings) ) ``` + ::: zone-end ::: zone pivot="programming-language-java" @@ -282,15 +257,29 @@ The easiest is to call and await `get_response`: ```python # Define agent agent = ChatCompletionAgent(...) 
- -# Define the thread -thread = ChatHistoryAgentThread() # Generate the agent response -response = await agent.get_response(messages="user input", thread=thread) +response = await agent.get_response(messages="user input") # response is an `AgentResponseItem[ChatMessageContent]` object ``` -Otherwise, calling the `invoke` method returns an `AsyncIterable` of `AgentResponseItem[ChatMessageContent]`. + +If you want the agent to maintain conversation history between invocations, you can pass it a `ChatHistoryAgentThread` as follows: + +```python + +# Define agent +agent = ChatCompletionAgent(...) + +# Generate the agent response(s) +response = await agent.get_response(messages="user input") + +# Generate another response, continuing the conversation thread from the first response. +response2 = await agent.get_response(messages="user input", thread=response.thread) +# process agent response(s) + +``` + +Calling the `invoke` method returns an `AsyncIterable` of `AgentResponseItem[ChatMessageContent]`. ```python # Define agent @@ -326,13 +315,11 @@ async for response in agent.invoke_stream(messages="user input", thread=thread): ::: zone-end - -#### How-To: +### How-To For an end-to-end example for a `ChatCompletionAgent`, see: - [How-To: `ChatCompletionAgent`](./examples/example-chat-agent.md) - > [!div class="nextstepaction"] > [Exploring the OpenAI Assistant Agent](./assistant-agent.md) From 74c6a2a1e0a8e178e96585004eace9dd388f1aaf Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Thu, 3 Apr 2025 19:19:46 +0900 Subject: [PATCH 106/117] Update Agent Framework Docs (#512) * Update Agent framework docs. Add Responses agent related content. * Clean up * More cleanup * Unique metadata for OpenAI Responses Agent page * Fix alt text warning * Responses Agent cleanup * Fix uri * Clarify AgentChat patterns --- semantic-kernel/Frameworks/agent/TOC.yml | 2 + .../Frameworks/agent/agent-architecture.md | 56 ++- .../Frameworks/agent/agent-chat.md | 28 +- .../Frameworks/agent/agent-functions.md | 10 +- .../Frameworks/agent/agent-streaming.md | 146 +++++++- .../Frameworks/agent/agent-templates.md | 5 +- .../Frameworks/agent/assistant-agent.md | 159 +++++++- .../Frameworks/agent/azure-ai-agent.md | 160 ++++++++- .../Frameworks/agent/chat-completion-agent.md | 151 +++++++- .../examples/example-agent-collaboration.md | 14 +- .../agent/examples/example-assistant-code.md | 10 +- .../examples/example-assistant-search.md | 10 +- .../agent/examples/example-chat-agent.md | 40 +-- semantic-kernel/Frameworks/agent/index.md | 20 +- .../Frameworks/agent/responses-agent.md | 340 ++++++++++++++++++ .../agent-framework-rc-migration-guide.md | 16 +- 16 files changed, 1052 insertions(+), 115 deletions(-) create mode 100644 semantic-kernel/Frameworks/agent/responses-agent.md diff --git a/semantic-kernel/Frameworks/agent/TOC.yml b/semantic-kernel/Frameworks/agent/TOC.yml index 2d6709d8..a8eda2fb 100644 --- a/semantic-kernel/Frameworks/agent/TOC.yml +++ b/semantic-kernel/Frameworks/agent/TOC.yml @@ -8,6 +8,8 @@ href: assistant-agent.md - name: Azure AI Agent href: azure-ai-agent.md +- name: OpenAI Responses Agent + href: responses-agent.md - name: Agent Collaboration href: agent-chat.md - name: Create an Agent from a Template diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index 8027b20f..d0babdc9 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ 
b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -11,7 +11,7 @@ ms.service: semantic-kernel # An Overview of the Agent Architecture > [!IMPORTANT] -> Single-agent features, such as ChatCompletionAgent and OpenAIAssistantAgent, are in the release candidate stage. These features are nearly complete and generally stable, though they may undergo minor refinements or optimizations before reaching full general availability. However, agent chat patterns are still in the experimental stage. These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. +> `AgentChat` patterns are in the experimental stage. These patterns are under active development and may change significantly before advancing to the preview or release candidate stage. This article covers key concepts in the architecture of the Agent Framework, including foundational principles, design objectives, and strategic goals. @@ -20,7 +20,7 @@ This article covers key concepts in the architecture of the Agent Framework, inc The `Agent Framework` was developed with the following key priorities in mind: -- The _Semantic Kernel_ framework serves as the core foundation for implementing agent functionalities. +- The Semantic Kernel agent framework serves as the core foundation for implementing agent functionalities. - Multiple agents can collaborate within a single conversation, while integrating human input. - An agent can engage in and manage multiple concurrent conversations simultaneously. - Different types of agents can participate in the same conversation, each contributing their unique capabilities. @@ -28,7 +28,7 @@ The `Agent Framework` was developed with the following key priorities in mind: ## Agent -The abstract `Agent` class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [`Kernel`](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [`ChatCompletionAgent`](./chat-completion-agent.md) and the [`OpenAIAssistantAgent`](./assistant-agent.md), both of which leverage the Kernel's capabilities to execute their respective functions. +The abstract `Agent` class serves as the core abstraction for all types of agents, providing a foundational structure that can be extended to create more specialized agents. One key subclass is _Kernel Agent_, which establishes a direct association with a [`Kernel`](../../concepts/kernel.md) object. This relationship forms the basis for more specific agent implementations, such as the [`ChatCompletionAgent`](./chat-completion-agent.md), [`OpenAIAssistantAgent`](./assistant-agent.md), [`AzureAIAgent`](./azure-ai-agent.md), or [`OpenAIResponsesAgent`](./responses-agent.md), all of which leverage the Kernel's capabilities to execute their respective functions. 
::: zone pivot="programming-language-csharp" @@ -39,7 +39,9 @@ The abstract `Agent` class serves as the core abstraction for all types of agent ::: zone pivot="programming-language-python" -- [`agent`](/python/api/semantic-kernel/semantic_kernel.agents.agent) +The underlying Semantic Kernel `Agent` abstraction can be found here: + +- [`Agent`](/python/api/semantic-kernel/semantic_kernel.agents.agent) ::: zone-end @@ -53,9 +55,10 @@ Agents can either be invoked directly to perform tasks or orchestrated within an #### Deep Dive: -- [`AzureAIAgent`](./azure-ai-agent.md) - [`ChatCompletionAgent`](./chat-completion-agent.md) - [`OpenAIAssistantAgent`](./assistant-agent.md) +- [`AzureAIAgent`](./azure-ai-agent.md) +- [`OpenAIResponsesAgent`](./responses-agent.md) B[Ask LLM To Write Documentation] --> C[Publish Documentation To Public]](../../../media/first-process-flow.png) @@ -115,20 +115,30 @@ public class GenerateDocumentationStep : KernelProcessStep(); var generatedDocumentationResponse = await chatCompletionService.GetChatMessageContentAsync(this._state.ChatHistory!); - await context.EmitEventAsync("DocumentationGenerated", generatedDocumentationResponse.Content!.ToString()); + DocumentInfo generatedContent = new() + { + Id = Guid.NewGuid().ToString(), + Title = $"Generated document - {productInfo.Title}", + Content = generatedDocumentationResponse.Content!, + }; + + this._state!.LastGeneratedDocument = generatedContent; + + await context.EmitEventAsync("DocumentationGenerated", generatedContent); } public class GeneratedDocumentationState { + public DocumentInfo LastGeneratedDocument { get; set; } = new(); public ChatHistory? ChatHistory { get; set; } } } @@ -137,17 +147,27 @@ public class GenerateDocumentationStep : KernelProcessStep [!TIP] -> **_Event Routing in Process Framework:_** You may be wondering how events that are sent to steps are routed to KernelFunctions within the step. In the code above, each step has only defined a single KernelFunction and each KernelFunction has only a single parameter (other than Kernel and the step context which are special, more on that later). When the event containing the generated documentation is sent to the `docsPublishStep` it will be passed to the `docs` parameter of the `PublishDocumentation` KernelFunction of the `docsGenerationStep` step because there is no other choice. However, steps can have multiple KernelFunctions and KernelFunctions can have multiple parameters, in these advanced scenarios you need to specify the target function and parameter. +> **_Event Routing in Process Framework:_** You may be wondering how events that are sent to steps are routed to KernelFunctions within the step. In the code above, each step has only defined a single KernelFunction and each KernelFunction has only a single parameter (other than Kernel and the step context which are special, more on that later). When the event containing the generated documentation is sent to the `docsPublishStep` it will be passed to the `document` parameter of the `PublishDocumentation` KernelFunction of the `docsGenerationStep` step because there is no other choice. However, steps can have multiple KernelFunctions and KernelFunctions can have multiple parameters, in these advanced scenarios you need to specify the target function and parameter. 
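To make that explicit targeting concrete, here is a minimal sketch using the step and event names from this example. It assumes the steps have already been added to a `ProcessBuilder` as shown earlier; naming both the function and the parameter is only needed in the advanced scenarios described above.

```csharp
// Route the approval event and name the receiving function and parameter explicitly.
// This is only required when the target step exposes more than one KernelFunction,
// or when the target function takes more than one (non-special) parameter.
docsProofreadStep
    .OnEvent("DocumentationApproved")
    .SendEventTo(new(docsPublishStep, functionName: "PublishDocumentation", parameterName: "document"));
```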
::: zone-end diff --git a/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md b/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md index 445f9d10..22217ad3 100644 --- a/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md +++ b/semantic-kernel/Frameworks/process/examples/example-human-in-loop.md @@ -30,14 +30,15 @@ The first change we need to make to the process is to make the publishing step w public class PublishDocumentationStep : KernelProcessStep { [KernelFunction] - public void PublishDocumentation(string docs, bool isApproved) // added the isApproved parameter + public DocumentInfo PublishDocumentation(DocumentInfo document, bool userApproval) // added the userApproval parameter { // Only publish the documentation if it has been approved - if (isApproved) + if (userApproval) { // For example purposes we just write the generated docs to the console - Console.WriteLine($"{nameof(PublishDocumentationStep)}:\n\tPublishing product documentation:\n\n{docs}"); + Console.WriteLine($"[{nameof(PublishDocumentationStep)}]:\tPublishing product documentation approved by user: \n{document.Title}\n{document.Content}"); } + return document; } } ``` @@ -51,33 +52,28 @@ public class PublishDocumentationStep : KernelProcessStep ::: zone pivot="programming-language-java" ::: zone-end -With the code above, the `PublishDocumentation` function in the `PublishDocumentationStep` will only be invoked when the generated documentation has been sent to the `docs` parameter and the result of the approval has been sent to the `isApproved` parameter. +With the code above, the `PublishDocumentation` function in the `PublishDocumentationStep` will only be invoked when the generated documentation has been sent to the `document` parameter and the result of the approval has been sent to the `userApproval` parameter. -We can now update the logic in `ProofreadStep` step to additionally emit an event to our external pubsub system which will notify the human approver that there is a new request. +We can now reuse the existing logic of `ProofreadStep` step to additionally emit an event to our external pubsub system which will notify the human approver that there is a new request. ::: zone pivot="programming-language-csharp" ```csharp // A process step to publish documentation -public class PublishDocumentationStep : KernelProcessStep +public class ProofReadDocumentationStep : KernelProcessStep { ... - if (formattedResponse.MeetsExpectations) - { - await context.EmitEventAsync("DocumentationApproved", data: documentation); - // Emit event to external pubsub to trigger human in the loop approval. - await context.EmitExternalEventAsync("HumanApprovalRequired", data: documentation); - } - else + if (formattedResponse.MeetsExpectations) { - await context.EmitEventAsync("DocumentationRejected", data: new { Explanation = formattedResponse.Explanation, Suggestions = formattedResponse.Suggestions}); + // Events that are getting piped to steps that will be resumed, like PublishDocumentationStep.OnPublishDocumentation + // require events to be marked as public so they are persisted and restored correctly + await context.EmitEventAsync("DocumentationApproved", data: document, visibility: KernelProcessEventVisibility.Public); } ... } ``` - -Now whenever the newly generated documentation is approved by the proofread agent, the approved documents will be queued on the publishing step, and a human will be notified via our external pubsub system. 
Let's update the process flow to match this new design. +Since we want to publish the newly generated documentation when it is approved by the proofread agent, the approved documents will be queued on the publishing step. In addition, a human will be notified via our external pubsub system with an update on the latest document. Let's update the process flow to match this new design. ::: zone-end @@ -100,16 +96,25 @@ var docsGenerationStep = processBuilder.AddStepFromType(); var docsPublishStep = processBuilder.AddStepFromType(); +// internal component that allows emitting SK events externally, a list of topic names +// is needed to link them to existing SK events +var proxyStep = processBuilder.AddProxyStep(["RequestUserReview", "PublishDocumentation"]); + // Orchestrate the events processBuilder - .OnInputEvent("Start") + .OnInputEvent("StartDocumentGeneration") .SendEventTo(new(infoGatheringStep)); +processBuilder + .OnInputEvent("UserRejectedDocument") + .SendEventTo(new(docsGenerationStep, functionName: "ApplySuggestions")); + // When external human approval event comes in, route it to the 'isApproved' parameter of the docsPublishStep processBuilder - .OnInputEvent("HumanApprovalResponse") - .SendEventTo(new(docsPublishStep, parameterName: "isApproved")); + .OnInputEvent("UserApprovedDocument") + .SendEventTo(new(docsPublishStep, parameterName: "userApproval")); +// Hooking up the rest of the process steps infoGatheringStep .OnFunctionResult() .SendEventTo(new(docsGenerationStep, functionName: "GenerateDocumentation")); @@ -122,15 +127,100 @@ docsProofreadStep .OnEvent("DocumentationRejected") .SendEventTo(new(docsGenerationStep, functionName: "ApplySuggestions")); -// When the proofreader approves the documentation, send it to the 'docs' parameter of the docsPublishStep +// When the proofreader approves the documentation, send it to the 'document' parameter of the docsPublishStep +// Additionally, the generated document is emitted externally for user approval using the pre-configured proxyStep docsProofreadStep .OnEvent("DocumentationApproved") - .SendEventTo(new(docsPublishStep, parameterName: "docs")); + // [NEW] addition to emit messages externally + .EmitExternalEvent(proxyStep, "RequestUserReview") // Hooking up existing "DocumentationApproved" to external topic "RequestUserReview" + .SendEventTo(new(docsPublishStep, parameterName: "document")); + +// When event is approved by user, it gets published externally too +docsPublishStep + .OnFunctionResult() + // [NEW] addition to emit messages externally + .EmitExternalEvent(proxyStep, "PublishDocumentation"); var process = processBuilder.Build(); return process; ``` +Finally, an implementation of the interface `IExternalKernelProcessMessageChannel` should be provided since it is internally use by the new `ProxyStep`. This interface is used to emit messages externally. The implementation of this interface will depend on the external system that you are using. In this example, we will use a custom client that we have created to send messages to an external pubsub system. + +``` csharp +// Example of potential custom IExternalKernelProcessMessageChannel implementation +public class MyCloudEventClient : IExternalKernelProcessMessageChannel +{ + private MyCustomClient? _customClient; + + // Example of an implementation for the process + public async Task EmitExternalEventAsync(string externalTopicEvent, KernelProcessProxyMessage message) + { + // logic used for emitting messages externally. 
+ // Since all topics are received here potentially + // some if else/switch logic is needed to map correctly topics with external APIs/endpoints. + if (this._customClient != null) + { + switch (externalTopicEvent) + { + case "RequestUserReview": + var requestDocument = message.EventData.ToObject() as DocumentInfo; + // As an example only invoking a sample of a custom client with a different endpoint/api route + this._customClient.InvokeAsync("REQUEST_USER_REVIEW", requestDocument); + return; + + case "PublishDocumentation": + var publishedDocument = message.EventData.ToObject() as DocumentInfo; + // As an example only invoking a sample of a custom client with a different endpoint/api route + this._customClient.InvokeAsync("PUBLISH_DOC_EXTERNALLY", publishedDocument); + return; + } + } + } + + public async ValueTask Initialize() + { + // logic needed to initialize proxy step, can be used to initialize custom client + this._customClient = new MyCustomClient("http://localhost:8080"); + this._customClient.Initialize(); + } + + public async ValueTask Uninitialize() + { + // Cleanup to be executed when proxy step is uninitialized + if (this._customClient != null) + { + await this._customClient.ShutdownAsync(); + } + } +} +``` +Finally to allow the process `ProxyStep` to make use of the `IExternalKernelProcessMessageChannel` implementation, in this case `MyCloudEventClient`, we need to pipe it properly. + +When using Local Runtime, the implemented class can be passed when invoking `StartAsync` on the `KernelProcess` class. + +```csharp +KernelProcess process; +IExternalKernelProcessMessageChannel myExternalMessageChannel = new MyCloudEventClient(); +// Start the process with the external message channel +await process.StartAsync(kernel, new KernelProcessEvent + { + Id = inputEvent, + Data = input, + }, + myExternalMessageChannel) +``` + +When using Dapr Runtime, the plumbing has to be done through dependency injection at the Program setup of the project. + +```csharp +var builder = WebApplication.CreateBuilder(args); +... +// depending on the application a singleton or scoped service can be used +// Injecting SK Process custom client IExternalKernelProcessMessageChannel implementation +builder.Services.AddSingleton(); +``` + ::: zone-end ::: zone pivot="programming-language-python" @@ -142,16 +232,16 @@ return process; Two changes have been made to the process flow: -- Added an input event named `HumanApprovalResponse` that will be routed to the `isApproved` parameter of the `docsPublishStep` step. -- Since the KernelFunction in `docsPublishStep` now has two parameters, we need to update the existing route to specify the parameter name of `docs`. +- Added an input event named `HumanApprovalResponse` that will be routed to the `userApproval` parameter of the `docsPublishStep` step. +- Since the KernelFunction in `docsPublishStep` now has two parameters, we need to update the existing route to specify the parameter name of `document`. -Run the process as you did before and notice that this time when the proofreader approves the generated documentation and sends it to the `docs` parameter of the `docPublishStep` step, the step is no longer invoked because it is waiting for the `isApproved` parameter. At this point the process goes idle because there are no steps ready to be invoked and the call that we made to start the process returns. The process will remain in this idle state until our "human-in-the-loop" takes action to approve or reject the publish request. 
Once this has happened and the result has been communicated back to our program, we can restart the process with the result. +Run the process as you did before and notice that this time when the proofreader approves the generated documentation and sends it to the `document` parameter of the `docPublishStep` step, the step is no longer invoked because it is waiting for the `userApproval` parameter. At this point the process goes idle because there are no steps ready to be invoked and the call that we made to start the process returns. The process will remain in this idle state until our "human-in-the-loop" takes action to approve or reject the publish request. Once this has happened and the result has been communicated back to our program, we can restart the process with the result. ::: zone pivot="programming-language-csharp" ```csharp // Restart the process with approval for publishing the documentation. -await process.StartAsync(kernel, new KernelProcessEvent { Id = "HumanApprovalResponse", Data = true }); +await process.StartAsync(kernel, new KernelProcessEvent { Id = "UserApprovedDocument", Data = true }); ``` ::: zone-end @@ -163,4 +253,6 @@ await process.StartAsync(kernel, new KernelProcessEvent { Id = "HumanApprovalRes ::: zone pivot="programming-language-java" ::: zone-end -When the process is started again with the `HumanApprovalResponse` it will pick up from where it left off and invoke the `docsPublishStep` with `isApproved` set to `true` and our documentation will be published. +When the process is started again with the `UserApprovedDocument` it will pick up from where it left off and invoke the `docsPublishStep` with `userApproval` set to `true` and our documentation will be published. If it is started again with the `UserRejectedDocument` event, the process will kick off the `ApplySuggestions` function in the `docsGenerationStep` step and the process will continue as before. + +The process is now complete and we have successfully added a human-in-the-loop step to our process. The process can now be used to generate documentation for our product, proofread it, and publish it once it has been approved by a human. 
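As a recap of the resume flow from the host application's side, the sketch below strings the calls together. It assumes the process built above with the input events `StartDocumentGeneration`, `UserApprovedDocument`, and `UserRejectedDocument`; the product-information payload and the rejection feedback are placeholder values.

```csharp
// Start the process. It runs until the publish step and then goes idle,
// because the 'userApproval' parameter has not been supplied yet.
await process.StartAsync(kernel, new KernelProcessEvent
{
    Id = "StartDocumentGeneration",
    Data = "<information about the new product>" // placeholder payload
});

// Later, once the human decision arrives from the external pubsub system:

// Approve: routed to the 'userApproval' parameter of the publish step.
await process.StartAsync(kernel, new KernelProcessEvent { Id = "UserApprovedDocument", Data = true });

// Or reject: routed to the 'ApplySuggestions' function of the generation step.
// A plain string payload is assumed here; use whatever shape that function expects.
await process.StartAsync(kernel, new KernelProcessEvent
{
    Id = "UserRejectedDocument",
    Data = "Shorten the introduction and add a troubleshooting section."
});
```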
From 4037b5c0398cb95e5d28192c0abf2272772792e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Estefan=C3=ADa=20Tenorio?= <8483207+esttenorio@users.noreply.github.com> Date: Fri, 11 Apr 2025 09:51:28 -0700 Subject: [PATCH 113/117] updating version --- .../Frameworks/process/examples/example-first-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/Frameworks/process/examples/example-first-process.md b/semantic-kernel/Frameworks/process/examples/example-first-process.md index 3e600ccb..cdd181f7 100644 --- a/semantic-kernel/Frameworks/process/examples/example-first-process.md +++ b/semantic-kernel/Frameworks/process/examples/example-first-process.md @@ -28,7 +28,7 @@ Before we get started, make sure you have the required Semantic Kernel packages ::: zone pivot="programming-language-csharp" ```dotnetcli -dotnet add package Microsoft.SemanticKernel.Process.LocalRuntime --version 1.45.0-alpha +dotnet add package Microsoft.SemanticKernel.Process.LocalRuntime --version 1.46.0-alpha ``` ::: zone-end From de1619f4be5263ef48b00b57242531ead51bfc8a Mon Sep 17 00:00:00 2001 From: westey <164392973+westey-m@users.noreply.github.com> Date: Fri, 11 Apr 2025 18:10:38 +0100 Subject: [PATCH 114/117] Update naming and links for Neon --- .../out-of-the-box-connectors/index.md | 4 ++-- .../out-of-the-box-connectors/postgres-connector.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md index 11721759..051e1dc2 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/index.md @@ -33,7 +33,7 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | | Milvus | Planned | | | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Neon |Use [Postgres](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | +| [Neon Serverless Postgres](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/neon1722366567200.neon_serverless_postgres_azure_prod) |Use [Postgres Connector](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | | [Pinecone](./pinecone-connector.md) | ✅ | ❌ | Microsoft Semantic Kernel Project | | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | @@ -56,7 +56,7 @@ Semantic Kernel provides a number of out-of-the-box Vector Store integrations ma | [Faiss](./faiss-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [In-Memory](./inmemory-connector.md) | ✅ | N/A | Microsoft Semantic Kernel Project | | [MongoDB](./mongodb-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | -| Neon |Use [Postgres](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | +| [Neon Serverless Postgres](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/neon1722366567200.neon_serverless_postgres_azure_prod) |Use [Postgres Connector](./postgres-connector.md)| ✅ | Microsoft Semantic Kernel Project | | [Pinecone](./pinecone-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | [Postgres](./postgres-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | | 
[Qdrant](./qdrant-connector.md) | ✅ | ✅ | Microsoft Semantic Kernel Project | diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md index 612ce262..53beaf0a 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/postgres-connector.md @@ -17,7 +17,7 @@ ms.service: semantic-kernel ## Overview -The Postgres Vector Store connector can be used to access and manage data in Postgres and also supports [Neon Serverless Postgres](https://neon.tech/). +The Postgres Vector Store connector can be used to access and manage data in Postgres and also supports [Neon Serverless Postgres](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/neon1722366567200.neon_serverless_postgres_azure_prod). The connector has the following characteristics. From 65fbc59a55ba783f016ed501958b084a208b4d6c Mon Sep 17 00:00:00 2001 From: Devis Lucato Date: Fri, 11 Apr 2025 10:19:52 -0700 Subject: [PATCH 115/117] Fix typos --- semantic-kernel/Frameworks/agent/agent-architecture.md | 4 ++-- semantic-kernel/Frameworks/agent/agent-functions.md | 2 +- semantic-kernel/Frameworks/agent/agent-streaming.md | 4 ++-- .../agent/examples/example-agent-collaboration.md | 6 +++--- semantic-kernel/Frameworks/agent/index.md | 2 +- .../Frameworks/process/examples/example-first-process.md | 2 +- .../function-calling/function-choice-behaviors.md | 2 +- .../concepts/plugins/adding-logic-apps-as-plugins.md | 2 +- .../concepts/text-search/text-search-function-calling.md | 2 +- .../how-to/build-your-own-connector.md | 2 +- .../how-to/vector-store-data-ingestion.md | 2 +- .../concepts/vector-store-connectors/hybrid-search.md | 2 +- semantic-kernel/concepts/vector-store-connectors/index.md | 2 +- .../out-of-the-box-connectors/qdrant-connector.md | 2 +- .../concepts/vector-store-connectors/vector-search.md | 2 +- semantic-kernel/get-started/supported-languages.md | 2 +- semantic-kernel/support/glossary.md | 2 +- .../support/migration/agent-framework-rc-migration-guide.md | 2 +- semantic-kernel/support/migration/vectorstore-march-2025.md | 4 ++-- 19 files changed, 24 insertions(+), 24 deletions(-) diff --git a/semantic-kernel/Frameworks/agent/agent-architecture.md b/semantic-kernel/Frameworks/agent/agent-architecture.md index f829f480..98d555f0 100644 --- a/semantic-kernel/Frameworks/agent/agent-architecture.md +++ b/semantic-kernel/Frameworks/agent/agent-architecture.md @@ -68,7 +68,7 @@ Agents can either be invoked directly to perform tasks or orchestrated within an ## Agent Thread The abstract `AgentThread` class serves as the core abstraction for threads or conversation state. -It abstracts away the different ways in which convesation state may be managed for different agents. +It abstracts away the different ways in which conversation state may be managed for different agents. Stateful agent services often store conversation state in the service, and you can interact with it via an id. Other agents may require the entire chat history to be passed to the agent on each invocation, in which @@ -81,7 +81,7 @@ If a different agent thread type is used with the `AzureAIAgent`, we fail fast d ## Agent Chat -The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. 
This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a stategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. +The [`AgentChat`](./agent-chat.md) class serves as the foundational component that enables agents of any type to engage in a specific conversation. This class provides the essential capabilities for managing agent interactions within a chat environment. Building on this, the [`AgentGroupChat`](./agent-chat.md#creating-an-agentgroupchat) class extends these capabilities by offering a strategy-based container, which allows multiple agents to collaborate across numerous interactions within the same conversation. > [!IMPORTANT] > The current `OpenAIResponsesAgent` is not supported as part of Semantic Kernel's `AgentGroupChat` patterns. Stayed tuned for updates. diff --git a/semantic-kernel/Frameworks/agent/agent-functions.md b/semantic-kernel/Frameworks/agent/agent-functions.md index ba38d91b..d531b1f9 100644 --- a/semantic-kernel/Frameworks/agent/agent-functions.md +++ b/semantic-kernel/Frameworks/agent/agent-functions.md @@ -1,6 +1,6 @@ --- title: Configuring Agents with Semantic Kernel Plugins. -description: Describes how to use Semantic Kernal plugins and function calling with agents. +description: Describes how to use Semantic Kernel plugins and function calling with agents. zone_pivot_groups: programming-languages author: crickman ms.topic: tutorial diff --git a/semantic-kernel/Frameworks/agent/agent-streaming.md b/semantic-kernel/Frameworks/agent/agent-streaming.md index 6052afe7..09b5e9bc 100644 --- a/semantic-kernel/Frameworks/agent/agent-streaming.md +++ b/semantic-kernel/Frameworks/agent/agent-streaming.md @@ -120,7 +120,7 @@ OpenAIAssistantAgent agent = ...; // Create a thread for the agent conversation. OpenAIAssistantAgentThread agentThread = new(assistantClient); -// Cerate a user message +// Create a user message var message = new ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) @@ -148,7 +148,7 @@ OpenAIAssistantAgent agent = ...; // Create a thread for the agent conversation. OpenAIAssistantAgentThread agentThread = new(assistantClient, "your-existing-thread-id"); -// Cerate a user message +// Create a user message var message = new ChatMessageContent(AuthorRole.User, ""); // Generate the streamed agent response(s) diff --git a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md index 6a7dd1a5..d8c301cc 100644 --- a/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md +++ b/semantic-kernel/Frameworks/agent/examples/example-agent-collaboration.md @@ -391,7 +391,7 @@ RULES: ::: zone-end ::: zone pivot="programming-language-csharp" -The _Writer_ agent is similiar, but doesn't require the specification of Execution Settings since it isn't configured with a plug-in. +The _Writer_ agent is similar, but doesn't require the specification of Execution Settings since it isn't configured with a plug-in. Here the _Writer_ is given a single-purpose task, follow direction and rewrite the content. @@ -402,7 +402,7 @@ ChatCompletionAgent agentWriter = Name = WriterName, Instructions = """ - Your sole responsiblity is to rewrite content according to review suggestions. 
+ Your sole responsibility is to rewrite content according to review suggestions. - Always apply all review direction. - Always revise the content in its entirety without explanation. @@ -414,7 +414,7 @@ ChatCompletionAgent agentWriter = ::: zone-end ::: zone pivot="programming-language-python" -The _Writer_ agent is similiar. It is given a single-purpose task, follow direction and rewrite the content. +The _Writer_ agent is similar. It is given a single-purpose task, follow direction and rewrite the content. ```python agent_writer = ChatCompletionAgent( kernel=kernel, diff --git a/semantic-kernel/Frameworks/agent/index.md b/semantic-kernel/Frameworks/agent/index.md index 800b0f04..7ada9c47 100644 --- a/semantic-kernel/Frameworks/agent/index.md +++ b/semantic-kernel/Frameworks/agent/index.md @@ -22,7 +22,7 @@ The Semantic Kernel Agent Framework provides a platform within the Semantic Kern ![Orange gradient user icon representing AI agent](../../media/agentSKdocs3.png) ![Red-pink gradient user icon representing AI agent](../../media/agentSKdocs4.png) -An **AI agent** is a software entity designed to perform tasks autonomously or semi-autonomously by recieving input, processing information, and taking actions to achieve specific goals. +An **AI agent** is a software entity designed to perform tasks autonomously or semi-autonomously by receiving input, processing information, and taking actions to achieve specific goals. Agents can send and receive messages, generating responses using a combination of models, tools, human inputs, or other customizable components. diff --git a/semantic-kernel/Frameworks/process/examples/example-first-process.md b/semantic-kernel/Frameworks/process/examples/example-first-process.md index 88d30f2f..97a611be 100644 --- a/semantic-kernel/Frameworks/process/examples/example-first-process.md +++ b/semantic-kernel/Frameworks/process/examples/example-first-process.md @@ -21,7 +21,7 @@ Built for extensibility, the Process Framework supports diverse operational patt ## Getting Started -The Sematic Kernel Process Framework can be used to infuse AI into just about any business process you can think of. As an illustrative example to get started, let's look at building a process for generating documentation for a new product. +The Semantic Kernel Process Framework can be used to infuse AI into just about any business process you can think of. As an illustrative example to get started, let's look at building a process to generate documentation for a new product. Before we get started, make sure you have the required Semantic Kernel packages installed: diff --git a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/function-choice-behaviors.md b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/function-choice-behaviors.md index 07a10724..c6794212 100644 --- a/semantic-kernel/concepts/ai-services/chat-completion/function-calling/function-choice-behaviors.md +++ b/semantic-kernel/concepts/ai-services/chat-completion/function-calling/function-choice-behaviors.md @@ -618,7 +618,7 @@ Certain aspects of the function choice behaviors can be configured through optio ## Function Invocation -Function invocation is the process whereby Sematic Kernel invokes functions chosen by the AI model. For more details on function invocation see [function invocation article](./function-invocation.md). +Function invocation is the process whereby Semantic Kernel invokes functions chosen by the AI model. 
For more details on function invocation see [function invocation article](./function-invocation.md). ## Supported AI Connectors diff --git a/semantic-kernel/concepts/plugins/adding-logic-apps-as-plugins.md b/semantic-kernel/concepts/plugins/adding-logic-apps-as-plugins.md index aab94dc3..72874e0f 100644 --- a/semantic-kernel/concepts/plugins/adding-logic-apps-as-plugins.md +++ b/semantic-kernel/concepts/plugins/adding-logic-apps-as-plugins.md @@ -203,7 +203,7 @@ Below is an example in C# that leverages interactive auth to acquire a token and string ClientId = "[AAD_CLIENT_ID]"; string TenantId = "[TENANT_ID]"; string Authority = $"https://login.microsoftonline.com/{TenantId}"; -string[] Scopes = new string[] { "api://[AAD_CIENT_ID]/SKLogicApp" }; +string[] Scopes = new string[] { "api://[AAD_CLIENT_ID]/SKLogicApp" }; var app = PublicClientApplicationBuilder.Create(ClientId) .WithAuthority(Authority) diff --git a/semantic-kernel/concepts/text-search/text-search-function-calling.md b/semantic-kernel/concepts/text-search/text-search-function-calling.md index f5468da0..29a046d7 100644 --- a/semantic-kernel/concepts/text-search/text-search-function-calling.md +++ b/semantic-kernel/concepts/text-search/text-search-function-calling.md @@ -113,7 +113,7 @@ Console.WriteLine(await kernel.InvokePromptAsync("What is the Semantic Kernel? I The final sample in this section shows how to use a filter with function calling. For this sample only search results from the Microsoft Developer Blogs site will be included. An instance of `TextSearchFilter` is created and an equality clause is added to match the `devblogs.microsoft.com` site. -Ths filter will be used when the function is invoked in response to a function calling request from the model. +This filter will be used when the function is invoked in response to a function calling request from the model. ```csharp using Microsoft.SemanticKernel; diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md index 9f6faeaf..ea20d387 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/build-your-own-connector.md @@ -430,7 +430,7 @@ public Task DeleteBatchAsync(IEnumerable keys, CancellationToken cancell Secondly, if the database client does support batching, pass all requests directly to the underlying client so that it may send the entire set in one request. -## Recommended common patterns and pratices +## Recommended common patterns and practices 1. Keep `IVectorStore` and `IVectorStoreRecordCollection` implementations unsealed with virtual methods, so that developers can inherit and override if needed. 1. Always use options classes for optional settings with smart defaults. diff --git a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md index 7728b45e..7877bdf9 100644 --- a/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md +++ b/semantic-kernel/concepts/vector-store-connectors/how-to/vector-store-data-ingestion.md @@ -94,7 +94,7 @@ internal class TextParagraph Note that we are passing the value `1536` to the `VectorStoreRecordVectorAttribute`. This is the dimension size of the vector and has to match the size of vector that your chosen embedding generator produces. 
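One practical note on that constraint: because the dimension count is baked into the model annotation, it is worth guarding the embedding generation path against a mismatch. The sketch below is illustrative only; `GenerateEmbeddingAsync` stands in for your chosen embedding generation implementation, and the property and constant names are assumptions rather than part of the sample model.

```csharp
internal class TextParagraph
{
    // Must match the output size of the chosen embedding generator.
    public const int EmbeddingDimensions = 1536;

    [VectorStoreRecordVector(EmbeddingDimensions)]
    public ReadOnlyMemory<float>? TextEmbedding { get; set; }
}

// Illustrative guard when populating the record, using your chosen embedding implementation.
ReadOnlyMemory<float> embedding = await GenerateEmbeddingAsync(paragraphText);
if (embedding.Length != TextParagraph.EmbeddingDimensions)
{
    throw new InvalidOperationException(
        $"Embedding has {embedding.Length} dimensions; the model declares {TextParagraph.EmbeddingDimensions}.");
}
```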
> [!TIP] -> For more information on how to annotate your data model and what additional options are available for each attribute, refer to [definining your data model](../../../concepts/vector-store-connectors/defining-your-data-model.md). +> For more information on how to annotate your data model and what additional options are available for each attribute, refer to [defining your data model](../../../concepts/vector-store-connectors/defining-your-data-model.md). ## Read the paragraphs in the document diff --git a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md index f962d91c..61d6bfc5 100644 --- a/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/hybrid-search.md @@ -56,7 +56,7 @@ IKeywordHybridSearch collection = (IKeywordHybridSearch)vectorStor // Generate a vector for your search text, using your chosen embedding generation implementation. ReadOnlyMemory searchVector = await GenerateEmbeddingAsync("I'm looking for a hotel where customer happiness is the priority."); -// Do the search, passing an options object with a Top value to limit resulst to the single top match. +// Do the search, passing an options object with a Top value to limit results to the single top match. var searchResult = await collection.HybridSearchAsync(searchVector, ["happiness", "hotel", "customer"], new() { Top = 1 }); // Inspect the returned hotel. diff --git a/semantic-kernel/concepts/vector-store-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/index.md index 56f362eb..f449ea9c 100644 --- a/semantic-kernel/concepts/vector-store-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/index.md @@ -1,6 +1,6 @@ --- title: What are Semantic Kernel Vector Store connectors? (Preview) -description: Describes what a Semantic Kernal Vector Store is, an provides a basic example of how to use one and how to get started. +description: Describes what a Semantic Kernel Vector Store is, an provides a basic example of how to use one and how to get started. zone_pivot_groups: programming-languages author: westey-m ms.topic: conceptual diff --git a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md index bd6adebc..8fb79bb2 100644 --- a/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md +++ b/semantic-kernel/concepts/vector-store-connectors/out-of-the-box-connectors/qdrant-connector.md @@ -222,7 +222,7 @@ collection = QdrantCollection(collection_name="skhotels", data_model_type=hotel) ## Serialization -The Qdrant connector uses a model called `PointStruct` for reading and writing to the store. This can be imported from `from qdrant_client.models import PointStruct`. The serialization methods expects a output of a list of PointStruct objects, and the deserialization method recieves a list of PointStruct objects. +The Qdrant connector uses a model called `PointStruct` for reading and writing to the store. This can be imported from `from qdrant_client.models import PointStruct`. The serialization methods expects a output of a list of PointStruct objects, and the deserialization method receives a list of PointStruct objects. There are some special considerations for this that have to do with named or unnamed vectors, see below. 
diff --git a/semantic-kernel/concepts/vector-store-connectors/vector-search.md b/semantic-kernel/concepts/vector-store-connectors/vector-search.md index 8d6855ac..6a938759 100644 --- a/semantic-kernel/concepts/vector-store-connectors/vector-search.md +++ b/semantic-kernel/concepts/vector-store-connectors/vector-search.md @@ -47,7 +47,7 @@ IVectorStoreRecordCollection collection = vectorStore.GetCollectio // Generate a vector for your search text, using your chosen embedding generation implementation. ReadOnlyMemory searchVector = await GenerateEmbeddingAsync("I'm looking for a hotel where customer happiness is the priority."); -// Do the search, passing an options object with a Top value to limit resulst to the single top match. +// Do the search, passing an options object with a Top value to limit results to the single top match. var searchResult = await collection.VectorizedSearchAsync(searchVector, new() { Top = 1 }); // Inspect the returned hotel. diff --git a/semantic-kernel/get-started/supported-languages.md b/semantic-kernel/get-started/supported-languages.md index 03823889..33bc122d 100644 --- a/semantic-kernel/get-started/supported-languages.md +++ b/semantic-kernel/get-started/supported-languages.md @@ -209,7 +209,7 @@ Once you've created a prompt, you can serialize it so that it can be stored or s | Ollama | ✅ | ✅ | ❌ | | | ONNX | ✅ | ✅ | ❌ | | | OpenAI | ✅ | ✅ | ✅ | | -| Other endpoints that suppoprt OpenAI APIs | ✅ | ✅ | ✅ | Includes LLM Studio, etc. | +| Other endpoints that support OpenAI APIs | ✅ | ✅ | ✅ | Includes LLM Studio, etc. | ### Vector Store Connectors (Experimental) diff --git a/semantic-kernel/support/glossary.md b/semantic-kernel/support/glossary.md index c11cd625..2170d00c 100644 --- a/semantic-kernel/support/glossary.md +++ b/semantic-kernel/support/glossary.md @@ -11,7 +11,7 @@ ms.service: semantic-kernel 👋 Hello! We've included a Glossary below with key terminology. -| Term/Word | Defintion | +| Term/Word | Definition | |---|---| | Agent | An agent is an artificial intelligence that can answer questions and automate processes for users. There's a wide spectrum of agents that can be built, ranging from simple chat bots to fully automated AI assistants. With Semantic Kernel, we provide you with the tools to build increasingly more sophisticated agents that don't require you to be an AI expert. | | API | Application Programming Interface. A set of rules and specifications that allow software components to communicate and exchange data. | diff --git a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md index 88c7ca7e..bd57eff1 100644 --- a/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md +++ b/semantic-kernel/support/migration/agent-framework-rc-migration-guide.md @@ -19,7 +19,7 @@ As we transition some agents from the experimental stage to the release candidat In version 1.43.0 we are releasing a new common agent invocation API, that will allow all agent types to be invoked via a common API. -To enable this new API we are introducing the concept of an `AgentThread`, which represents a conversation thread and abstracts away the different thread management requirements of different agent types. For some agent types it will also, in future, allow different thread imlementations to be used with the same agent. 
+To enable this new API we are introducing the concept of an `AgentThread`, which represents a conversation thread and abstracts away the different thread management requirements of different agent types. For some agent types it will also, in future, allow different thread implementations to be used with the same agent. The common `Invoke` methods that we are introducing allow you to provide the message(s) that you want to pass to the agent and an optional `AgentThread`. If an `AgentThread` is provided, this will continue the conversation already on the `AgentThread`. If no `AgentThread` is provided, a new default thread will be created and returned as part of the response. diff --git a/semantic-kernel/support/migration/vectorstore-march-2025.md b/semantic-kernel/support/migration/vectorstore-march-2025.md index dececefe..6c51d5a8 100644 --- a/semantic-kernel/support/migration/vectorstore-march-2025.md +++ b/semantic-kernel/support/migration/vectorstore-march-2025.md @@ -69,8 +69,8 @@ one vector property, one must be chosen. ## `VectorSearchOptions` change to generic type -The `VectorSearchOptions` class is changing to `VectorSearchOptions`, to accomodate the -LINQ based filtering and new property selectors metioned above. +The `VectorSearchOptions` class is changing to `VectorSearchOptions`, to accommodate the +LINQ based filtering and new property selectors mentioned above. If you are currently constructing the options class without providing the name of the options class there will be no change. E.g. `VectorizedSearchAsync(embedding, new() { Top = 5 })`. From 0c1c3a84510e4e96f1bc09778248b71da2f81896 Mon Sep 17 00:00:00 2001 From: Sophia Lagerkrans-Pandey <163188263+sophialagerkranspandey@users.noreply.github.com> Date: Fri, 11 Apr 2025 10:35:32 -0700 Subject: [PATCH 116/117] Update semantic-kernel/concepts/vector-store-connectors/index.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- semantic-kernel/concepts/vector-store-connectors/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic-kernel/concepts/vector-store-connectors/index.md b/semantic-kernel/concepts/vector-store-connectors/index.md index f449ea9c..1c22da1b 100644 --- a/semantic-kernel/concepts/vector-store-connectors/index.md +++ b/semantic-kernel/concepts/vector-store-connectors/index.md @@ -1,6 +1,6 @@ --- title: What are Semantic Kernel Vector Store connectors? (Preview) -description: Describes what a Semantic Kernel Vector Store is, an provides a basic example of how to use one and how to get started. +description: Describes what a Semantic Kernel Vector Store is, and provides a basic example of how to use one and how to get started. 
 zone_pivot_groups: programming-languages
 author: westey-m
 ms.topic: conceptual
From cda9a7047bdbd7e2d50a6d6c0e599a271d43fcdd Mon Sep 17 00:00:00 2001
From: westey <164392973+westey-m@users.noreply.github.com>
Date: Tue, 15 Apr 2025 12:15:22 +0100
Subject: [PATCH 117/117] Update distance function messaging based on feedback
---
 .../defining-your-data-model.md | 10 +++++-----
 .../schema-with-record-definition.md | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/semantic-kernel/concepts/vector-store-connectors/defining-your-data-model.md b/semantic-kernel/concepts/vector-store-connectors/defining-your-data-model.md
index 2d19d17d..df1ae95a 100644
--- a/semantic-kernel/concepts/vector-store-connectors/defining-your-data-model.md
+++ b/semantic-kernel/concepts/vector-store-connectors/defining-your-data-model.md
@@ -42,7 +42,7 @@ public class Hotel
 [VectorStoreRecordData(IsFullTextSearchable = true)]
 public string Description { get; set; }
- [VectorStoreRecordVector(4, DistanceFunction.CosineDistance, IndexKind.Hnsw)]
+ [VectorStoreRecordVector(4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw)]
 public ReadOnlyMemory<float>? DescriptionEmbedding { get; set; }
 [VectorStoreRecordData(IsFilterable = true)]
@@ -95,7 +95,7 @@ public string HotelName { get; set; }
 Use this attribute to indicate that your property contains a vector.
 ```csharp
-[VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineDistance, IndexKind.Hnsw)]
+[VectorStoreRecordVector(Dimensions: 4, DistanceFunction.CosineSimilarity, IndexKind.Hnsw)]
 public ReadOnlyMemory<float>? DescriptionEmbedding { get; set; }
 ```
@@ -105,7 +105,7 @@ public ReadOnlyMemory<float>? DescriptionEmbedding { get; set; }
 |---------------------------|:--------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | Dimensions | Yes for collection create, optional otherwise | The number of dimensions that the vector has. This is typically required when creating a vector index for a collection. |
 | IndexKind | No | The type of index to index the vector with. Default varies by vector store type. |
-| DistanceFunction | No | The type of distance function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
+| DistanceFunction | No | The type of function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
 | StoragePropertyName | No | Can be used to supply an alternative name for the property in the database. Note that this parameter is not supported by all connectors, e.g. where alternatives like `JsonPropertyNameAttribute` is supported. |
 Common index kinds and distance function types are supplied as static values on the `Microsoft.SemanticKernel.Data.IndexKind` and `Microsoft.SemanticKernel.Data.DistanceFunction` classes.
@@ -218,7 +218,7 @@ VectorStoreRecordVectorField(dimensions=4, distance_function=DistanceFunction.CO
 |---------------------------|:--------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | dimensions | Yes for collection create, optional otherwise | The number of dimensions that the vector has. This is typically required when creating a vector index for a collection. |
 | index_kind | No | The type of index to index the vector with. Default varies by vector store type. |
-| distance_function | No | The type of distance function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
+| distance_function | No | The type of function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
 | local_embedding | No | Indicates whether the property has a local embedding associated with it, default is None. |
 | embedding_settings | No | The settings for the embedding, in the form of a dict with service_id as key and PromptExecutionSettings as value, default is None. |
 | serialize_function | No | The function to use to serialize the vector, if the type is not a list[float \| int] this function is needed, or the whole model needs to be serialized. |
@@ -339,7 +339,7 @@ private List<Float> descriptionEmbedding;
 |---------------------------|:--------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | dimensions | Yes for collection create, optional otherwise | The number of dimensions that the vector has. This is typically required when creating a vector index for a collection. |
 | indexKind | No | The type of index to index the vector with. Default varies by vector store type. |
-| distanceFunction | No | The type of distance function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
+| distanceFunction | No | The type of function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. |
 | storageName | No | Can be used to supply an alternative name for the field in the database. Note that this parameter is not supported by all connectors, e.g. where Jackson is used, in that case the storage name can be specified using Jackson annotations. |
 Common index kinds and distance function types are supplied on the `com.microsoft.semantickernel.data.vectorstorage.definition.IndexKind` and `com.microsoft.semantickernel.data.vectorstorage.definition.DistanceFunction` enums.
diff --git a/semantic-kernel/concepts/vector-store-connectors/schema-with-record-definition.md b/semantic-kernel/concepts/vector-store-connectors/schema-with-record-definition.md
index cb7ab59a..1d203e24 100644
--- a/semantic-kernel/concepts/vector-store-connectors/schema-with-record-definition.md
+++ b/semantic-kernel/concepts/vector-store-connectors/schema-with-record-definition.md
@@ -39,7 +39,7 @@ var hotelDefinition = new VectorStoreRecordDefinition
 {
 new VectorStoreRecordKeyProperty("HotelId", typeof(ulong)),
 new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true },
 new VectorStoreRecordDataProperty("Description", typeof(string)) { IsFullTextSearchable = true },
- new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(float)) { Dimensions = 4, DistanceFunction = DistanceFunction.CosineDistance, IndexKind = IndexKind.Hnsw },
+ new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(float)) { Dimensions = 4, DistanceFunction = DistanceFunction.CosineSimilarity, IndexKind = IndexKind.Hnsw },
 }
 };
 ```
@@ -99,7 +99,7 @@ new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable =
 Use this class to indicate that your property contains a vector.
```csharp -new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(float)) { Dimensions = 4, DistanceFunction = DistanceFunction.CosineDistance, IndexKind = IndexKind.Hnsw }, +new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(float)) { Dimensions = 4, DistanceFunction = DistanceFunction.CosineSimilarity, IndexKind = IndexKind.Hnsw }, ``` #### VectorStoreRecordVectorProperty configuration settings @@ -110,7 +110,7 @@ new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(float)) { Dim | PropertyType | Yes | The type of the property on the data model. Used by the built in mappers to automatically map between the storage schema and data model and for creating indexes. | | Dimensions | Yes for collection create, optional otherwise | The number of dimensions that the vector has. This is typically required when creating a vector index for a collection. | | IndexKind | No | The type of index to index the vector with. Default varies by vector store type. | -| DistanceFunction | No | The type of distance function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. | +| DistanceFunction | No | The type of function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. | | StoragePropertyName | No | Can be used to supply an alternative name for the property in the database. Note that this parameter is not supported by all connectors, e.g. where alternatives like `JsonPropertyNameAttribute` is supported. | > [!TIP] @@ -250,7 +250,7 @@ VectorStoreRecordDataField.builder() Use this class to indicate that your field contains a vector. -```csharp +```java VectorStoreRecordVectorField.builder().withName("descriptionEmbedding") .withDimensions(4) .withIndexKind(IndexKind.HNSW) @@ -266,7 +266,7 @@ VectorStoreRecordVectorField.builder().withName("descriptionEmbedding") | fieldType | Yes | The type of the field on the data model. Used by the built in mappers to automatically map between the storage schema and data model and for creating indexes. | | dimensions | Yes for collection create, optional otherwise | The number of dimensions that the vector has. This is typically required when creating a vector index for a collection. | | indexKind | No | The type of index to index the vector with. Default varies by vector store type. | -| distanceFunction | No | The type of distance function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. | +| distanceFunction | No | The type of function to use when doing vector comparison during vector search over this vector. Default varies by vector store type. | | storageName | No | Can be used to supply an alternative name for the field in the database. Note that this parameter is not supported by all connectors, e.g. where Jackson is used, in that case the storage name can be specified using Jackson annotations. | > [!TIP]