From 10a48cdaa5878f9cb52ec4121f8313709824ce45 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 18 Feb 2025 14:13:39 -0800
Subject: [PATCH] remove outdated docs, rename directory so people are less
likely to click
Change-Id: I8025a9e1623d8064f758f5b04a062df9001487be
---
.../aistudio_gemini_prompt_freeform.ipynb | 0
...tudio_gemini_prompt_freeform_nofiles.ipynb | 0
{docs => _doc_gen}/build_docs.py | 0
docs/api/google/generativeai.md | 128 -
docs/api/google/generativeai/ChatSession.md | 244 -
.../google/generativeai/GenerativeModel.md | 478 -
docs/api/google/generativeai/_api_cache.json | 10387 ----------------
docs/api/google/generativeai/_redirects.yaml | 13 -
docs/api/google/generativeai/_toc.yaml | 509 -
docs/api/google/generativeai/all_symbols.md | 261 -
docs/api/google/generativeai/api_report.pb | Bin 54075 -> 0 bytes
docs/api/google/generativeai/caching.md | 49 -
.../generativeai/caching/CachedContent.md | 448 -
.../caching/get_default_cache_client.md | 26 -
docs/api/google/generativeai/configure.md | 86 -
.../google/generativeai/create_tuned_model.md | 246 -
docs/api/google/generativeai/delete_file.md | 28 -
.../google/generativeai/delete_tuned_model.md | 30 -
docs/api/google/generativeai/embed_content.md | 132 -
.../generativeai/embed_content_async.md | 34 -
.../api/google/generativeai/get_base_model.md | 95 -
docs/api/google/generativeai/get_file.md | 28 -
docs/api/google/generativeai/get_model.md | 95 -
docs/api/google/generativeai/get_operation.md | 28 -
.../google/generativeai/get_tuned_model.md | 95 -
docs/api/google/generativeai/list_files.md | 28 -
docs/api/google/generativeai/list_models.md | 95 -
.../google/generativeai/list_operations.md | 28 -
.../google/generativeai/list_tuned_models.md | 95 -
docs/api/google/generativeai/protos.md | 384 -
.../protos/AttributionSourceId.md | 73 -
.../AttributionSourceId/GroundingPassageId.md | 61 -
.../SemanticRetrieverChunk.md | 62 -
.../protos/BatchCreateChunksRequest.md | 64 -
.../protos/BatchCreateChunksResponse.md | 46 -
.../protos/BatchDeleteChunksRequest.md | 63 -
.../protos/BatchEmbedContentsRequest.md | 67 -
.../protos/BatchEmbedContentsResponse.md | 48 -
.../protos/BatchEmbedTextRequest.md | 77 -
.../protos/BatchEmbedTextResponse.md | 47 -
.../protos/BatchUpdateChunksRequest.md | 64 -
.../protos/BatchUpdateChunksResponse.md | 46 -
docs/api/google/generativeai/protos/Blob.md | 66 -
.../generativeai/protos/CachedContent.md | 222 -
.../protos/CachedContent/UsageMetadata.md | 47 -
.../google/generativeai/protos/Candidate.md | 186 -
.../protos/Candidate/FinishReason.md | 876 --
docs/api/google/generativeai/protos/Chunk.md | 125 -
.../google/generativeai/protos/Chunk/State.md | 699 --
.../google/generativeai/protos/ChunkData.md | 49 -
.../generativeai/protos/CitationMetadata.md | 46 -
.../generativeai/protos/CitationSource.md | 98 -
.../generativeai/protos/CodeExecution.md | 23 -
.../protos/CodeExecutionResult.md | 66 -
.../protos/CodeExecutionResult/Outcome.md | 702 --
.../google/generativeai/protos/Condition.md | 85 -
.../generativeai/protos/Condition/Operator.md | 820 --
.../api/google/generativeai/protos/Content.md | 66 -
.../generativeai/protos/ContentEmbedding.md | 46 -
.../generativeai/protos/ContentFilter.md | 68 -
docs/api/google/generativeai/protos/Corpus.md | 97 -
.../protos/CountMessageTokensRequest.md | 68 -
.../protos/CountMessageTokensResponse.md | 50 -
.../protos/CountTextTokensRequest.md | 68 -
.../protos/CountTextTokensResponse.md | 50 -
.../generativeai/protos/CountTokensRequest.md | 90 -
.../protos/CountTokensResponse.md | 62 -
.../protos/CreateCachedContentRequest.md | 46 -
.../generativeai/protos/CreateChunkRequest.md | 61 -
.../protos/CreateCorpusRequest.md | 46 -
.../protos/CreateDocumentRequest.md | 60 -
.../generativeai/protos/CreateFileRequest.md | 46 -
.../generativeai/protos/CreateFileResponse.md | 46 -
.../protos/CreatePermissionRequest.md | 60 -
.../protos/CreateTunedModelMetadata.md | 100 -
.../protos/CreateTunedModelRequest.md | 64 -
.../generativeai/protos/CustomMetadata.md | 97 -
.../api/google/generativeai/protos/Dataset.md | 48 -
.../protos/DeleteCachedContentRequest.md | 47 -
.../generativeai/protos/DeleteChunkRequest.md | 48 -
.../protos/DeleteCorpusRequest.md | 64 -
.../protos/DeleteDocumentRequest.md | 64 -
.../generativeai/protos/DeleteFileRequest.md | 47 -
.../protos/DeletePermissionRequest.md | 48 -
.../protos/DeleteTunedModelRequest.md | 47 -
.../google/generativeai/protos/Document.md | 113 -
.../protos/DynamicRetrievalConfig.md | 66 -
.../protos/DynamicRetrievalConfig/Mode.md | 651 -
.../protos/EmbedContentRequest.md | 117 -
.../protos/EmbedContentResponse.md | 47 -
.../generativeai/protos/EmbedTextRequest.md | 61 -
.../generativeai/protos/EmbedTextResponse.md | 48 -
.../google/generativeai/protos/Embedding.md | 46 -
.../api/google/generativeai/protos/Example.md | 62 -
.../generativeai/protos/ExecutableCode.md | 65 -
.../protos/ExecutableCode/Language.md | 652 -
docs/api/google/generativeai/protos/File.md | 205 -
.../google/generativeai/protos/File/State.md | 701 --
.../google/generativeai/protos/FileData.md | 60 -
.../generativeai/protos/FunctionCall.md | 64 -
.../protos/FunctionCallingConfig.md | 70 -
.../protos/FunctionCallingConfig/Mode.md | 707 --
.../protos/FunctionDeclaration.md | 86 -
.../generativeai/protos/FunctionResponse.md | 63 -
.../protos/GenerateAnswerRequest.md | 177 -
.../GenerateAnswerRequest/AnswerStyle.md | 701 --
.../protos/GenerateAnswerResponse.md | 108 -
.../GenerateAnswerResponse/InputFeedback.md | 66 -
.../InputFeedback/BlockReason.md | 676 -
.../protos/GenerateContentRequest.md | 192 -
.../protos/GenerateContentResponse.md | 88 -
.../GenerateContentResponse/PromptFeedback.md | 64 -
.../PromptFeedback/BlockReason.md | 725 --
.../GenerateContentResponse/UsageMetadata.md | 90 -
.../protos/GenerateMessageRequest.md | 142 -
.../protos/GenerateMessageResponse.md | 81 -
.../protos/GenerateTextRequest.md | 219 -
.../protos/GenerateTextResponse.md | 84 -
.../generativeai/protos/GenerationConfig.md | 297 -
.../protos/GetCachedContentRequest.md | 47 -
.../generativeai/protos/GetChunkRequest.md | 47 -
.../generativeai/protos/GetCorpusRequest.md | 47 -
.../generativeai/protos/GetDocumentRequest.md | 47 -
.../generativeai/protos/GetFileRequest.md | 47 -
.../generativeai/protos/GetModelRequest.md | 51 -
.../protos/GetPermissionRequest.md | 50 -
.../protos/GetTunedModelRequest.md | 48 -
.../protos/GoogleSearchRetrieval.md | 47 -
.../protos/GroundingAttribution.md | 61 -
.../generativeai/protos/GroundingChunk.md | 51 -
.../generativeai/protos/GroundingChunk/Web.md | 61 -
.../generativeai/protos/GroundingMetadata.md | 90 -
.../generativeai/protos/GroundingPassage.md | 60 -
.../generativeai/protos/GroundingPassages.md | 46 -
.../generativeai/protos/GroundingSupport.md | 80 -
.../generativeai/protos/HarmCategory.md | 897 --
.../generativeai/protos/Hyperparameters.md | 110 -
.../protos/ListCachedContentsRequest.md | 70 -
.../protos/ListCachedContentsResponse.md | 61 -
.../generativeai/protos/ListChunksRequest.md | 86 -
.../generativeai/protos/ListChunksResponse.md | 62 -
.../generativeai/protos/ListCorporaRequest.md | 71 -
.../protos/ListCorporaResponse.md | 62 -
.../protos/ListDocumentsRequest.md | 85 -
.../protos/ListDocumentsResponse.md | 62 -
.../generativeai/protos/ListFilesRequest.md | 61 -
.../generativeai/protos/ListFilesResponse.md | 60 -
.../generativeai/protos/ListModelsRequest.md | 70 -
.../generativeai/protos/ListModelsResponse.md | 62 -
.../protos/ListPermissionsRequest.md | 86 -
.../protos/ListPermissionsResponse.md | 62 -
.../protos/ListTunedModelsRequest.md | 103 -
.../protos/ListTunedModelsResponse.md | 62 -
.../generativeai/protos/LogprobsResult.md | 65 -
.../protos/LogprobsResult/Candidate.md | 75 -
.../protos/LogprobsResult/TopCandidates.md | 47 -
.../api/google/generativeai/protos/Message.md | 92 -
.../generativeai/protos/MessagePrompt.md | 109 -
.../generativeai/protos/MetadataFilter.md | 65 -
docs/api/google/generativeai/protos/Model.md | 240 -
docs/api/google/generativeai/protos/Part.md | 158 -
.../google/generativeai/protos/Permission.md | 118 -
.../protos/Permission/GranteeType.md | 701 --
.../generativeai/protos/Permission/Role.md | 700 --
.../generativeai/protos/PredictRequest.md | 75 -
.../generativeai/protos/PredictResponse.md | 46 -
.../generativeai/protos/QueryCorpusRequest.md | 119 -
.../protos/QueryCorpusResponse.md | 46 -
.../protos/QueryDocumentRequest.md | 119 -
.../protos/QueryDocumentResponse.md | 46 -
.../generativeai/protos/RelevantChunk.md | 59 -
.../generativeai/protos/RetrievalMetadata.md | 52 -
.../generativeai/protos/SafetyFeedback.md | 65 -
.../generativeai/protos/SafetyRating.md | 82 -
.../generativeai/protos/SafetySetting.md | 65 -
docs/api/google/generativeai/protos/Schema.md | 186 -
.../protos/Schema/PropertiesEntry.md | 101 -
.../generativeai/protos/SearchEntryPoint.md | 61 -
.../api/google/generativeai/protos/Segment.md | 91 -
.../protos/SemanticRetrieverConfig.md | 106 -
.../google/generativeai/protos/StringList.md | 46 -
.../google/generativeai/protos/TaskType.md | 802 --
.../generativeai/protos/TextCompletion.md | 80 -
.../google/generativeai/protos/TextPrompt.md | 48 -
docs/api/google/generativeai/protos/Tool.md | 90 -
.../google/generativeai/protos/ToolConfig.md | 46 -
.../protos/TransferOwnershipRequest.md | 63 -
.../protos/TransferOwnershipResponse.md | 21 -
.../google/generativeai/protos/TunedModel.md | 255 -
.../generativeai/protos/TunedModelSource.md | 63 -
.../generativeai/protos/TuningExample.md | 61 -
.../generativeai/protos/TuningExamples.md | 48 -
.../generativeai/protos/TuningSnapshot.md | 87 -
.../google/generativeai/protos/TuningTask.md | 103 -
docs/api/google/generativeai/protos/Type.md | 770 --
.../protos/UpdateCachedContentRequest.md | 59 -
.../generativeai/protos/UpdateChunkRequest.md | 60 -
.../protos/UpdateCorpusRequest.md | 60 -
.../protos/UpdateDocumentRequest.md | 60 -
.../protos/UpdatePermissionRequest.md | 64 -
.../protos/UpdateTunedModelRequest.md | 59 -
.../generativeai/protos/VideoMetadata.md | 46 -
docs/api/google/generativeai/types.md | 155 -
.../generativeai/types/AnyModelNameOptions.md | 23 -
.../types/AsyncGenerateContentResponse.md | 161 -
.../types/BaseModelNameOptions.md | 21 -
.../api/google/generativeai/types/BlobDict.md | 21 -
.../api/google/generativeai/types/BlobType.md | 22 -
.../types/BlockedPromptException.md | 21 -
.../generativeai/types/BlockedReason.md | 683 -
.../generativeai/types/BrokenResponseError.md | 21 -
.../types/CallableFunctionDeclaration.md | 145 -
.../types/CitationMetadataDict.md | 46 -
.../generativeai/types/CitationSourceDict.md | 94 -
.../google/generativeai/types/ContentDict.md | 21 -
.../generativeai/types/ContentFilterDict.md | 64 -
.../google/generativeai/types/ContentType.md | 34 -
.../google/generativeai/types/ContentsType.md | 36 -
docs/api/google/generativeai/types/File.md | 210 -
.../google/generativeai/types/FileDataDict.md | 21 -
.../google/generativeai/types/FileDataType.md | 22 -
.../generativeai/types/FunctionDeclaration.md | 125 -
.../types/FunctionDeclarationType.md | 22 -
.../generativeai/types/FunctionLibrary.md | 70 -
.../generativeai/types/FunctionLibraryType.md | 29 -
.../types/GenerateContentResponse.md | 193 -
.../generativeai/types/GenerationConfig.md | 419 -
.../types/GenerationConfigDict.md | 21 -
.../types/GenerationConfigType.md | 21 -
.../generativeai/types/HarmBlockThreshold.md | 756 --
.../google/generativeai/types/HarmCategory.md | 647 -
.../generativeai/types/HarmProbability.md | 734 --
.../types/IncompleteIterationError.md | 21 -
docs/api/google/generativeai/types/Model.md | 257 -
.../generativeai/types/ModelsIterable.md | 19 -
.../api/google/generativeai/types/PartDict.md | 21 -
.../api/google/generativeai/types/PartType.md | 31 -
.../google/generativeai/types/Permission.md | 290 -
.../google/generativeai/types/Permissions.md | 432 -
.../generativeai/types/RequestOptions.md | 209 -
.../generativeai/types/RequestOptionsType.md | 20 -
.../generativeai/types/SafetyFeedbackDict.md | 65 -
.../generativeai/types/SafetyRatingDict.md | 79 -
.../generativeai/types/SafetySettingDict.md | 62 -
docs/api/google/generativeai/types/Status.md | 61 -
.../types/StopCandidateException.md | 21 -
.../generativeai/types/StrictContentType.md | 20 -
docs/api/google/generativeai/types/Tool.md | 118 -
.../api/google/generativeai/types/ToolDict.md | 21 -
.../google/generativeai/types/ToolsType.md | 28 -
.../google/generativeai/types/TunedModel.md | 375 -
.../types/TunedModelNameOptions.md | 21 -
.../generativeai/types/TunedModelState.md | 706 --
.../google/generativeai/types/TypedDict.md | 67 -
.../types/get_default_file_client.md | 26 -
.../google/generativeai/types/to_file_data.md | 28 -
.../google/generativeai/update_tuned_model.md | 32 -
docs/api/google/generativeai/upload_file.md | 123 -
258 files changed, 45899 deletions(-)
rename {docs => _doc_gen}/ais-templates/aistudio_gemini_prompt_freeform.ipynb (100%)
rename {docs => _doc_gen}/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb (100%)
rename {docs => _doc_gen}/build_docs.py (100%)
delete mode 100644 docs/api/google/generativeai.md
delete mode 100644 docs/api/google/generativeai/ChatSession.md
delete mode 100644 docs/api/google/generativeai/GenerativeModel.md
delete mode 100644 docs/api/google/generativeai/_api_cache.json
delete mode 100644 docs/api/google/generativeai/_redirects.yaml
delete mode 100644 docs/api/google/generativeai/_toc.yaml
delete mode 100644 docs/api/google/generativeai/all_symbols.md
delete mode 100644 docs/api/google/generativeai/api_report.pb
delete mode 100644 docs/api/google/generativeai/caching.md
delete mode 100644 docs/api/google/generativeai/caching/CachedContent.md
delete mode 100644 docs/api/google/generativeai/caching/get_default_cache_client.md
delete mode 100644 docs/api/google/generativeai/configure.md
delete mode 100644 docs/api/google/generativeai/create_tuned_model.md
delete mode 100644 docs/api/google/generativeai/delete_file.md
delete mode 100644 docs/api/google/generativeai/delete_tuned_model.md
delete mode 100644 docs/api/google/generativeai/embed_content.md
delete mode 100644 docs/api/google/generativeai/embed_content_async.md
delete mode 100644 docs/api/google/generativeai/get_base_model.md
delete mode 100644 docs/api/google/generativeai/get_file.md
delete mode 100644 docs/api/google/generativeai/get_model.md
delete mode 100644 docs/api/google/generativeai/get_operation.md
delete mode 100644 docs/api/google/generativeai/get_tuned_model.md
delete mode 100644 docs/api/google/generativeai/list_files.md
delete mode 100644 docs/api/google/generativeai/list_models.md
delete mode 100644 docs/api/google/generativeai/list_operations.md
delete mode 100644 docs/api/google/generativeai/list_tuned_models.md
delete mode 100644 docs/api/google/generativeai/protos.md
delete mode 100644 docs/api/google/generativeai/protos/AttributionSourceId.md
delete mode 100644 docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
delete mode 100644 docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
delete mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
delete mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
delete mode 100644 docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
delete mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
delete mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
delete mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
delete mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
delete mode 100644 docs/api/google/generativeai/protos/Blob.md
delete mode 100644 docs/api/google/generativeai/protos/CachedContent.md
delete mode 100644 docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/Candidate.md
delete mode 100644 docs/api/google/generativeai/protos/Candidate/FinishReason.md
delete mode 100644 docs/api/google/generativeai/protos/Chunk.md
delete mode 100644 docs/api/google/generativeai/protos/Chunk/State.md
delete mode 100644 docs/api/google/generativeai/protos/ChunkData.md
delete mode 100644 docs/api/google/generativeai/protos/CitationMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/CitationSource.md
delete mode 100644 docs/api/google/generativeai/protos/CodeExecution.md
delete mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult.md
delete mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
delete mode 100644 docs/api/google/generativeai/protos/Condition.md
delete mode 100644 docs/api/google/generativeai/protos/Condition/Operator.md
delete mode 100644 docs/api/google/generativeai/protos/Content.md
delete mode 100644 docs/api/google/generativeai/protos/ContentEmbedding.md
delete mode 100644 docs/api/google/generativeai/protos/ContentFilter.md
delete mode 100644 docs/api/google/generativeai/protos/Corpus.md
delete mode 100644 docs/api/google/generativeai/protos/CountMessageTokensRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CountMessageTokensResponse.md
delete mode 100644 docs/api/google/generativeai/protos/CountTextTokensRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CountTextTokensResponse.md
delete mode 100644 docs/api/google/generativeai/protos/CountTokensRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CountTokensResponse.md
delete mode 100644 docs/api/google/generativeai/protos/CreateCachedContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateChunkRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateCorpusRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateDocumentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateFileRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateFileResponse.md
delete mode 100644 docs/api/google/generativeai/protos/CreatePermissionRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/CreateTunedModelRequest.md
delete mode 100644 docs/api/google/generativeai/protos/CustomMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/Dataset.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteChunkRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteCorpusRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteDocumentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteFileRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeletePermissionRequest.md
delete mode 100644 docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
delete mode 100644 docs/api/google/generativeai/protos/Document.md
delete mode 100644 docs/api/google/generativeai/protos/DynamicRetrievalConfig.md
delete mode 100644 docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md
delete mode 100644 docs/api/google/generativeai/protos/EmbedContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/EmbedContentResponse.md
delete mode 100644 docs/api/google/generativeai/protos/EmbedTextRequest.md
delete mode 100644 docs/api/google/generativeai/protos/EmbedTextResponse.md
delete mode 100644 docs/api/google/generativeai/protos/Embedding.md
delete mode 100644 docs/api/google/generativeai/protos/Example.md
delete mode 100644 docs/api/google/generativeai/protos/ExecutableCode.md
delete mode 100644 docs/api/google/generativeai/protos/ExecutableCode/Language.md
delete mode 100644 docs/api/google/generativeai/protos/File.md
delete mode 100644 docs/api/google/generativeai/protos/File/State.md
delete mode 100644 docs/api/google/generativeai/protos/FileData.md
delete mode 100644 docs/api/google/generativeai/protos/FunctionCall.md
delete mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig.md
delete mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
delete mode 100644 docs/api/google/generativeai/protos/FunctionDeclaration.md
delete mode 100644 docs/api/google/generativeai/protos/FunctionResponse.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateMessageRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateMessageResponse.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateTextRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GenerateTextResponse.md
delete mode 100644 docs/api/google/generativeai/protos/GenerationConfig.md
delete mode 100644 docs/api/google/generativeai/protos/GetCachedContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetChunkRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetCorpusRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetDocumentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetFileRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetModelRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetPermissionRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GetTunedModelRequest.md
delete mode 100644 docs/api/google/generativeai/protos/GoogleSearchRetrieval.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingAttribution.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingChunk.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingChunk/Web.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingPassage.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingPassages.md
delete mode 100644 docs/api/google/generativeai/protos/GroundingSupport.md
delete mode 100644 docs/api/google/generativeai/protos/HarmCategory.md
delete mode 100644 docs/api/google/generativeai/protos/Hyperparameters.md
delete mode 100644 docs/api/google/generativeai/protos/ListCachedContentsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListCachedContentsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListChunksRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListChunksResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListCorporaRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListCorporaResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListDocumentsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListDocumentsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListFilesRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListFilesResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListModelsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListModelsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListPermissionsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListPermissionsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/ListTunedModelsRequest.md
delete mode 100644 docs/api/google/generativeai/protos/ListTunedModelsResponse.md
delete mode 100644 docs/api/google/generativeai/protos/LogprobsResult.md
delete mode 100644 docs/api/google/generativeai/protos/LogprobsResult/Candidate.md
delete mode 100644 docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md
delete mode 100644 docs/api/google/generativeai/protos/Message.md
delete mode 100644 docs/api/google/generativeai/protos/MessagePrompt.md
delete mode 100644 docs/api/google/generativeai/protos/MetadataFilter.md
delete mode 100644 docs/api/google/generativeai/protos/Model.md
delete mode 100644 docs/api/google/generativeai/protos/Part.md
delete mode 100644 docs/api/google/generativeai/protos/Permission.md
delete mode 100644 docs/api/google/generativeai/protos/Permission/GranteeType.md
delete mode 100644 docs/api/google/generativeai/protos/Permission/Role.md
delete mode 100644 docs/api/google/generativeai/protos/PredictRequest.md
delete mode 100644 docs/api/google/generativeai/protos/PredictResponse.md
delete mode 100644 docs/api/google/generativeai/protos/QueryCorpusRequest.md
delete mode 100644 docs/api/google/generativeai/protos/QueryCorpusResponse.md
delete mode 100644 docs/api/google/generativeai/protos/QueryDocumentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/QueryDocumentResponse.md
delete mode 100644 docs/api/google/generativeai/protos/RelevantChunk.md
delete mode 100644 docs/api/google/generativeai/protos/RetrievalMetadata.md
delete mode 100644 docs/api/google/generativeai/protos/SafetyFeedback.md
delete mode 100644 docs/api/google/generativeai/protos/SafetyRating.md
delete mode 100644 docs/api/google/generativeai/protos/SafetySetting.md
delete mode 100644 docs/api/google/generativeai/protos/Schema.md
delete mode 100644 docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
delete mode 100644 docs/api/google/generativeai/protos/SearchEntryPoint.md
delete mode 100644 docs/api/google/generativeai/protos/Segment.md
delete mode 100644 docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
delete mode 100644 docs/api/google/generativeai/protos/StringList.md
delete mode 100644 docs/api/google/generativeai/protos/TaskType.md
delete mode 100644 docs/api/google/generativeai/protos/TextCompletion.md
delete mode 100644 docs/api/google/generativeai/protos/TextPrompt.md
delete mode 100644 docs/api/google/generativeai/protos/Tool.md
delete mode 100644 docs/api/google/generativeai/protos/ToolConfig.md
delete mode 100644 docs/api/google/generativeai/protos/TransferOwnershipRequest.md
delete mode 100644 docs/api/google/generativeai/protos/TransferOwnershipResponse.md
delete mode 100644 docs/api/google/generativeai/protos/TunedModel.md
delete mode 100644 docs/api/google/generativeai/protos/TunedModelSource.md
delete mode 100644 docs/api/google/generativeai/protos/TuningExample.md
delete mode 100644 docs/api/google/generativeai/protos/TuningExamples.md
delete mode 100644 docs/api/google/generativeai/protos/TuningSnapshot.md
delete mode 100644 docs/api/google/generativeai/protos/TuningTask.md
delete mode 100644 docs/api/google/generativeai/protos/Type.md
delete mode 100644 docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/UpdateChunkRequest.md
delete mode 100644 docs/api/google/generativeai/protos/UpdateCorpusRequest.md
delete mode 100644 docs/api/google/generativeai/protos/UpdateDocumentRequest.md
delete mode 100644 docs/api/google/generativeai/protos/UpdatePermissionRequest.md
delete mode 100644 docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
delete mode 100644 docs/api/google/generativeai/protos/VideoMetadata.md
delete mode 100644 docs/api/google/generativeai/types.md
delete mode 100644 docs/api/google/generativeai/types/AnyModelNameOptions.md
delete mode 100644 docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
delete mode 100644 docs/api/google/generativeai/types/BaseModelNameOptions.md
delete mode 100644 docs/api/google/generativeai/types/BlobDict.md
delete mode 100644 docs/api/google/generativeai/types/BlobType.md
delete mode 100644 docs/api/google/generativeai/types/BlockedPromptException.md
delete mode 100644 docs/api/google/generativeai/types/BlockedReason.md
delete mode 100644 docs/api/google/generativeai/types/BrokenResponseError.md
delete mode 100644 docs/api/google/generativeai/types/CallableFunctionDeclaration.md
delete mode 100644 docs/api/google/generativeai/types/CitationMetadataDict.md
delete mode 100644 docs/api/google/generativeai/types/CitationSourceDict.md
delete mode 100644 docs/api/google/generativeai/types/ContentDict.md
delete mode 100644 docs/api/google/generativeai/types/ContentFilterDict.md
delete mode 100644 docs/api/google/generativeai/types/ContentType.md
delete mode 100644 docs/api/google/generativeai/types/ContentsType.md
delete mode 100644 docs/api/google/generativeai/types/File.md
delete mode 100644 docs/api/google/generativeai/types/FileDataDict.md
delete mode 100644 docs/api/google/generativeai/types/FileDataType.md
delete mode 100644 docs/api/google/generativeai/types/FunctionDeclaration.md
delete mode 100644 docs/api/google/generativeai/types/FunctionDeclarationType.md
delete mode 100644 docs/api/google/generativeai/types/FunctionLibrary.md
delete mode 100644 docs/api/google/generativeai/types/FunctionLibraryType.md
delete mode 100644 docs/api/google/generativeai/types/GenerateContentResponse.md
delete mode 100644 docs/api/google/generativeai/types/GenerationConfig.md
delete mode 100644 docs/api/google/generativeai/types/GenerationConfigDict.md
delete mode 100644 docs/api/google/generativeai/types/GenerationConfigType.md
delete mode 100644 docs/api/google/generativeai/types/HarmBlockThreshold.md
delete mode 100644 docs/api/google/generativeai/types/HarmCategory.md
delete mode 100644 docs/api/google/generativeai/types/HarmProbability.md
delete mode 100644 docs/api/google/generativeai/types/IncompleteIterationError.md
delete mode 100644 docs/api/google/generativeai/types/Model.md
delete mode 100644 docs/api/google/generativeai/types/ModelsIterable.md
delete mode 100644 docs/api/google/generativeai/types/PartDict.md
delete mode 100644 docs/api/google/generativeai/types/PartType.md
delete mode 100644 docs/api/google/generativeai/types/Permission.md
delete mode 100644 docs/api/google/generativeai/types/Permissions.md
delete mode 100644 docs/api/google/generativeai/types/RequestOptions.md
delete mode 100644 docs/api/google/generativeai/types/RequestOptionsType.md
delete mode 100644 docs/api/google/generativeai/types/SafetyFeedbackDict.md
delete mode 100644 docs/api/google/generativeai/types/SafetyRatingDict.md
delete mode 100644 docs/api/google/generativeai/types/SafetySettingDict.md
delete mode 100644 docs/api/google/generativeai/types/Status.md
delete mode 100644 docs/api/google/generativeai/types/StopCandidateException.md
delete mode 100644 docs/api/google/generativeai/types/StrictContentType.md
delete mode 100644 docs/api/google/generativeai/types/Tool.md
delete mode 100644 docs/api/google/generativeai/types/ToolDict.md
delete mode 100644 docs/api/google/generativeai/types/ToolsType.md
delete mode 100644 docs/api/google/generativeai/types/TunedModel.md
delete mode 100644 docs/api/google/generativeai/types/TunedModelNameOptions.md
delete mode 100644 docs/api/google/generativeai/types/TunedModelState.md
delete mode 100644 docs/api/google/generativeai/types/TypedDict.md
delete mode 100644 docs/api/google/generativeai/types/get_default_file_client.md
delete mode 100644 docs/api/google/generativeai/types/to_file_data.md
delete mode 100644 docs/api/google/generativeai/update_tuned_model.md
delete mode 100644 docs/api/google/generativeai/upload_file.md
diff --git a/docs/ais-templates/aistudio_gemini_prompt_freeform.ipynb b/_doc_gen/ais-templates/aistudio_gemini_prompt_freeform.ipynb
similarity index 100%
rename from docs/ais-templates/aistudio_gemini_prompt_freeform.ipynb
rename to _doc_gen/ais-templates/aistudio_gemini_prompt_freeform.ipynb
diff --git a/docs/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb b/_doc_gen/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb
similarity index 100%
rename from docs/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb
rename to _doc_gen/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb
diff --git a/docs/build_docs.py b/_doc_gen/build_docs.py
similarity index 100%
rename from docs/build_docs.py
rename to _doc_gen/build_docs.py
diff --git a/docs/api/google/generativeai.md b/docs/api/google/generativeai.md
deleted file mode 100644
index 5b3931f08..000000000
--- a/docs/api/google/generativeai.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-# Module: google.generativeai
-
-Google AI Python SDK
-
-## Setup
-
-```posix-terminal
-pip install google-generativeai
-```
-
-## GenerativeModel
-
-Use `genai.GenerativeModel` to access the API:
-
-```
-import google.generativeai as genai
-import os
-
-genai.configure(api_key=os.environ['API_KEY'])
-
-model = genai.GenerativeModel(model_name='gemini-1.5-flash')
-response = model.generate_content('Teach me about how an LLM works')
-
-print(response.text)
-```
-
-See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
-
-## Modules
-
-[`caching`](../google/generativeai/caching.md) module
-
-[`protos`](../google/generativeai/protos.md) module: This module provides low level access to the ProtoBuffer "Message" classes used by the API.
-
-[`types`](../google/generativeai/types.md) module: A collection of type definitions used throughout the library.
-
-## Classes
-
-[`class ChatSession`](../google/generativeai/ChatSession.md): Contains an ongoing conversation with the model.
-
-[`class GenerationConfig`](../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of `GenerativeModel.generate_content`.
-
-[`class GenerativeModel`](../google/generativeai/GenerativeModel.md): The `genai.GenerativeModel` class wraps default parameters for calls to `GenerativeModel.generate_content`, `GenerativeModel.count_tokens`, and `GenerativeModel.start_chat`.
-
-## Functions
-
-[`configure(...)`](../google/generativeai/configure.md): Captures default client configuration.
-
-[`create_tuned_model(...)`](../google/generativeai/create_tuned_model.md): Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
-
-[`delete_file(...)`](../google/generativeai/delete_file.md): Calls the API to permanently delete a specified file using a supported file service.
-
-[`delete_tuned_model(...)`](../google/generativeai/delete_tuned_model.md): Calls the API to delete a specified tuned model
-
-[`embed_content(...)`](../google/generativeai/embed_content.md): Calls the API to create embeddings for content passed in.
-
-[`embed_content_async(...)`](../google/generativeai/embed_content_async.md): Calls the API to create async embeddings for content passed in.
-
-[`get_base_model(...)`](../google/generativeai/get_base_model.md): Calls the API to fetch a base model by name.
-
-[`get_file(...)`](../google/generativeai/get_file.md): Calls the API to retrieve a specified file using a supported file service.
-
-[`get_model(...)`](../google/generativeai/get_model.md): Calls the API to fetch a model by name.
-
-[`get_operation(...)`](../google/generativeai/get_operation.md): Calls the API to get a specific operation
-
-[`get_tuned_model(...)`](../google/generativeai/get_tuned_model.md): Calls the API to fetch a tuned model by name.
-
-[`list_files(...)`](../google/generativeai/list_files.md): Calls the API to list files using a supported file service.
-
-[`list_models(...)`](../google/generativeai/list_models.md): Calls the API to list all available models.
-
-[`list_operations(...)`](../google/generativeai/list_operations.md): Calls the API to list all operations
-
-[`list_tuned_models(...)`](../google/generativeai/list_tuned_models.md): Calls the API to list all tuned models.
-
-[`update_tuned_model(...)`](../google/generativeai/update_tuned_model.md): Calls the API to push updates to a specified tuned model where only certain attributes are updatable.
-
-[`upload_file(...)`](../google/generativeai/upload_file.md): Calls the API to upload a file using a supported file service.
-
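-For example, the embedding and file helpers listed above can be called directly on the module. This is a minimal sketch; the embedding model name and the uploaded file path are illustrative placeholders rather than values taken from this reference:
-
-```
->>> import os
->>> import google.generativeai as genai
->>> genai.configure(api_key=os.environ['API_KEY'])
->>> result = genai.embed_content(model='models/text-embedding-004',
-...                              content='What is the meaning of life?')
->>> print(len(result['embedding']))
->>> sample_file = genai.upload_file(path='sample.txt')
->>> for f in genai.list_files():
-...     print(f.name)
-```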
-
-| Other Members | |
-| :--- | :--- |
-| `__version__` | `'0.8.3'` |
-| `annotations` | Instance of `__future__._Feature` |
-
diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md
deleted file mode 100644
index 442c59e02..000000000
--- a/docs/api/google/generativeai/ChatSession.md
+++ /dev/null
@@ -1,244 +0,0 @@
-
-# google.generativeai.ChatSession
-
-Contains an ongoing conversation with the model.
-
-
-google.generativeai.ChatSession(
- model: GenerativeModel,
- history: (Iterable[content_types.StrictContentType] | None) = None,
- enable_automatic_function_calling: bool = False
-)
-
-
-
-
-
-
-```
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> chat = model.start_chat()
->>> response = chat.send_message("Hello")
->>> print(response.text)
->>> response = chat.send_message("Hello again")
->>> print(response.text)
->>> response = chat.send_message(...
-```
-
-This `ChatSession` object collects the messages sent and received, in its
-`ChatSession.history` attribute.
-
-| Arguments | |
-| :--- | :--- |
-| `model` | The model to use in the chat. |
-| `history` | A chat history to initialize the object with. |
-
-| Attributes | |
-| :--- | :--- |
-| `history` | The chat history. |
-| `last` | Returns the last received `genai.GenerateContentResponse`. |
-
-## Methods
-
-### `rewind`
-
-rewind() -> tuple[protos.Content, protos.Content]
-
-
-Removes the last request/response pair from the chat history.
-
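-For example (a minimal sketch, assuming a `model` created as in the examples above):
-
-```
->>> chat = model.start_chat()
->>> response = chat.send_message("Hello")
->>> last_request, last_response = chat.rewind()  # returns the removed pair
->>> len(chat.history)  # back to 0
-```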
-
-### `send_message`
-
-send_message(
- content: content_types.ContentType,
- *,
- generation_config: generation_types.GenerationConfigType = None,
- safety_settings: safety_types.SafetySettingOptions = None,
- stream: bool = False,
- tools: (content_types.FunctionLibraryType | None) = None,
- tool_config: (content_types.ToolConfigType | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> generation_types.GenerateContentResponse
-
-
-Sends the conversation history with the added message and returns the model's response.
-
-Appends the request and response to the conversation history.
-
-```
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> chat = model.start_chat()
->>> response = chat.send_message("Hello")
->>> print(response.text)
-"Hello! How can I assist you today?"
->>> len(chat.history)
-2
-```
-
-Call it with `stream=True` to receive response chunks as they are generated:
-
-```
->>> chat = model.start_chat()
->>> response = chat.send_message("Explain quantum physics", stream=True)
->>> for chunk in response:
-... print(chunk.text, end='')
-```
-
-Once iteration over chunks is complete, the `response` and `ChatSession` are in states identical to the
-`stream=False` case. Some properties are not available until iteration is complete.
-
-Like `GenerativeModel.generate_content`, this method lets you override the model's `generation_config` and
-`safety_settings`.
-
-| Arguments | |
-| :--- | :--- |
-| `content` | The message contents. |
-| `generation_config` | Overrides for the model's generation config. |
-| `safety_settings` | Overrides for the model's safety settings. |
-| `stream` | If True, yield response chunks as they are generated. |
-
-### `send_message_async`
-
-send_message_async(
- content,
- *,
- generation_config=None,
- safety_settings=None,
- stream=False,
- tools=None,
- tool_config=None,
- request_options=None
-)
-
-
-The async version of `ChatSession.send_message`.
-
diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md
deleted file mode 100644
index f9b0ccb7d..000000000
--- a/docs/api/google/generativeai/GenerativeModel.md
+++ /dev/null
@@ -1,478 +0,0 @@
-
-# google.generativeai.GenerativeModel
-
-The `genai.GenerativeModel` class wraps default parameters for calls to `GenerativeModel.generate_content`, `GenerativeModel.count_tokens`, and `GenerativeModel.start_chat`.
-
-
-google.generativeai.GenerativeModel(
- model_name: str = 'gemini-1.5-flash-002',
- safety_settings: (safety_types.SafetySettingOptions | None) = None,
- generation_config: (generation_types.GenerationConfigType | None) = None,
- tools: (content_types.FunctionLibraryType | None) = None,
- tool_config: (content_types.ToolConfigType | None) = None,
- system_instruction: (content_types.ContentType | None) = None
-)
-
-
-This family of functionality is designed to support multi-turn conversations and multimodal
-requests. Which media types are supported for input and output is model-dependent.
-
-```
->>> import google.generativeai as genai
->>> import PIL.Image
->>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> result = model.generate_content('Tell me a story about a magic backpack')
->>> result.text
-"In the quaint little town of Lakeside, there lived a young girl named Lily..."
-```
-
-#### Multimodal input:
-
-
-
-```
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> result = model.generate_content([
-... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
->>> result.text
-"**Blueberry Scones** ..."
-```
-
-Multi-turn conversation:
-
-```
->>> chat = model.start_chat()
->>> response = chat.send_message("Hi, I have some questions for you.")
->>> response.text
-"Sure, I'll do my best to answer your questions..."
-```
-
-To list the compatible model names use:
-
-```
->>> for m in genai.list_models():
-... if 'generateContent' in m.supported_generation_methods:
-... print(m.name)
-```
-
-
-| Arguments | |
-| :--- | :--- |
-| `model_name` | The name of the model to query. To list compatible models use |
-| `safety_settings` | Sets the default safety filters. This controls which content is blocked by the api before being returned. |
-| `generation_config` | A `genai.GenerationConfig` setting the default generation parameters to use. |
-
-| Attributes | |
-| :--- | :--- |
-| `cached_content` | |
-| `model_name` | |
-
-## Methods
-
-### `count_tokens`
-
-count_tokens(
- contents: content_types.ContentsType = None,
- *,
- generation_config: (generation_types.GenerationConfigType | None) = None,
- safety_settings: (safety_types.SafetySettingOptions | None) = None,
- tools: (content_types.FunctionLibraryType | None) = None,
- tool_config: (content_types.ToolConfigType | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> protos.CountTokensResponse
-
-
-
-
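-A minimal usage sketch (the prompt text is illustrative); the returned `protos.CountTokensResponse` exposes a `total_tokens` field:
-
-```
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> response = model.count_tokens("The quick brown fox jumps over the lazy dog.")
->>> print(response.total_tokens)
-```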
-
-### `count_tokens_async`
-
-count_tokens_async(
- contents=None,
- *,
- generation_config=None,
- safety_settings=None,
- tools=None,
- tool_config=None,
- request_options=None
-)
-
-
-
-
-
-### `from_cached_content`
-
-@classmethod
-from_cached_content(
- cached_content: (str | caching.CachedContent),
- *,
- generation_config: (generation_types.GenerationConfigType | None) = None,
- safety_settings: (safety_types.SafetySettingOptions | None) = None
-) -> GenerativeModel
-
-
-Creates a model with `cached_content` as model's context.
-
-
-| Args | |
-| :--- | :--- |
-| `cached_content` | context for the model. |
-| `generation_config` | Overrides for the model's generation config. |
-| `safety_settings` | Overrides for the model's safety settings. |
-
-Returns: `GenerativeModel` object with `cached_content` as its context.
-
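-An illustrative sketch of how this pairs with the `caching` module (the model name, TTL, and the `long_document` placeholder are example values, not requirements):
-
-```
->>> import datetime
->>> from google.generativeai import caching
->>> cache = caching.CachedContent.create(
-...     model='models/gemini-1.5-flash-001',
-...     contents=[long_document],
-...     ttl=datetime.timedelta(minutes=30))
->>> model = genai.GenerativeModel.from_cached_content(cached_content=cache)
->>> response = model.generate_content('Summarize the cached document.')
-```
-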
-### `generate_content`
-
-generate_content(
- contents: content_types.ContentsType,
- *,
- generation_config: (generation_types.GenerationConfigType | None) = None,
- safety_settings: (safety_types.SafetySettingOptions | None) = None,
- stream: bool = False,
- tools: (content_types.FunctionLibraryType | None) = None,
- tool_config: (content_types.ToolConfigType | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> generation_types.GenerateContentResponse
-
-
-A multipurpose function to generate responses from the model.
-
-This `GenerativeModel.generate_content` method can handle multimodal input and multi-turn
-conversations.
-
-```
->>> model = genai.GenerativeModel('models/gemini-1.5-flash')
->>> response = model.generate_content('Tell me a story about a magic backpack')
->>> response.text
-```
-
-### Streaming
-
-This method supports streaming with the `stream=True` argument. The result has the same type as the non-streaming case,
-but you can iterate over the response chunks as they become available:
-
-```
->>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)
->>> for chunk in response:
-... print(chunk.text)
-```
-
-### Multi-turn
-
-This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each
-request. This takes some manual management but gives you complete control:
-
-```
->>> messages = [{'role':'user', 'parts': ['hello']}]
->>> response = model.generate_content(messages) # "Hello, how can I help"
->>> messages.append(response.candidates[0].content)
->>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})
->>> response = model.generate_content(messages)
-```
-
-For a simpler multi-turn interface see `GenerativeModel.start_chat`.
-
-### Input type flexibility
-
-While the underlying API strictly expects a `list[protos.Content]`, this method
-will convert the user input into the correct type. The hierarchy of types that can be
-converted is below. Any of these objects can be passed as an equivalent `dict`.
-
-* `Iterable[protos.Content]`
-* protos.Content
-* `Iterable[protos.Part]`
-* protos.Part
-* `str`, `Image`, or protos.Blob
-
-In an `Iterable[protos.Content]` each `content` is a separate message.
-But note that an `Iterable[protos.Part]` is taken as the parts of a single message.
-
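-For example, each of the following is accepted as a single-message prompt (a sketch; `img` stands for a `PIL.Image`, as in the earlier multimodal example):
-
-```
->>> model.generate_content('Describe this picture:')
->>> model.generate_content(['Describe this picture:', img])
->>> model.generate_content({'role': 'user',
-...                         'parts': ['Describe this picture:', img]})
-```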
-
-| Arguments | |
-| :--- | :--- |
-| `contents` | The contents serving as the model's prompt. |
-| `generation_config` | Overrides for the model's generation config. |
-| `safety_settings` | Overrides for the model's safety settings. |
-| `stream` | If True, yield response chunks as they are generated. |
-| `tools` | `protos.Tools` more info coming soon. |
-| `request_options` | Options for the request. |
-
-### `generate_content_async`
-
-generate_content_async(
- contents,
- *,
- generation_config=None,
- safety_settings=None,
- stream=False,
- tools=None,
- tool_config=None,
- request_options=None
-)
-
-
-The async version of `GenerativeModel.generate_content`.
-
-
-### `start_chat`
-
-start_chat(
- *,
- history: (Iterable[content_types.StrictContentType] | None) = None,
- enable_automatic_function_calling: bool = False
-) -> ChatSession
-
-
-Returns a `genai.ChatSession` attached to this model.
-
-```
->>> model = genai.GenerativeModel()
->>> chat = model.start_chat(history=[...])
->>> response = chat.send_message("Hello?")
-```
-
-
-| Arguments | |
-| :--- | :--- |
-| `history` | An iterable of `protos.Content` objects, or equivalents to initialize the session. |
-
diff --git a/docs/api/google/generativeai/_api_cache.json b/docs/api/google/generativeai/_api_cache.json
deleted file mode 100644
index 518937a44..000000000
--- a/docs/api/google/generativeai/_api_cache.json
+++ /dev/null
@@ -1,10387 +0,0 @@
-{
- "duplicate_of": {
- "google.generativeai.ChatSession.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.ChatSession.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.ChatSession.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.ChatSession.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.ChatSession.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.ChatSession.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.ChatSession.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.GenerationConfig": "google.generativeai.types.GenerationConfig",
- "google.generativeai.GenerationConfig.__eq__": "google.generativeai.types.GenerationConfig.__eq__",
- "google.generativeai.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.GenerationConfig.__init__": "google.generativeai.types.GenerationConfig.__init__",
- "google.generativeai.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.GenerativeModel.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.GenerativeModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.GenerativeModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.GenerativeModel.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.GenerativeModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.GenerativeModel.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.GenerativeModel.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.annotations": "google.generativeai.caching.annotations",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.AttributionSourceId.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.AttributionSourceId.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.AttributionSourceId.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.AttributionSourceId.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchCreateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchCreateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchCreateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchCreateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchCreateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchCreateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchEmbedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchEmbedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchEmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchEmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchEmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchEmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchEmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchEmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchEmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Blob.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Blob.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Blob.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Blob.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Blob.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Blob.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Blob.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Blob.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CachedContent.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CachedContent.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CachedContent.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CachedContent.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CachedContent.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Candidate.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Chunk.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.Chunk.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.Chunk.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.Chunk.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.Chunk.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.Chunk.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.Chunk.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.Chunk.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.Chunk.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.Chunk.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.Chunk.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.Chunk.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.Chunk.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.Chunk.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.Chunk.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.Chunk.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.Chunk.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.Chunk.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.Chunk.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.Chunk.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.Chunk.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.Chunk.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.Chunk.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.Chunk.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.Chunk.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.Chunk.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.Chunk.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.Chunk.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.Chunk.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.Chunk.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.Chunk.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.Chunk.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.Chunk.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.Chunk.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.Chunk.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.Chunk.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.Chunk.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.Chunk.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.Chunk.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.Chunk.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.Chunk.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.Chunk.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.Chunk.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.Chunk.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.Chunk.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.Chunk.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.Chunk.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.Chunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Chunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Chunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Chunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Chunk.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Chunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Chunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Chunk.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ChunkData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ChunkData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ChunkData.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ChunkData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ChunkData.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CitationMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CitationMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CitationMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CitationSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CitationSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CitationSource.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CitationSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CitationSource.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CodeExecution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CodeExecution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CodeExecution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.CodeExecutionResult.Outcome.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.CodeExecutionResult.Outcome.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.CodeExecutionResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CodeExecutionResult.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CodeExecutionResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CodeExecutionResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Condition.Operator.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.Condition.Operator.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.Condition.Operator.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.Condition.Operator.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.Condition.Operator.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.Condition.Operator.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.Condition.Operator.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.Condition.Operator.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.Condition.Operator.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.Condition.Operator.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.Condition.Operator.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.Condition.Operator.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.Condition.Operator.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.Condition.Operator.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.Condition.Operator.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.Condition.Operator.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.Condition.Operator.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.Condition.Operator.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.Condition.Operator.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.Condition.Operator.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.Condition.Operator.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.Condition.Operator.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.Condition.Operator.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.Condition.Operator.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.Condition.Operator.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.Condition.Operator.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.Condition.Operator.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.Condition.Operator.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.Condition.Operator.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.Condition.Operator.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.Condition.Operator.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.Condition.Operator.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.Condition.Operator.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.Condition.Operator.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.Condition.Operator.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.Condition.Operator.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.Condition.Operator.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.Condition.Operator.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.Condition.Operator.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.Condition.Operator.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.Condition.Operator.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.Condition.Operator.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.Condition.Operator.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.Condition.Operator.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.Condition.Operator.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.Condition.Operator.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.Condition.Operator.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.Condition.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Condition.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Condition.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Condition.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Condition.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Condition.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Condition.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Condition.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Content.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Content.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Content.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Content.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Content.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Content.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Content.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Content.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ContentEmbedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ContentEmbedding.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ContentEmbedding.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ContentEmbedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ContentEmbedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ContentFilter.BlockedReason": "google.generativeai.types.BlockedReason",
- "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.ContentFilter.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.ContentFilter.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.ContentFilter.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.ContentFilter.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.ContentFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ContentFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ContentFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ContentFilter.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Corpus.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Corpus.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Corpus.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Corpus.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Corpus.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Corpus.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Corpus.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Corpus.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountMessageTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountMessageTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountMessageTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountMessageTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountMessageTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountMessageTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountTextTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountTextTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountTextTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountTextTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountTextTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountTextTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountTextTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CountTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CountTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CountTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateFileResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateFileResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateFileResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateFileResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateTunedModelMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateTunedModelMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateTunedModelMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateTunedModelMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateTunedModelMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CreateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CreateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CreateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.CustomMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.CustomMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.CustomMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Dataset.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Dataset.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Dataset.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Dataset.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Dataset.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Dataset.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Dataset.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Dataset.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeletePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeletePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeletePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DeleteTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DeleteTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DeleteTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Document.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Document.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Document.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Document.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Document.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Document.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Document.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Document.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.DynamicRetrievalConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.DynamicRetrievalConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.DynamicRetrievalConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.DynamicRetrievalConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.DynamicRetrievalConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.DynamicRetrievalConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.DynamicRetrievalConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.DynamicRetrievalConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.EmbedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.EmbedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.EmbedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.EmbedContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.EmbedContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.EmbedContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.EmbedContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.EmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.EmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.EmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.EmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.EmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.EmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.EmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Embedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Embedding.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Embedding.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Embedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Embedding.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Embedding.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Embedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Embedding.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Example.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Example.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Example.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Example.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Example.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Example.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Example.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Example.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ExecutableCode.Language.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.ExecutableCode.Language.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.ExecutableCode.Language.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.ExecutableCode.Language.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.ExecutableCode.Language.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.ExecutableCode.Language.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.ExecutableCode.Language.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.ExecutableCode.Language.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.ExecutableCode.Language.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.ExecutableCode.Language.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.ExecutableCode.Language.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.ExecutableCode.Language.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.ExecutableCode.Language.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.ExecutableCode.Language.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.ExecutableCode.Language.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.ExecutableCode.Language.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.ExecutableCode.Language.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.ExecutableCode.Language.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.ExecutableCode.Language.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.ExecutableCode.Language.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.ExecutableCode.Language.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.ExecutableCode.Language.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.ExecutableCode.Language.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.ExecutableCode.Language.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.ExecutableCode.Language.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.ExecutableCode.Language.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.ExecutableCode.Language.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.ExecutableCode.Language.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.ExecutableCode.Language.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.ExecutableCode.Language.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.ExecutableCode.Language.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.ExecutableCode.Language.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.ExecutableCode.Language.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.ExecutableCode.Language.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.ExecutableCode.Language.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.ExecutableCode.Language.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.ExecutableCode.Language.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.ExecutableCode.Language.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.ExecutableCode.Language.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.ExecutableCode.Language.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.ExecutableCode.Language.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.ExecutableCode.Language.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.ExecutableCode.Language.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.ExecutableCode.Language.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.ExecutableCode.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ExecutableCode.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ExecutableCode.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ExecutableCode.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.File.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.File.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.File.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.File.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.File.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.File.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.File.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.File.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.File.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.File.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.File.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.File.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.File.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.File.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.File.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.File.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.File.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.File.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.File.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.File.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.File.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.File.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.File.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.File.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.File.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.File.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.File.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.File.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.File.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.File.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.File.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.File.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.File.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.File.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.File.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.File.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.File.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.File.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.File.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.File.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.File.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.File.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.File.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.File.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.File.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.File.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.File.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.File.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.File.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.File.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.File.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.File.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.File.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.File.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.File.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.FileData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.FileData.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.FileData.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.FileData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.FileData.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.FileData.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.FileData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.FileData.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.FunctionCall.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.FunctionCall.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.FunctionCall.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.FunctionCall.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.FunctionCallingConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.FunctionCallingConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.FunctionCallingConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.FunctionCallingConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.FunctionCallingConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.FunctionDeclaration.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.FunctionDeclaration.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.FunctionDeclaration.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.FunctionResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.FunctionResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.FunctionResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.GenerateAnswerRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateAnswerRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateAnswerRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateAnswerResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateAnswerResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateAnswerResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateMessageRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateMessageRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateMessageRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateMessageResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateMessageResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateMessageResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerateTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerateTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerateTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GenerationConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GenerationConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GenerationConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetPermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetPermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetPermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GetTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GetTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GetTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GoogleSearchRetrieval.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GoogleSearchRetrieval.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GoogleSearchRetrieval.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GoogleSearchRetrieval.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GoogleSearchRetrieval.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GoogleSearchRetrieval.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GoogleSearchRetrieval.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GoogleSearchRetrieval.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingAttribution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingAttribution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingAttribution.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingAttribution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingChunk.Web.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingChunk.Web.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingChunk.Web.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingChunk.Web.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingChunk.Web.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingChunk.Web.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingChunk.Web.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingChunk.Web.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingPassage.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingPassage.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingPassage.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingPassages.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingPassages.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingPassages.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.GroundingSupport.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.GroundingSupport.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.GroundingSupport.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.GroundingSupport.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.GroundingSupport.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.GroundingSupport.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.GroundingSupport.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.GroundingSupport.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.Hyperparameters.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Hyperparameters.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Hyperparameters.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListCachedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListCachedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListCachedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListCachedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListCachedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListCachedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListCachedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListCorporaRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListCorporaRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListCorporaRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListCorporaRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListCorporaResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListCorporaResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListCorporaResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListCorporaResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListDocumentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListDocumentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListDocumentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListDocumentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListDocumentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListDocumentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListDocumentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListFilesRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListFilesRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListFilesRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListFilesResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListFilesResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListFilesResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListFilesResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListPermissionsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListPermissionsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListPermissionsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListPermissionsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListPermissionsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListPermissionsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListPermissionsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListTunedModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListTunedModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListTunedModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListTunedModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ListTunedModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ListTunedModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ListTunedModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.LogprobsResult.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.LogprobsResult.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.LogprobsResult.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.LogprobsResult.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.LogprobsResult.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.LogprobsResult.Candidate.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.LogprobsResult.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.LogprobsResult.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.LogprobsResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.LogprobsResult.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.LogprobsResult.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.LogprobsResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.LogprobsResult.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.LogprobsResult.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.LogprobsResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.LogprobsResult.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Message.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Message.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Message.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Message.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Message.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Message.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Message.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Message.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.MessagePrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.MessagePrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.MessagePrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.MessagePrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.MetadataFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.MetadataFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.MetadataFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.MetadataFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Model.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Model.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Model.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Model.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Model.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Part.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Part.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Part.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Part.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Part.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Part.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Part.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Part.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Permission.GranteeType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.Permission.GranteeType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.Permission.GranteeType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.Permission.GranteeType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.Permission.GranteeType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.Permission.GranteeType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.Permission.GranteeType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.Permission.GranteeType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.Permission.GranteeType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.Permission.GranteeType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.Permission.GranteeType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.Permission.GranteeType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.Permission.GranteeType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.Permission.GranteeType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.Permission.GranteeType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.Permission.GranteeType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.Permission.GranteeType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.Permission.GranteeType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.Permission.GranteeType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.Permission.GranteeType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.Permission.GranteeType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.Permission.GranteeType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.Permission.GranteeType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.Permission.GranteeType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.Permission.GranteeType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.Permission.GranteeType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.Permission.GranteeType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.Permission.GranteeType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.Permission.GranteeType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.Permission.GranteeType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.Permission.GranteeType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.Permission.GranteeType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.Permission.GranteeType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.Permission.GranteeType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.Permission.GranteeType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.Permission.GranteeType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.Permission.GranteeType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.Permission.GranteeType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.Permission.GranteeType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.Permission.GranteeType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.Permission.GranteeType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.Permission.GranteeType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.Permission.GranteeType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.Permission.GranteeType.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.Permission.GranteeType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.Permission.Role.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.Permission.Role.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.Permission.Role.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.Permission.Role.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.Permission.Role.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.Permission.Role.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.Permission.Role.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.Permission.Role.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.Permission.Role.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.Permission.Role.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.Permission.Role.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.Permission.Role.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.Permission.Role.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.Permission.Role.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.Permission.Role.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.Permission.Role.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.Permission.Role.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.Permission.Role.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.Permission.Role.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.Permission.Role.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.Permission.Role.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.Permission.Role.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.Permission.Role.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.Permission.Role.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.Permission.Role.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.Permission.Role.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.Permission.Role.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.Permission.Role.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.Permission.Role.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.Permission.Role.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.Permission.Role.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.Permission.Role.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.Permission.Role.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.Permission.Role.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.Permission.Role.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.Permission.Role.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.Permission.Role.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.Permission.Role.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.Permission.Role.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.Permission.Role.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.Permission.Role.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.Permission.Role.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.Permission.Role.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.Permission.Role.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.Permission.Role.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.Permission.Role.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.Permission.Role.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.Permission.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Permission.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Permission.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Permission.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Permission.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.PredictRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.PredictRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.PredictRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.PredictRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.PredictRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.PredictRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.PredictRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.PredictRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.PredictResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.PredictResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.PredictResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.PredictResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.PredictResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.PredictResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.PredictResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.PredictResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.QueryCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.QueryCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.QueryCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.QueryCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.QueryCorpusResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.QueryCorpusResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.QueryCorpusResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.QueryDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.QueryDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.QueryDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.QueryDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.QueryDocumentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.QueryDocumentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.QueryDocumentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.RelevantChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.RelevantChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.RelevantChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.RelevantChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.RetrievalMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.RetrievalMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.RetrievalMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.RetrievalMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.RetrievalMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.RetrievalMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.RetrievalMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.RetrievalMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.SafetyFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.SafetyFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.SafetyFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.SafetyRating.HarmProbability": "google.generativeai.types.HarmProbability",
- "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.SafetyRating.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.SafetyRating.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.SafetyRating.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.SafetyRating.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.SafetyRating.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.SafetyRating.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.SafetyRating.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.SafetyRating.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold": "google.generativeai.types.HarmBlockThreshold",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.SafetySetting.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.SafetySetting.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.SafetySetting.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.SafetySetting.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.SafetySetting.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Schema.PropertiesEntry.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Schema.PropertiesEntry.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Schema.PropertiesEntry.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Schema.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Schema.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Schema.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Schema.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Schema.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Schema.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Schema.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Schema.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.SearchEntryPoint.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.SearchEntryPoint.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.SearchEntryPoint.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.SearchEntryPoint.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.SearchEntryPoint.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.SearchEntryPoint.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.SearchEntryPoint.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.SearchEntryPoint.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Segment.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Segment.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Segment.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Segment.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Segment.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Segment.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Segment.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Segment.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.SemanticRetrieverConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.SemanticRetrieverConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.SemanticRetrieverConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.StringList.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.StringList.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.StringList.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.StringList.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.StringList.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.StringList.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.StringList.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.StringList.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TaskType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.TaskType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.TaskType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.TaskType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.TaskType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.TaskType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.TaskType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.TaskType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.TaskType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.TaskType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.TaskType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.TaskType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.TaskType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.TaskType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.TaskType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.TaskType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.TaskType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.TaskType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.TaskType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.TaskType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.TaskType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.TaskType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.TaskType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.TaskType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.TaskType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.TaskType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.TaskType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.TaskType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.TaskType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.TaskType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.TaskType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.TaskType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.TaskType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.TaskType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.TaskType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.TaskType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.TaskType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.TaskType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.TaskType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.TaskType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.TaskType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.TaskType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.TaskType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.TaskType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.TaskType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.TaskType.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.TaskType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.TextCompletion.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TextCompletion.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TextCompletion.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TextCompletion.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TextPrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TextPrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TextPrompt.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TextPrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Tool.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.Tool.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.Tool.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.Tool.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.Tool.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.Tool.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.ToolConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.ToolConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.ToolConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TransferOwnershipRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TransferOwnershipRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TransferOwnershipRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TransferOwnershipResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TransferOwnershipResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TransferOwnershipResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TunedModel.State": "google.generativeai.types.TunedModelState",
- "google.generativeai.protos.TunedModel.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.TunedModel.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.TunedModel.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.TunedModel.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.TunedModel.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.TunedModel.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.TunedModel.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.TunedModel.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.TunedModel.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.TunedModel.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.TunedModel.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.TunedModel.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.TunedModel.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.TunedModel.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.TunedModel.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.TunedModel.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.TunedModel.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.TunedModel.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.TunedModel.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.TunedModel.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.TunedModel.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.TunedModel.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.TunedModel.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.TunedModel.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.TunedModel.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.TunedModel.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.TunedModel.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.TunedModel.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.TunedModel.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.TunedModel.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.TunedModel.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.TunedModel.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.TunedModel.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.TunedModel.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.TunedModel.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.TunedModel.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.TunedModel.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.TunedModel.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.TunedModel.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.TunedModel.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.TunedModel.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.TunedModel.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.TunedModel.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.TunedModel.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.TunedModel.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.TunedModel.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.TunedModel.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.TunedModel.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TunedModel.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TunedModel.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TunedModelSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TunedModelSource.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TunedModelSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TunedModelSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TunedModelSource.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TuningExample.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TuningExample.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TuningExample.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TuningExample.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TuningExample.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TuningExample.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TuningExamples.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TuningExamples.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TuningExamples.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TuningExamples.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TuningSnapshot.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TuningSnapshot.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TuningSnapshot.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.TuningTask.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.TuningTask.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.TuningTask.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.TuningTask.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.TuningTask.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.TuningTask.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.Type.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.protos.Type.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.protos.Type.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.protos.Type.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.protos.Type.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.protos.Type.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.protos.Type.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.protos.Type.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.protos.Type.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.protos.Type.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.protos.Type.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.protos.Type.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.protos.Type.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.protos.Type.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.protos.Type.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.protos.Type.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.protos.Type.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.protos.Type.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.protos.Type.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.protos.Type.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.protos.Type.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.protos.Type.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.protos.Type.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.protos.Type.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.protos.Type.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.protos.Type.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.protos.Type.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.protos.Type.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.protos.Type.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.protos.Type.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.protos.Type.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.protos.Type.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.protos.Type.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.protos.Type.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.protos.Type.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.protos.Type.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.protos.Type.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.protos.Type.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.protos.Type.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.protos.Type.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.protos.Type.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.protos.Type.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.protos.Type.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.protos.Type.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.protos.Type.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.protos.Type.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.protos.Type.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.protos.UpdateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.UpdateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.UpdateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.UpdateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.UpdatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.UpdateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.UpdateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.UpdateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.protos.VideoMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
- "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.protos.VideoMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
- "google.generativeai.protos.VideoMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.protos.VideoMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
- "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.AsyncGenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.AsyncGenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.AsyncGenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.types.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.types.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.types.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.types.BlockedReason.__contains__": "google.generativeai.protos.ContentFilter.BlockedReason.__contains__",
- "google.generativeai.types.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.types.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.types.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.types.BlockedReason.__getitem__": "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__",
- "google.generativeai.types.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.types.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.types.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.types.BlockedReason.__iter__": "google.generativeai.protos.ContentFilter.BlockedReason.__iter__",
- "google.generativeai.types.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.types.BlockedReason.__len__": "google.generativeai.protos.ContentFilter.BlockedReason.__len__",
- "google.generativeai.types.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.types.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.types.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.types.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.types.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.types.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.types.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.types.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.types.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.types.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.types.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.types.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.types.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.types.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.types.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.types.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.types.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.types.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.types.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.types.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.types.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.types.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.types.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.types.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.types.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.types.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.types.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.types.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.types.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.types.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.types.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.types.BlockedReason.from_bytes": "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes",
- "google.generativeai.types.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.types.BlockedReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.types.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.types.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.types.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.BlockedPromptException.__init__",
- "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.BlockedPromptException.__new__",
- "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.BlockedPromptException.add_note",
- "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.BlockedPromptException.args",
- "google.generativeai.types.BrokenResponseError.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
- "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.CallableFunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.CallableFunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.CallableFunctionDeclaration.description": "google.generativeai.types.FunctionDeclaration.description",
- "google.generativeai.types.CallableFunctionDeclaration.from_function": "google.generativeai.types.FunctionDeclaration.from_function",
- "google.generativeai.types.CallableFunctionDeclaration.name": "google.generativeai.types.FunctionDeclaration.name",
- "google.generativeai.types.CallableFunctionDeclaration.parameters": "google.generativeai.types.FunctionDeclaration.parameters",
- "google.generativeai.types.CallableFunctionDeclaration.to_proto": "google.generativeai.types.FunctionDeclaration.to_proto",
- "google.generativeai.types.CitationMetadataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.CitationMetadataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.CitationMetadataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.CitationMetadataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.CitationMetadataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.CitationMetadataDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.CitationMetadataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.CitationMetadataDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.CitationMetadataDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.CitationMetadataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.CitationMetadataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.CitationMetadataDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.CitationMetadataDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.CitationMetadataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.CitationMetadataDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.CitationMetadataDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.CitationMetadataDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.CitationMetadataDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.CitationMetadataDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.CitationMetadataDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.CitationMetadataDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.CitationMetadataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.CitationMetadataDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.CitationMetadataDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.CitationSourceDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.CitationSourceDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.CitationSourceDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.CitationSourceDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.CitationSourceDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.CitationSourceDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.CitationSourceDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.CitationSourceDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.CitationSourceDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.CitationSourceDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.CitationSourceDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.CitationSourceDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.CitationSourceDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.CitationSourceDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.CitationSourceDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.CitationSourceDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.CitationSourceDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.CitationSourceDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.CitationSourceDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.CitationSourceDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.CitationSourceDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.CitationSourceDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.CitationSourceDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.CitationSourceDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.ContentDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.ContentDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.ContentDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.ContentDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.ContentDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.ContentDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.ContentDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.ContentDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.ContentDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.ContentDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.ContentDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.ContentDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.ContentDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.ContentDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.ContentDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.ContentDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.ContentDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.ContentDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.ContentDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.ContentDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.ContentDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.ContentDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.ContentDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.ContentDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.ContentFilterDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.ContentFilterDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.ContentFilterDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.ContentFilterDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.ContentFilterDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.ContentFilterDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.ContentFilterDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.ContentFilterDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.ContentFilterDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.ContentFilterDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.ContentFilterDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.ContentFilterDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.ContentFilterDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.ContentFilterDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.ContentFilterDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.ContentFilterDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.ContentFilterDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.ContentFilterDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.ContentFilterDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.ContentFilterDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.ContentFilterDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.ContentFilterDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.ContentFilterDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.ContentFilterDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.File.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.File.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.File.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.File.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.File.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.File.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.File.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.FileDataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.FileDataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.FileDataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.FileDataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.FileDataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.FileDataDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.FileDataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.FileDataDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.FileDataDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.FileDataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.FileDataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.FileDataDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.FileDataDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.FileDataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.FileDataDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.FileDataDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.FileDataDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.FileDataDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.FileDataDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.FileDataDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.FileDataDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.FileDataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.FileDataDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.FileDataDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.FunctionLibrary.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.GenerateContentResponse.__init__": "google.generativeai.types.AsyncGenerateContentResponse.__init__",
- "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.GenerateContentResponse.candidates": "google.generativeai.types.AsyncGenerateContentResponse.candidates",
- "google.generativeai.types.GenerateContentResponse.parts": "google.generativeai.types.AsyncGenerateContentResponse.parts",
- "google.generativeai.types.GenerateContentResponse.prompt_feedback": "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback",
- "google.generativeai.types.GenerateContentResponse.text": "google.generativeai.types.AsyncGenerateContentResponse.text",
- "google.generativeai.types.GenerateContentResponse.to_dict": "google.generativeai.types.AsyncGenerateContentResponse.to_dict",
- "google.generativeai.types.GenerateContentResponse.usage_metadata": "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata",
- "google.generativeai.types.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.GenerationConfigDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.GenerationConfigDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.GenerationConfigDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.GenerationConfigDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.GenerationConfigDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.GenerationConfigDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.GenerationConfigDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.GenerationConfigDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.GenerationConfigDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.GenerationConfigDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.GenerationConfigDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.GenerationConfigDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.GenerationConfigDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.GenerationConfigDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.GenerationConfigDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.GenerationConfigDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.GenerationConfigDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.GenerationConfigDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.GenerationConfigDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.GenerationConfigDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.GenerationConfigDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.GenerationConfigDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.GenerationConfigDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.GenerationConfigDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.types.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.types.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.types.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.types.HarmBlockThreshold.__contains__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__",
- "google.generativeai.types.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.types.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.types.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.types.HarmBlockThreshold.__getitem__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__",
- "google.generativeai.types.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.types.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.types.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.types.HarmBlockThreshold.__iter__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__",
- "google.generativeai.types.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.types.HarmBlockThreshold.__len__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__",
- "google.generativeai.types.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.types.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.types.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.types.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.types.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.types.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.types.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.types.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.types.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.types.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.types.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.types.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.types.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.types.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.types.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.types.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.types.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.types.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.types.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.types.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.types.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.types.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.types.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.types.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.types.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.types.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.types.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.types.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.types.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.types.HarmBlockThreshold.from_bytes": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes",
- "google.generativeai.types.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.types.HarmBlockThreshold.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.types.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.types.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.types.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.types.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.types.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.types.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.types.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.types.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.types.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.types.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.types.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.types.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.types.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.types.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.types.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.types.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.types.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.types.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.types.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.types.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.types.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.types.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.types.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.types.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.types.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.types.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.types.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.types.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.types.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.types.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.types.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.types.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.types.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.types.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.types.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.types.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.types.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.types.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.types.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.types.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.types.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.types.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.types.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.types.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.types.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.types.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.types.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.types.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.types.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.types.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.types.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.types.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.types.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.types.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.types.HarmProbability.__contains__": "google.generativeai.protos.SafetyRating.HarmProbability.__contains__",
- "google.generativeai.types.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.types.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.types.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.types.HarmProbability.__getitem__": "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__",
- "google.generativeai.types.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.types.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.types.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.types.HarmProbability.__iter__": "google.generativeai.protos.SafetyRating.HarmProbability.__iter__",
- "google.generativeai.types.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.types.HarmProbability.__len__": "google.generativeai.protos.SafetyRating.HarmProbability.__len__",
- "google.generativeai.types.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.types.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.types.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.types.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.types.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.types.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.types.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.types.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.types.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.types.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.types.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.types.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.types.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.types.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.types.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.types.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.types.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.types.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.types.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.types.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.types.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.types.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.types.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.types.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.types.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.types.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.types.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.types.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.types.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.types.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.types.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.types.HarmProbability.from_bytes": "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes",
- "google.generativeai.types.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.types.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.types.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.types.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.types.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.BlockedPromptException.__init__",
- "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.IncompleteIterationError.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.BlockedPromptException.__new__",
- "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.BlockedPromptException.add_note",
- "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.BlockedPromptException.args",
- "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
- "google.generativeai.types.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.Model.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.Model.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.Model.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.ModelNameOptions": "google.generativeai.types.AnyModelNameOptions",
- "google.generativeai.types.PartDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.PartDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.PartDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.PartDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.PartDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.PartDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.PartDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.PartDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.PartDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.PartDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.PartDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.PartDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.PartDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.PartDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.PartDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.PartDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.PartDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.PartDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.PartDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.PartDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.PartDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.PartDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.PartDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.PartDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.Permission.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.Permission.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.Permission.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.Permissions.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.Permissions.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.Permissions.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.Permissions.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.Permissions.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.Permissions.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.Permissions.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.RequestOptions.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.RequestOptions.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.SafetyFeedbackDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.SafetyFeedbackDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.SafetyFeedbackDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.SafetyFeedbackDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.SafetyFeedbackDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.SafetyFeedbackDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.SafetyFeedbackDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.SafetyFeedbackDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.SafetyFeedbackDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.SafetyFeedbackDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.SafetyFeedbackDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.SafetyFeedbackDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.SafetyFeedbackDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.SafetyFeedbackDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.SafetyFeedbackDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.SafetyFeedbackDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.SafetyFeedbackDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.SafetyFeedbackDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.SafetyFeedbackDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.SafetyFeedbackDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.SafetyFeedbackDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.SafetyFeedbackDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.SafetyFeedbackDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.SafetyFeedbackDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.SafetyRatingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.SafetyRatingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.SafetyRatingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.SafetyRatingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.SafetyRatingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.SafetyRatingDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.SafetyRatingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.SafetyRatingDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.SafetyRatingDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.SafetyRatingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.SafetyRatingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.SafetyRatingDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.SafetyRatingDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.SafetyRatingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.SafetyRatingDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.SafetyRatingDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.SafetyRatingDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.SafetyRatingDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.SafetyRatingDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.SafetyRatingDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.SafetyRatingDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.SafetyRatingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.SafetyRatingDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.SafetyRatingDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.SafetySettingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.SafetySettingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.SafetySettingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.SafetySettingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.SafetySettingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.SafetySettingDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.SafetySettingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.SafetySettingDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.SafetySettingDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.SafetySettingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.SafetySettingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.SafetySettingDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.SafetySettingDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.SafetySettingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.SafetySettingDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.SafetySettingDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.SafetySettingDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.SafetySettingDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.SafetySettingDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.SafetySettingDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.SafetySettingDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.SafetySettingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.SafetySettingDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.SafetySettingDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.BlockedPromptException.__init__",
- "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.BlockedPromptException.__new__",
- "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.BlockedPromptException.add_note",
- "google.generativeai.types.StopCandidateException.args": "google.generativeai.types.BlockedPromptException.args",
- "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
- "google.generativeai.types.Tool.__eq__": "google.generativeai.caching.CachedContent.__eq__",
- "google.generativeai.types.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.Tool.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.Tool.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.Tool.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.Tool.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.ToolDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
- "google.generativeai.types.ToolDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
- "google.generativeai.types.ToolDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
- "google.generativeai.types.ToolDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
- "google.generativeai.types.ToolDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
- "google.generativeai.types.ToolDict.__init__": "google.generativeai.types.BlobDict.__init__",
- "google.generativeai.types.ToolDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
- "google.generativeai.types.ToolDict.__le__": "google.generativeai.types.BlobDict.__le__",
- "google.generativeai.types.ToolDict.__len__": "google.generativeai.types.BlobDict.__len__",
- "google.generativeai.types.ToolDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
- "google.generativeai.types.ToolDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
- "google.generativeai.types.ToolDict.__new__": "google.generativeai.types.BlobDict.__new__",
- "google.generativeai.types.ToolDict.__or__": "google.generativeai.types.BlobDict.__or__",
- "google.generativeai.types.ToolDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
- "google.generativeai.types.ToolDict.clear": "google.generativeai.types.BlobDict.clear",
- "google.generativeai.types.ToolDict.copy": "google.generativeai.types.BlobDict.copy",
- "google.generativeai.types.ToolDict.get": "google.generativeai.types.BlobDict.get",
- "google.generativeai.types.ToolDict.items": "google.generativeai.types.BlobDict.items",
- "google.generativeai.types.ToolDict.keys": "google.generativeai.types.BlobDict.keys",
- "google.generativeai.types.ToolDict.pop": "google.generativeai.types.BlobDict.pop",
- "google.generativeai.types.ToolDict.popitem": "google.generativeai.types.BlobDict.popitem",
- "google.generativeai.types.ToolDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
- "google.generativeai.types.ToolDict.update": "google.generativeai.types.BlobDict.update",
- "google.generativeai.types.ToolDict.values": "google.generativeai.types.BlobDict.values",
- "google.generativeai.types.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
- "google.generativeai.types.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
- "google.generativeai.types.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__",
- "google.generativeai.types.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
- "google.generativeai.types.TunedModel.__ne__": "google.generativeai.caching.CachedContent.__ne__",
- "google.generativeai.types.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__",
- "google.generativeai.types.TunedModelState.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
- "google.generativeai.types.TunedModelState.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
- "google.generativeai.types.TunedModelState.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
- "google.generativeai.types.TunedModelState.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
- "google.generativeai.types.TunedModelState.__contains__": "google.generativeai.protos.TunedModel.State.__contains__",
- "google.generativeai.types.TunedModelState.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
- "google.generativeai.types.TunedModelState.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
- "google.generativeai.types.TunedModelState.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
- "google.generativeai.types.TunedModelState.__getitem__": "google.generativeai.protos.TunedModel.State.__getitem__",
- "google.generativeai.types.TunedModelState.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
- "google.generativeai.types.TunedModelState.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
- "google.generativeai.types.TunedModelState.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
- "google.generativeai.types.TunedModelState.__iter__": "google.generativeai.protos.TunedModel.State.__iter__",
- "google.generativeai.types.TunedModelState.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
- "google.generativeai.types.TunedModelState.__len__": "google.generativeai.protos.TunedModel.State.__len__",
- "google.generativeai.types.TunedModelState.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
- "google.generativeai.types.TunedModelState.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
- "google.generativeai.types.TunedModelState.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
- "google.generativeai.types.TunedModelState.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
- "google.generativeai.types.TunedModelState.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
- "google.generativeai.types.TunedModelState.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
- "google.generativeai.types.TunedModelState.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
- "google.generativeai.types.TunedModelState.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
- "google.generativeai.types.TunedModelState.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
- "google.generativeai.types.TunedModelState.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
- "google.generativeai.types.TunedModelState.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
- "google.generativeai.types.TunedModelState.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
- "google.generativeai.types.TunedModelState.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
- "google.generativeai.types.TunedModelState.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
- "google.generativeai.types.TunedModelState.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
- "google.generativeai.types.TunedModelState.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
- "google.generativeai.types.TunedModelState.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
- "google.generativeai.types.TunedModelState.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
- "google.generativeai.types.TunedModelState.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
- "google.generativeai.types.TunedModelState.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
- "google.generativeai.types.TunedModelState.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
- "google.generativeai.types.TunedModelState.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
- "google.generativeai.types.TunedModelState.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
- "google.generativeai.types.TunedModelState.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
- "google.generativeai.types.TunedModelState.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
- "google.generativeai.types.TunedModelState.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
- "google.generativeai.types.TunedModelState.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
- "google.generativeai.types.TunedModelState.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
- "google.generativeai.types.TunedModelState.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
- "google.generativeai.types.TunedModelState.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
- "google.generativeai.types.TunedModelState.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
- "google.generativeai.types.TunedModelState.from_bytes": "google.generativeai.protos.TunedModel.State.from_bytes",
- "google.generativeai.types.TunedModelState.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
- "google.generativeai.types.TunedModelState.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
- "google.generativeai.types.TunedModelState.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
- "google.generativeai.types.TunedModelState.real": "google.generativeai.protos.Candidate.FinishReason.real",
- "google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
- "google.generativeai.types.annotations": "google.generativeai.caching.annotations"
- },
- "is_fragment": {
- "google.generativeai": false,
- "google.generativeai.ChatSession": false,
- "google.generativeai.ChatSession.__eq__": true,
- "google.generativeai.ChatSession.__ge__": true,
- "google.generativeai.ChatSession.__gt__": true,
- "google.generativeai.ChatSession.__init__": true,
- "google.generativeai.ChatSession.__le__": true,
- "google.generativeai.ChatSession.__lt__": true,
- "google.generativeai.ChatSession.__ne__": true,
- "google.generativeai.ChatSession.__new__": true,
- "google.generativeai.ChatSession.history": true,
- "google.generativeai.ChatSession.last": true,
- "google.generativeai.ChatSession.rewind": true,
- "google.generativeai.ChatSession.send_message": true,
- "google.generativeai.ChatSession.send_message_async": true,
- "google.generativeai.GenerationConfig": false,
- "google.generativeai.GenerationConfig.__eq__": true,
- "google.generativeai.GenerationConfig.__ge__": true,
- "google.generativeai.GenerationConfig.__gt__": true,
- "google.generativeai.GenerationConfig.__init__": true,
- "google.generativeai.GenerationConfig.__le__": true,
- "google.generativeai.GenerationConfig.__lt__": true,
- "google.generativeai.GenerationConfig.__ne__": true,
- "google.generativeai.GenerationConfig.__new__": true,
- "google.generativeai.GenerationConfig.candidate_count": true,
- "google.generativeai.GenerationConfig.frequency_penalty": true,
- "google.generativeai.GenerationConfig.logprobs": true,
- "google.generativeai.GenerationConfig.max_output_tokens": true,
- "google.generativeai.GenerationConfig.presence_penalty": true,
- "google.generativeai.GenerationConfig.response_logprobs": true,
- "google.generativeai.GenerationConfig.response_mime_type": true,
- "google.generativeai.GenerationConfig.response_schema": true,
- "google.generativeai.GenerationConfig.seed": true,
- "google.generativeai.GenerationConfig.stop_sequences": true,
- "google.generativeai.GenerationConfig.temperature": true,
- "google.generativeai.GenerationConfig.top_k": true,
- "google.generativeai.GenerationConfig.top_p": true,
- "google.generativeai.GenerativeModel": false,
- "google.generativeai.GenerativeModel.__eq__": true,
- "google.generativeai.GenerativeModel.__ge__": true,
- "google.generativeai.GenerativeModel.__gt__": true,
- "google.generativeai.GenerativeModel.__init__": true,
- "google.generativeai.GenerativeModel.__le__": true,
- "google.generativeai.GenerativeModel.__lt__": true,
- "google.generativeai.GenerativeModel.__ne__": true,
- "google.generativeai.GenerativeModel.__new__": true,
- "google.generativeai.GenerativeModel.cached_content": true,
- "google.generativeai.GenerativeModel.count_tokens": true,
- "google.generativeai.GenerativeModel.count_tokens_async": true,
- "google.generativeai.GenerativeModel.from_cached_content": true,
- "google.generativeai.GenerativeModel.generate_content": true,
- "google.generativeai.GenerativeModel.generate_content_async": true,
- "google.generativeai.GenerativeModel.model_name": true,
- "google.generativeai.GenerativeModel.start_chat": true,
- "google.generativeai.__version__": true,
- "google.generativeai.annotations": true,
- "google.generativeai.caching": false,
- "google.generativeai.caching.CachedContent": false,
- "google.generativeai.caching.CachedContent.__eq__": true,
- "google.generativeai.caching.CachedContent.__ge__": true,
- "google.generativeai.caching.CachedContent.__gt__": true,
- "google.generativeai.caching.CachedContent.__init__": true,
- "google.generativeai.caching.CachedContent.__le__": true,
- "google.generativeai.caching.CachedContent.__lt__": true,
- "google.generativeai.caching.CachedContent.__ne__": true,
- "google.generativeai.caching.CachedContent.__new__": true,
- "google.generativeai.caching.CachedContent.create": true,
- "google.generativeai.caching.CachedContent.create_time": true,
- "google.generativeai.caching.CachedContent.delete": true,
- "google.generativeai.caching.CachedContent.display_name": true,
- "google.generativeai.caching.CachedContent.expire_time": true,
- "google.generativeai.caching.CachedContent.get": true,
- "google.generativeai.caching.CachedContent.list": true,
- "google.generativeai.caching.CachedContent.model": true,
- "google.generativeai.caching.CachedContent.name": true,
- "google.generativeai.caching.CachedContent.update": true,
- "google.generativeai.caching.CachedContent.update_time": true,
- "google.generativeai.caching.CachedContent.usage_metadata": true,
- "google.generativeai.caching.annotations": true,
- "google.generativeai.caching.get_default_cache_client": false,
- "google.generativeai.configure": false,
- "google.generativeai.create_tuned_model": false,
- "google.generativeai.delete_file": false,
- "google.generativeai.delete_tuned_model": false,
- "google.generativeai.embed_content": false,
- "google.generativeai.embed_content_async": false,
- "google.generativeai.get_base_model": false,
- "google.generativeai.get_file": false,
- "google.generativeai.get_model": false,
- "google.generativeai.get_operation": false,
- "google.generativeai.get_tuned_model": false,
- "google.generativeai.list_files": false,
- "google.generativeai.list_models": false,
- "google.generativeai.list_operations": false,
- "google.generativeai.list_tuned_models": false,
- "google.generativeai.protos": false,
- "google.generativeai.protos.AttributionSourceId": false,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId": false,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__call__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__or__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ror__": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.mro": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.part_index": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.passage_id": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": true,
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": false,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__call__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__or__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ror__": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.chunk": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.mro": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.source": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": true,
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": true,
- "google.generativeai.protos.AttributionSourceId.__call__": true,
- "google.generativeai.protos.AttributionSourceId.__eq__": true,
- "google.generativeai.protos.AttributionSourceId.__ge__": true,
- "google.generativeai.protos.AttributionSourceId.__gt__": true,
- "google.generativeai.protos.AttributionSourceId.__init__": true,
- "google.generativeai.protos.AttributionSourceId.__le__": true,
- "google.generativeai.protos.AttributionSourceId.__lt__": true,
- "google.generativeai.protos.AttributionSourceId.__ne__": true,
- "google.generativeai.protos.AttributionSourceId.__new__": true,
- "google.generativeai.protos.AttributionSourceId.__or__": true,
- "google.generativeai.protos.AttributionSourceId.__ror__": true,
- "google.generativeai.protos.AttributionSourceId.copy_from": true,
- "google.generativeai.protos.AttributionSourceId.deserialize": true,
- "google.generativeai.protos.AttributionSourceId.from_json": true,
- "google.generativeai.protos.AttributionSourceId.grounding_passage": true,
- "google.generativeai.protos.AttributionSourceId.mro": true,
- "google.generativeai.protos.AttributionSourceId.pb": true,
- "google.generativeai.protos.AttributionSourceId.semantic_retriever_chunk": true,
- "google.generativeai.protos.AttributionSourceId.serialize": true,
- "google.generativeai.protos.AttributionSourceId.to_dict": true,
- "google.generativeai.protos.AttributionSourceId.to_json": true,
- "google.generativeai.protos.AttributionSourceId.wrap": true,
- "google.generativeai.protos.BatchCreateChunksRequest": false,
- "google.generativeai.protos.BatchCreateChunksRequest.__call__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__eq__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__ge__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__gt__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__init__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__le__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__lt__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__ne__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__new__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__or__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.__ror__": true,
- "google.generativeai.protos.BatchCreateChunksRequest.copy_from": true,
- "google.generativeai.protos.BatchCreateChunksRequest.deserialize": true,
- "google.generativeai.protos.BatchCreateChunksRequest.from_json": true,
- "google.generativeai.protos.BatchCreateChunksRequest.mro": true,
- "google.generativeai.protos.BatchCreateChunksRequest.parent": true,
- "google.generativeai.protos.BatchCreateChunksRequest.pb": true,
- "google.generativeai.protos.BatchCreateChunksRequest.requests": true,
- "google.generativeai.protos.BatchCreateChunksRequest.serialize": true,
- "google.generativeai.protos.BatchCreateChunksRequest.to_dict": true,
- "google.generativeai.protos.BatchCreateChunksRequest.to_json": true,
- "google.generativeai.protos.BatchCreateChunksRequest.wrap": true,
- "google.generativeai.protos.BatchCreateChunksResponse": false,
- "google.generativeai.protos.BatchCreateChunksResponse.__call__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__eq__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__ge__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__gt__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__init__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__le__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__lt__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__ne__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__new__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__or__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.__ror__": true,
- "google.generativeai.protos.BatchCreateChunksResponse.chunks": true,
- "google.generativeai.protos.BatchCreateChunksResponse.copy_from": true,
- "google.generativeai.protos.BatchCreateChunksResponse.deserialize": true,
- "google.generativeai.protos.BatchCreateChunksResponse.from_json": true,
- "google.generativeai.protos.BatchCreateChunksResponse.mro": true,
- "google.generativeai.protos.BatchCreateChunksResponse.pb": true,
- "google.generativeai.protos.BatchCreateChunksResponse.serialize": true,
- "google.generativeai.protos.BatchCreateChunksResponse.to_dict": true,
- "google.generativeai.protos.BatchCreateChunksResponse.to_json": true,
- "google.generativeai.protos.BatchCreateChunksResponse.wrap": true,
- "google.generativeai.protos.BatchDeleteChunksRequest": false,
- "google.generativeai.protos.BatchDeleteChunksRequest.__call__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__init__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__le__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__new__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__or__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.__ror__": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.from_json": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.mro": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.parent": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.pb": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.requests": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.serialize": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.to_json": true,
- "google.generativeai.protos.BatchDeleteChunksRequest.wrap": true,
- "google.generativeai.protos.BatchEmbedContentsRequest": false,
- "google.generativeai.protos.BatchEmbedContentsRequest.__call__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__init__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__le__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__new__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__or__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.__ror__": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.from_json": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.model": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.mro": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.pb": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.requests": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.serialize": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.to_json": true,
- "google.generativeai.protos.BatchEmbedContentsRequest.wrap": true,
- "google.generativeai.protos.BatchEmbedContentsResponse": false,
- "google.generativeai.protos.BatchEmbedContentsResponse.__call__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__init__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__le__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__new__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__or__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.__ror__": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.embeddings": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.from_json": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.mro": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.pb": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.serialize": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.to_json": true,
- "google.generativeai.protos.BatchEmbedContentsResponse.wrap": true,
- "google.generativeai.protos.BatchEmbedTextRequest": false,
- "google.generativeai.protos.BatchEmbedTextRequest.__call__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__eq__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__ge__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__gt__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__init__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__le__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__lt__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__ne__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__new__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__or__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.__ror__": true,
- "google.generativeai.protos.BatchEmbedTextRequest.copy_from": true,
- "google.generativeai.protos.BatchEmbedTextRequest.deserialize": true,
- "google.generativeai.protos.BatchEmbedTextRequest.from_json": true,
- "google.generativeai.protos.BatchEmbedTextRequest.model": true,
- "google.generativeai.protos.BatchEmbedTextRequest.mro": true,
- "google.generativeai.protos.BatchEmbedTextRequest.pb": true,
- "google.generativeai.protos.BatchEmbedTextRequest.requests": true,
- "google.generativeai.protos.BatchEmbedTextRequest.serialize": true,
- "google.generativeai.protos.BatchEmbedTextRequest.texts": true,
- "google.generativeai.protos.BatchEmbedTextRequest.to_dict": true,
- "google.generativeai.protos.BatchEmbedTextRequest.to_json": true,
- "google.generativeai.protos.BatchEmbedTextRequest.wrap": true,
- "google.generativeai.protos.BatchEmbedTextResponse": false,
- "google.generativeai.protos.BatchEmbedTextResponse.__call__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__eq__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__ge__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__gt__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__init__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__le__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__lt__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__ne__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__new__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__or__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.__ror__": true,
- "google.generativeai.protos.BatchEmbedTextResponse.copy_from": true,
- "google.generativeai.protos.BatchEmbedTextResponse.deserialize": true,
- "google.generativeai.protos.BatchEmbedTextResponse.embeddings": true,
- "google.generativeai.protos.BatchEmbedTextResponse.from_json": true,
- "google.generativeai.protos.BatchEmbedTextResponse.mro": true,
- "google.generativeai.protos.BatchEmbedTextResponse.pb": true,
- "google.generativeai.protos.BatchEmbedTextResponse.serialize": true,
- "google.generativeai.protos.BatchEmbedTextResponse.to_dict": true,
- "google.generativeai.protos.BatchEmbedTextResponse.to_json": true,
- "google.generativeai.protos.BatchEmbedTextResponse.wrap": true,
- "google.generativeai.protos.BatchUpdateChunksRequest": false,
- "google.generativeai.protos.BatchUpdateChunksRequest.__call__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__init__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__le__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__new__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__or__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.__ror__": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.from_json": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.mro": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.parent": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.pb": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.requests": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.serialize": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.to_json": true,
- "google.generativeai.protos.BatchUpdateChunksRequest.wrap": true,
- "google.generativeai.protos.BatchUpdateChunksResponse": false,
- "google.generativeai.protos.BatchUpdateChunksResponse.__call__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__init__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__le__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__new__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__or__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.__ror__": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.chunks": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.from_json": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.mro": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.pb": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.serialize": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.to_json": true,
- "google.generativeai.protos.BatchUpdateChunksResponse.wrap": true,
- "google.generativeai.protos.Blob": false,
- "google.generativeai.protos.Blob.__call__": true,
- "google.generativeai.protos.Blob.__eq__": true,
- "google.generativeai.protos.Blob.__ge__": true,
- "google.generativeai.protos.Blob.__gt__": true,
- "google.generativeai.protos.Blob.__init__": true,
- "google.generativeai.protos.Blob.__le__": true,
- "google.generativeai.protos.Blob.__lt__": true,
- "google.generativeai.protos.Blob.__ne__": true,
- "google.generativeai.protos.Blob.__new__": true,
- "google.generativeai.protos.Blob.__or__": true,
- "google.generativeai.protos.Blob.__ror__": true,
- "google.generativeai.protos.Blob.copy_from": true,
- "google.generativeai.protos.Blob.data": true,
- "google.generativeai.protos.Blob.deserialize": true,
- "google.generativeai.protos.Blob.from_json": true,
- "google.generativeai.protos.Blob.mime_type": true,
- "google.generativeai.protos.Blob.mro": true,
- "google.generativeai.protos.Blob.pb": true,
- "google.generativeai.protos.Blob.serialize": true,
- "google.generativeai.protos.Blob.to_dict": true,
- "google.generativeai.protos.Blob.to_json": true,
- "google.generativeai.protos.Blob.wrap": true,
- "google.generativeai.protos.CachedContent": false,
- "google.generativeai.protos.CachedContent.UsageMetadata": false,
- "google.generativeai.protos.CachedContent.UsageMetadata.__call__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__init__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__le__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__new__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__or__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.__ror__": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.from_json": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.mro": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.pb": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.serialize": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.to_json": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.total_token_count": true,
- "google.generativeai.protos.CachedContent.UsageMetadata.wrap": true,
- "google.generativeai.protos.CachedContent.__call__": true,
- "google.generativeai.protos.CachedContent.__eq__": true,
- "google.generativeai.protos.CachedContent.__ge__": true,
- "google.generativeai.protos.CachedContent.__gt__": true,
- "google.generativeai.protos.CachedContent.__init__": true,
- "google.generativeai.protos.CachedContent.__le__": true,
- "google.generativeai.protos.CachedContent.__lt__": true,
- "google.generativeai.protos.CachedContent.__ne__": true,
- "google.generativeai.protos.CachedContent.__new__": true,
- "google.generativeai.protos.CachedContent.__or__": true,
- "google.generativeai.protos.CachedContent.__ror__": true,
- "google.generativeai.protos.CachedContent.contents": true,
- "google.generativeai.protos.CachedContent.copy_from": true,
- "google.generativeai.protos.CachedContent.create_time": true,
- "google.generativeai.protos.CachedContent.deserialize": true,
- "google.generativeai.protos.CachedContent.display_name": true,
- "google.generativeai.protos.CachedContent.expire_time": true,
- "google.generativeai.protos.CachedContent.from_json": true,
- "google.generativeai.protos.CachedContent.model": true,
- "google.generativeai.protos.CachedContent.mro": true,
- "google.generativeai.protos.CachedContent.name": true,
- "google.generativeai.protos.CachedContent.pb": true,
- "google.generativeai.protos.CachedContent.serialize": true,
- "google.generativeai.protos.CachedContent.system_instruction": true,
- "google.generativeai.protos.CachedContent.to_dict": true,
- "google.generativeai.protos.CachedContent.to_json": true,
- "google.generativeai.protos.CachedContent.tool_config": true,
- "google.generativeai.protos.CachedContent.tools": true,
- "google.generativeai.protos.CachedContent.ttl": true,
- "google.generativeai.protos.CachedContent.update_time": true,
- "google.generativeai.protos.CachedContent.usage_metadata": true,
- "google.generativeai.protos.CachedContent.wrap": true,
- "google.generativeai.protos.Candidate": false,
- "google.generativeai.protos.Candidate.FinishReason": false,
- "google.generativeai.protos.Candidate.FinishReason.BLOCKLIST": true,
- "google.generativeai.protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED": true,
- "google.generativeai.protos.Candidate.FinishReason.LANGUAGE": true,
- "google.generativeai.protos.Candidate.FinishReason.MALFORMED_FUNCTION_CALL": true,
- "google.generativeai.protos.Candidate.FinishReason.MAX_TOKENS": true,
- "google.generativeai.protos.Candidate.FinishReason.OTHER": true,
- "google.generativeai.protos.Candidate.FinishReason.PROHIBITED_CONTENT": true,
- "google.generativeai.protos.Candidate.FinishReason.RECITATION": true,
- "google.generativeai.protos.Candidate.FinishReason.SAFETY": true,
- "google.generativeai.protos.Candidate.FinishReason.SPII": true,
- "google.generativeai.protos.Candidate.FinishReason.STOP": true,
- "google.generativeai.protos.Candidate.FinishReason.__abs__": true,
- "google.generativeai.protos.Candidate.FinishReason.__add__": true,
- "google.generativeai.protos.Candidate.FinishReason.__and__": true,
- "google.generativeai.protos.Candidate.FinishReason.__bool__": true,
- "google.generativeai.protos.Candidate.FinishReason.__contains__": true,
- "google.generativeai.protos.Candidate.FinishReason.__eq__": true,
- "google.generativeai.protos.Candidate.FinishReason.__floordiv__": true,
- "google.generativeai.protos.Candidate.FinishReason.__ge__": true,
- "google.generativeai.protos.Candidate.FinishReason.__getitem__": true,
- "google.generativeai.protos.Candidate.FinishReason.__gt__": true,
- "google.generativeai.protos.Candidate.FinishReason.__init__": true,
- "google.generativeai.protos.Candidate.FinishReason.__invert__": true,
- "google.generativeai.protos.Candidate.FinishReason.__iter__": true,
- "google.generativeai.protos.Candidate.FinishReason.__le__": true,
- "google.generativeai.protos.Candidate.FinishReason.__len__": true,
- "google.generativeai.protos.Candidate.FinishReason.__lshift__": true,
- "google.generativeai.protos.Candidate.FinishReason.__lt__": true,
- "google.generativeai.protos.Candidate.FinishReason.__mod__": true,
- "google.generativeai.protos.Candidate.FinishReason.__mul__": true,
- "google.generativeai.protos.Candidate.FinishReason.__ne__": true,
- "google.generativeai.protos.Candidate.FinishReason.__neg__": true,
- "google.generativeai.protos.Candidate.FinishReason.__new__": true,
- "google.generativeai.protos.Candidate.FinishReason.__or__": true,
- "google.generativeai.protos.Candidate.FinishReason.__pos__": true,
- "google.generativeai.protos.Candidate.FinishReason.__pow__": true,
- "google.generativeai.protos.Candidate.FinishReason.__radd__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rand__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rlshift__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rmod__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rmul__": true,
- "google.generativeai.protos.Candidate.FinishReason.__ror__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rpow__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rrshift__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rshift__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rsub__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rtruediv__": true,
- "google.generativeai.protos.Candidate.FinishReason.__rxor__": true,
- "google.generativeai.protos.Candidate.FinishReason.__sub__": true,
- "google.generativeai.protos.Candidate.FinishReason.__truediv__": true,
- "google.generativeai.protos.Candidate.FinishReason.__xor__": true,
- "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio": true,
- "google.generativeai.protos.Candidate.FinishReason.bit_count": true,
- "google.generativeai.protos.Candidate.FinishReason.bit_length": true,
- "google.generativeai.protos.Candidate.FinishReason.conjugate": true,
- "google.generativeai.protos.Candidate.FinishReason.denominator": true,
- "google.generativeai.protos.Candidate.FinishReason.from_bytes": true,
- "google.generativeai.protos.Candidate.FinishReason.imag": true,
- "google.generativeai.protos.Candidate.FinishReason.is_integer": true,
- "google.generativeai.protos.Candidate.FinishReason.numerator": true,
- "google.generativeai.protos.Candidate.FinishReason.real": true,
- "google.generativeai.protos.Candidate.FinishReason.to_bytes": true,
- "google.generativeai.protos.Candidate.__call__": true,
- "google.generativeai.protos.Candidate.__eq__": true,
- "google.generativeai.protos.Candidate.__ge__": true,
- "google.generativeai.protos.Candidate.__gt__": true,
- "google.generativeai.protos.Candidate.__init__": true,
- "google.generativeai.protos.Candidate.__le__": true,
- "google.generativeai.protos.Candidate.__lt__": true,
- "google.generativeai.protos.Candidate.__ne__": true,
- "google.generativeai.protos.Candidate.__new__": true,
- "google.generativeai.protos.Candidate.__or__": true,
- "google.generativeai.protos.Candidate.__ror__": true,
- "google.generativeai.protos.Candidate.avg_logprobs": true,
- "google.generativeai.protos.Candidate.citation_metadata": true,
- "google.generativeai.protos.Candidate.content": true,
- "google.generativeai.protos.Candidate.copy_from": true,
- "google.generativeai.protos.Candidate.deserialize": true,
- "google.generativeai.protos.Candidate.finish_reason": true,
- "google.generativeai.protos.Candidate.from_json": true,
- "google.generativeai.protos.Candidate.grounding_attributions": true,
- "google.generativeai.protos.Candidate.grounding_metadata": true,
- "google.generativeai.protos.Candidate.index": true,
- "google.generativeai.protos.Candidate.logprobs_result": true,
- "google.generativeai.protos.Candidate.mro": true,
- "google.generativeai.protos.Candidate.pb": true,
- "google.generativeai.protos.Candidate.safety_ratings": true,
- "google.generativeai.protos.Candidate.serialize": true,
- "google.generativeai.protos.Candidate.to_dict": true,
- "google.generativeai.protos.Candidate.to_json": true,
- "google.generativeai.protos.Candidate.token_count": true,
- "google.generativeai.protos.Candidate.wrap": true,
- "google.generativeai.protos.Chunk": false,
- "google.generativeai.protos.Chunk.State": false,
- "google.generativeai.protos.Chunk.State.STATE_ACTIVE": true,
- "google.generativeai.protos.Chunk.State.STATE_FAILED": true,
- "google.generativeai.protos.Chunk.State.STATE_PENDING_PROCESSING": true,
- "google.generativeai.protos.Chunk.State.STATE_UNSPECIFIED": true,
- "google.generativeai.protos.Chunk.State.__abs__": true,
- "google.generativeai.protos.Chunk.State.__add__": true,
- "google.generativeai.protos.Chunk.State.__and__": true,
- "google.generativeai.protos.Chunk.State.__bool__": true,
- "google.generativeai.protos.Chunk.State.__contains__": true,
- "google.generativeai.protos.Chunk.State.__eq__": true,
- "google.generativeai.protos.Chunk.State.__floordiv__": true,
- "google.generativeai.protos.Chunk.State.__ge__": true,
- "google.generativeai.protos.Chunk.State.__getitem__": true,
- "google.generativeai.protos.Chunk.State.__gt__": true,
- "google.generativeai.protos.Chunk.State.__init__": true,
- "google.generativeai.protos.Chunk.State.__invert__": true,
- "google.generativeai.protos.Chunk.State.__iter__": true,
- "google.generativeai.protos.Chunk.State.__le__": true,
- "google.generativeai.protos.Chunk.State.__len__": true,
- "google.generativeai.protos.Chunk.State.__lshift__": true,
- "google.generativeai.protos.Chunk.State.__lt__": true,
- "google.generativeai.protos.Chunk.State.__mod__": true,
- "google.generativeai.protos.Chunk.State.__mul__": true,
- "google.generativeai.protos.Chunk.State.__ne__": true,
- "google.generativeai.protos.Chunk.State.__neg__": true,
- "google.generativeai.protos.Chunk.State.__new__": true,
- "google.generativeai.protos.Chunk.State.__or__": true,
- "google.generativeai.protos.Chunk.State.__pos__": true,
- "google.generativeai.protos.Chunk.State.__pow__": true,
- "google.generativeai.protos.Chunk.State.__radd__": true,
- "google.generativeai.protos.Chunk.State.__rand__": true,
- "google.generativeai.protos.Chunk.State.__rfloordiv__": true,
- "google.generativeai.protos.Chunk.State.__rlshift__": true,
- "google.generativeai.protos.Chunk.State.__rmod__": true,
- "google.generativeai.protos.Chunk.State.__rmul__": true,
- "google.generativeai.protos.Chunk.State.__ror__": true,
- "google.generativeai.protos.Chunk.State.__rpow__": true,
- "google.generativeai.protos.Chunk.State.__rrshift__": true,
- "google.generativeai.protos.Chunk.State.__rshift__": true,
- "google.generativeai.protos.Chunk.State.__rsub__": true,
- "google.generativeai.protos.Chunk.State.__rtruediv__": true,
- "google.generativeai.protos.Chunk.State.__rxor__": true,
- "google.generativeai.protos.Chunk.State.__sub__": true,
- "google.generativeai.protos.Chunk.State.__truediv__": true,
- "google.generativeai.protos.Chunk.State.__xor__": true,
- "google.generativeai.protos.Chunk.State.as_integer_ratio": true,
- "google.generativeai.protos.Chunk.State.bit_count": true,
- "google.generativeai.protos.Chunk.State.bit_length": true,
- "google.generativeai.protos.Chunk.State.conjugate": true,
- "google.generativeai.protos.Chunk.State.denominator": true,
- "google.generativeai.protos.Chunk.State.from_bytes": true,
- "google.generativeai.protos.Chunk.State.imag": true,
- "google.generativeai.protos.Chunk.State.is_integer": true,
- "google.generativeai.protos.Chunk.State.numerator": true,
- "google.generativeai.protos.Chunk.State.real": true,
- "google.generativeai.protos.Chunk.State.to_bytes": true,
- "google.generativeai.protos.Chunk.__call__": true,
- "google.generativeai.protos.Chunk.__eq__": true,
- "google.generativeai.protos.Chunk.__ge__": true,
- "google.generativeai.protos.Chunk.__gt__": true,
- "google.generativeai.protos.Chunk.__init__": true,
- "google.generativeai.protos.Chunk.__le__": true,
- "google.generativeai.protos.Chunk.__lt__": true,
- "google.generativeai.protos.Chunk.__ne__": true,
- "google.generativeai.protos.Chunk.__new__": true,
- "google.generativeai.protos.Chunk.__or__": true,
- "google.generativeai.protos.Chunk.__ror__": true,
- "google.generativeai.protos.Chunk.copy_from": true,
- "google.generativeai.protos.Chunk.create_time": true,
- "google.generativeai.protos.Chunk.custom_metadata": true,
- "google.generativeai.protos.Chunk.data": true,
- "google.generativeai.protos.Chunk.deserialize": true,
- "google.generativeai.protos.Chunk.from_json": true,
- "google.generativeai.protos.Chunk.mro": true,
- "google.generativeai.protos.Chunk.name": true,
- "google.generativeai.protos.Chunk.pb": true,
- "google.generativeai.protos.Chunk.serialize": true,
- "google.generativeai.protos.Chunk.state": true,
- "google.generativeai.protos.Chunk.to_dict": true,
- "google.generativeai.protos.Chunk.to_json": true,
- "google.generativeai.protos.Chunk.update_time": true,
- "google.generativeai.protos.Chunk.wrap": true,
- "google.generativeai.protos.ChunkData": false,
- "google.generativeai.protos.ChunkData.__call__": true,
- "google.generativeai.protos.ChunkData.__eq__": true,
- "google.generativeai.protos.ChunkData.__ge__": true,
- "google.generativeai.protos.ChunkData.__gt__": true,
- "google.generativeai.protos.ChunkData.__init__": true,
- "google.generativeai.protos.ChunkData.__le__": true,
- "google.generativeai.protos.ChunkData.__lt__": true,
- "google.generativeai.protos.ChunkData.__ne__": true,
- "google.generativeai.protos.ChunkData.__new__": true,
- "google.generativeai.protos.ChunkData.__or__": true,
- "google.generativeai.protos.ChunkData.__ror__": true,
- "google.generativeai.protos.ChunkData.copy_from": true,
- "google.generativeai.protos.ChunkData.deserialize": true,
- "google.generativeai.protos.ChunkData.from_json": true,
- "google.generativeai.protos.ChunkData.mro": true,
- "google.generativeai.protos.ChunkData.pb": true,
- "google.generativeai.protos.ChunkData.serialize": true,
- "google.generativeai.protos.ChunkData.string_value": true,
- "google.generativeai.protos.ChunkData.to_dict": true,
- "google.generativeai.protos.ChunkData.to_json": true,
- "google.generativeai.protos.ChunkData.wrap": true,
- "google.generativeai.protos.CitationMetadata": false,
- "google.generativeai.protos.CitationMetadata.__call__": true,
- "google.generativeai.protos.CitationMetadata.__eq__": true,
- "google.generativeai.protos.CitationMetadata.__ge__": true,
- "google.generativeai.protos.CitationMetadata.__gt__": true,
- "google.generativeai.protos.CitationMetadata.__init__": true,
- "google.generativeai.protos.CitationMetadata.__le__": true,
- "google.generativeai.protos.CitationMetadata.__lt__": true,
- "google.generativeai.protos.CitationMetadata.__ne__": true,
- "google.generativeai.protos.CitationMetadata.__new__": true,
- "google.generativeai.protos.CitationMetadata.__or__": true,
- "google.generativeai.protos.CitationMetadata.__ror__": true,
- "google.generativeai.protos.CitationMetadata.citation_sources": true,
- "google.generativeai.protos.CitationMetadata.copy_from": true,
- "google.generativeai.protos.CitationMetadata.deserialize": true,
- "google.generativeai.protos.CitationMetadata.from_json": true,
- "google.generativeai.protos.CitationMetadata.mro": true,
- "google.generativeai.protos.CitationMetadata.pb": true,
- "google.generativeai.protos.CitationMetadata.serialize": true,
- "google.generativeai.protos.CitationMetadata.to_dict": true,
- "google.generativeai.protos.CitationMetadata.to_json": true,
- "google.generativeai.protos.CitationMetadata.wrap": true,
- "google.generativeai.protos.CitationSource": false,
- "google.generativeai.protos.CitationSource.__call__": true,
- "google.generativeai.protos.CitationSource.__eq__": true,
- "google.generativeai.protos.CitationSource.__ge__": true,
- "google.generativeai.protos.CitationSource.__gt__": true,
- "google.generativeai.protos.CitationSource.__init__": true,
- "google.generativeai.protos.CitationSource.__le__": true,
- "google.generativeai.protos.CitationSource.__lt__": true,
- "google.generativeai.protos.CitationSource.__ne__": true,
- "google.generativeai.protos.CitationSource.__new__": true,
- "google.generativeai.protos.CitationSource.__or__": true,
- "google.generativeai.protos.CitationSource.__ror__": true,
- "google.generativeai.protos.CitationSource.copy_from": true,
- "google.generativeai.protos.CitationSource.deserialize": true,
- "google.generativeai.protos.CitationSource.end_index": true,
- "google.generativeai.protos.CitationSource.from_json": true,
- "google.generativeai.protos.CitationSource.license_": true,
- "google.generativeai.protos.CitationSource.mro": true,
- "google.generativeai.protos.CitationSource.pb": true,
- "google.generativeai.protos.CitationSource.serialize": true,
- "google.generativeai.protos.CitationSource.start_index": true,
- "google.generativeai.protos.CitationSource.to_dict": true,
- "google.generativeai.protos.CitationSource.to_json": true,
- "google.generativeai.protos.CitationSource.uri": true,
- "google.generativeai.protos.CitationSource.wrap": true,
- "google.generativeai.protos.CodeExecution": false,
- "google.generativeai.protos.CodeExecution.__call__": true,
- "google.generativeai.protos.CodeExecution.__eq__": true,
- "google.generativeai.protos.CodeExecution.__ge__": true,
- "google.generativeai.protos.CodeExecution.__gt__": true,
- "google.generativeai.protos.CodeExecution.__init__": true,
- "google.generativeai.protos.CodeExecution.__le__": true,
- "google.generativeai.protos.CodeExecution.__lt__": true,
- "google.generativeai.protos.CodeExecution.__ne__": true,
- "google.generativeai.protos.CodeExecution.__new__": true,
- "google.generativeai.protos.CodeExecution.__or__": true,
- "google.generativeai.protos.CodeExecution.__ror__": true,
- "google.generativeai.protos.CodeExecution.copy_from": true,
- "google.generativeai.protos.CodeExecution.deserialize": true,
- "google.generativeai.protos.CodeExecution.from_json": true,
- "google.generativeai.protos.CodeExecution.mro": true,
- "google.generativeai.protos.CodeExecution.pb": true,
- "google.generativeai.protos.CodeExecution.serialize": true,
- "google.generativeai.protos.CodeExecution.to_dict": true,
- "google.generativeai.protos.CodeExecution.to_json": true,
- "google.generativeai.protos.CodeExecution.wrap": true,
- "google.generativeai.protos.CodeExecutionResult": false,
- "google.generativeai.protos.CodeExecutionResult.Outcome": false,
- "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_DEADLINE_EXCEEDED": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_FAILED": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_OK": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_UNSPECIFIED": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.from_bytes": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.imag": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.real": true,
- "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": true,
- "google.generativeai.protos.CodeExecutionResult.__call__": true,
- "google.generativeai.protos.CodeExecutionResult.__eq__": true,
- "google.generativeai.protos.CodeExecutionResult.__ge__": true,
- "google.generativeai.protos.CodeExecutionResult.__gt__": true,
- "google.generativeai.protos.CodeExecutionResult.__init__": true,
- "google.generativeai.protos.CodeExecutionResult.__le__": true,
- "google.generativeai.protos.CodeExecutionResult.__lt__": true,
- "google.generativeai.protos.CodeExecutionResult.__ne__": true,
- "google.generativeai.protos.CodeExecutionResult.__new__": true,
- "google.generativeai.protos.CodeExecutionResult.__or__": true,
- "google.generativeai.protos.CodeExecutionResult.__ror__": true,
- "google.generativeai.protos.CodeExecutionResult.copy_from": true,
- "google.generativeai.protos.CodeExecutionResult.deserialize": true,
- "google.generativeai.protos.CodeExecutionResult.from_json": true,
- "google.generativeai.protos.CodeExecutionResult.mro": true,
- "google.generativeai.protos.CodeExecutionResult.outcome": true,
- "google.generativeai.protos.CodeExecutionResult.output": true,
- "google.generativeai.protos.CodeExecutionResult.pb": true,
- "google.generativeai.protos.CodeExecutionResult.serialize": true,
- "google.generativeai.protos.CodeExecutionResult.to_dict": true,
- "google.generativeai.protos.CodeExecutionResult.to_json": true,
- "google.generativeai.protos.CodeExecutionResult.wrap": true,
- "google.generativeai.protos.Condition": false,
- "google.generativeai.protos.Condition.Operator": false,
- "google.generativeai.protos.Condition.Operator.EQUAL": true,
- "google.generativeai.protos.Condition.Operator.EXCLUDES": true,
- "google.generativeai.protos.Condition.Operator.GREATER": true,
- "google.generativeai.protos.Condition.Operator.GREATER_EQUAL": true,
- "google.generativeai.protos.Condition.Operator.INCLUDES": true,
- "google.generativeai.protos.Condition.Operator.LESS": true,
- "google.generativeai.protos.Condition.Operator.LESS_EQUAL": true,
- "google.generativeai.protos.Condition.Operator.NOT_EQUAL": true,
- "google.generativeai.protos.Condition.Operator.OPERATOR_UNSPECIFIED": true,
- "google.generativeai.protos.Condition.Operator.__abs__": true,
- "google.generativeai.protos.Condition.Operator.__add__": true,
- "google.generativeai.protos.Condition.Operator.__and__": true,
- "google.generativeai.protos.Condition.Operator.__bool__": true,
- "google.generativeai.protos.Condition.Operator.__contains__": true,
- "google.generativeai.protos.Condition.Operator.__eq__": true,
- "google.generativeai.protos.Condition.Operator.__floordiv__": true,
- "google.generativeai.protos.Condition.Operator.__ge__": true,
- "google.generativeai.protos.Condition.Operator.__getitem__": true,
- "google.generativeai.protos.Condition.Operator.__gt__": true,
- "google.generativeai.protos.Condition.Operator.__init__": true,
- "google.generativeai.protos.Condition.Operator.__invert__": true,
- "google.generativeai.protos.Condition.Operator.__iter__": true,
- "google.generativeai.protos.Condition.Operator.__le__": true,
- "google.generativeai.protos.Condition.Operator.__len__": true,
- "google.generativeai.protos.Condition.Operator.__lshift__": true,
- "google.generativeai.protos.Condition.Operator.__lt__": true,
- "google.generativeai.protos.Condition.Operator.__mod__": true,
- "google.generativeai.protos.Condition.Operator.__mul__": true,
- "google.generativeai.protos.Condition.Operator.__ne__": true,
- "google.generativeai.protos.Condition.Operator.__neg__": true,
- "google.generativeai.protos.Condition.Operator.__new__": true,
- "google.generativeai.protos.Condition.Operator.__or__": true,
- "google.generativeai.protos.Condition.Operator.__pos__": true,
- "google.generativeai.protos.Condition.Operator.__pow__": true,
- "google.generativeai.protos.Condition.Operator.__radd__": true,
- "google.generativeai.protos.Condition.Operator.__rand__": true,
- "google.generativeai.protos.Condition.Operator.__rfloordiv__": true,
- "google.generativeai.protos.Condition.Operator.__rlshift__": true,
- "google.generativeai.protos.Condition.Operator.__rmod__": true,
- "google.generativeai.protos.Condition.Operator.__rmul__": true,
- "google.generativeai.protos.Condition.Operator.__ror__": true,
- "google.generativeai.protos.Condition.Operator.__rpow__": true,
- "google.generativeai.protos.Condition.Operator.__rrshift__": true,
- "google.generativeai.protos.Condition.Operator.__rshift__": true,
- "google.generativeai.protos.Condition.Operator.__rsub__": true,
- "google.generativeai.protos.Condition.Operator.__rtruediv__": true,
- "google.generativeai.protos.Condition.Operator.__rxor__": true,
- "google.generativeai.protos.Condition.Operator.__sub__": true,
- "google.generativeai.protos.Condition.Operator.__truediv__": true,
- "google.generativeai.protos.Condition.Operator.__xor__": true,
- "google.generativeai.protos.Condition.Operator.as_integer_ratio": true,
- "google.generativeai.protos.Condition.Operator.bit_count": true,
- "google.generativeai.protos.Condition.Operator.bit_length": true,
- "google.generativeai.protos.Condition.Operator.conjugate": true,
- "google.generativeai.protos.Condition.Operator.denominator": true,
- "google.generativeai.protos.Condition.Operator.from_bytes": true,
- "google.generativeai.protos.Condition.Operator.imag": true,
- "google.generativeai.protos.Condition.Operator.is_integer": true,
- "google.generativeai.protos.Condition.Operator.numerator": true,
- "google.generativeai.protos.Condition.Operator.real": true,
- "google.generativeai.protos.Condition.Operator.to_bytes": true,
- "google.generativeai.protos.Condition.__call__": true,
- "google.generativeai.protos.Condition.__eq__": true,
- "google.generativeai.protos.Condition.__ge__": true,
- "google.generativeai.protos.Condition.__gt__": true,
- "google.generativeai.protos.Condition.__init__": true,
- "google.generativeai.protos.Condition.__le__": true,
- "google.generativeai.protos.Condition.__lt__": true,
- "google.generativeai.protos.Condition.__ne__": true,
- "google.generativeai.protos.Condition.__new__": true,
- "google.generativeai.protos.Condition.__or__": true,
- "google.generativeai.protos.Condition.__ror__": true,
- "google.generativeai.protos.Condition.copy_from": true,
- "google.generativeai.protos.Condition.deserialize": true,
- "google.generativeai.protos.Condition.from_json": true,
- "google.generativeai.protos.Condition.mro": true,
- "google.generativeai.protos.Condition.numeric_value": true,
- "google.generativeai.protos.Condition.operation": true,
- "google.generativeai.protos.Condition.pb": true,
- "google.generativeai.protos.Condition.serialize": true,
- "google.generativeai.protos.Condition.string_value": true,
- "google.generativeai.protos.Condition.to_dict": true,
- "google.generativeai.protos.Condition.to_json": true,
- "google.generativeai.protos.Condition.wrap": true,
- "google.generativeai.protos.Content": false,
- "google.generativeai.protos.Content.__call__": true,
- "google.generativeai.protos.Content.__eq__": true,
- "google.generativeai.protos.Content.__ge__": true,
- "google.generativeai.protos.Content.__gt__": true,
- "google.generativeai.protos.Content.__init__": true,
- "google.generativeai.protos.Content.__le__": true,
- "google.generativeai.protos.Content.__lt__": true,
- "google.generativeai.protos.Content.__ne__": true,
- "google.generativeai.protos.Content.__new__": true,
- "google.generativeai.protos.Content.__or__": true,
- "google.generativeai.protos.Content.__ror__": true,
- "google.generativeai.protos.Content.copy_from": true,
- "google.generativeai.protos.Content.deserialize": true,
- "google.generativeai.protos.Content.from_json": true,
- "google.generativeai.protos.Content.mro": true,
- "google.generativeai.protos.Content.parts": true,
- "google.generativeai.protos.Content.pb": true,
- "google.generativeai.protos.Content.role": true,
- "google.generativeai.protos.Content.serialize": true,
- "google.generativeai.protos.Content.to_dict": true,
- "google.generativeai.protos.Content.to_json": true,
- "google.generativeai.protos.Content.wrap": true,
- "google.generativeai.protos.ContentEmbedding": false,
- "google.generativeai.protos.ContentEmbedding.__call__": true,
- "google.generativeai.protos.ContentEmbedding.__eq__": true,
- "google.generativeai.protos.ContentEmbedding.__ge__": true,
- "google.generativeai.protos.ContentEmbedding.__gt__": true,
- "google.generativeai.protos.ContentEmbedding.__init__": true,
- "google.generativeai.protos.ContentEmbedding.__le__": true,
- "google.generativeai.protos.ContentEmbedding.__lt__": true,
- "google.generativeai.protos.ContentEmbedding.__ne__": true,
- "google.generativeai.protos.ContentEmbedding.__new__": true,
- "google.generativeai.protos.ContentEmbedding.__or__": true,
- "google.generativeai.protos.ContentEmbedding.__ror__": true,
- "google.generativeai.protos.ContentEmbedding.copy_from": true,
- "google.generativeai.protos.ContentEmbedding.deserialize": true,
- "google.generativeai.protos.ContentEmbedding.from_json": true,
- "google.generativeai.protos.ContentEmbedding.mro": true,
- "google.generativeai.protos.ContentEmbedding.pb": true,
- "google.generativeai.protos.ContentEmbedding.serialize": true,
- "google.generativeai.protos.ContentEmbedding.to_dict": true,
- "google.generativeai.protos.ContentEmbedding.to_json": true,
- "google.generativeai.protos.ContentEmbedding.values": true,
- "google.generativeai.protos.ContentEmbedding.wrap": true,
- "google.generativeai.protos.ContentFilter": false,
- "google.generativeai.protos.ContentFilter.BlockedReason": false,
- "google.generativeai.protos.ContentFilter.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.OTHER": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.SAFETY": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__add__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__and__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__contains__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__init__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__iter__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__le__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__len__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__new__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__or__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.denominator": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.imag": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.numerator": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.real": true,
- "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": true,
- "google.generativeai.protos.ContentFilter.__call__": true,
- "google.generativeai.protos.ContentFilter.__eq__": true,
- "google.generativeai.protos.ContentFilter.__ge__": true,
- "google.generativeai.protos.ContentFilter.__gt__": true,
- "google.generativeai.protos.ContentFilter.__init__": true,
- "google.generativeai.protos.ContentFilter.__le__": true,
- "google.generativeai.protos.ContentFilter.__lt__": true,
- "google.generativeai.protos.ContentFilter.__ne__": true,
- "google.generativeai.protos.ContentFilter.__new__": true,
- "google.generativeai.protos.ContentFilter.__or__": true,
- "google.generativeai.protos.ContentFilter.__ror__": true,
- "google.generativeai.protos.ContentFilter.copy_from": true,
- "google.generativeai.protos.ContentFilter.deserialize": true,
- "google.generativeai.protos.ContentFilter.from_json": true,
- "google.generativeai.protos.ContentFilter.message": true,
- "google.generativeai.protos.ContentFilter.mro": true,
- "google.generativeai.protos.ContentFilter.pb": true,
- "google.generativeai.protos.ContentFilter.reason": true,
- "google.generativeai.protos.ContentFilter.serialize": true,
- "google.generativeai.protos.ContentFilter.to_dict": true,
- "google.generativeai.protos.ContentFilter.to_json": true,
- "google.generativeai.protos.ContentFilter.wrap": true,
- "google.generativeai.protos.Corpus": false,
- "google.generativeai.protos.Corpus.__call__": true,
- "google.generativeai.protos.Corpus.__eq__": true,
- "google.generativeai.protos.Corpus.__ge__": true,
- "google.generativeai.protos.Corpus.__gt__": true,
- "google.generativeai.protos.Corpus.__init__": true,
- "google.generativeai.protos.Corpus.__le__": true,
- "google.generativeai.protos.Corpus.__lt__": true,
- "google.generativeai.protos.Corpus.__ne__": true,
- "google.generativeai.protos.Corpus.__new__": true,
- "google.generativeai.protos.Corpus.__or__": true,
- "google.generativeai.protos.Corpus.__ror__": true,
- "google.generativeai.protos.Corpus.copy_from": true,
- "google.generativeai.protos.Corpus.create_time": true,
- "google.generativeai.protos.Corpus.deserialize": true,
- "google.generativeai.protos.Corpus.display_name": true,
- "google.generativeai.protos.Corpus.from_json": true,
- "google.generativeai.protos.Corpus.mro": true,
- "google.generativeai.protos.Corpus.name": true,
- "google.generativeai.protos.Corpus.pb": true,
- "google.generativeai.protos.Corpus.serialize": true,
- "google.generativeai.protos.Corpus.to_dict": true,
- "google.generativeai.protos.Corpus.to_json": true,
- "google.generativeai.protos.Corpus.update_time": true,
- "google.generativeai.protos.Corpus.wrap": true,
- "google.generativeai.protos.CountMessageTokensRequest": false,
- "google.generativeai.protos.CountMessageTokensRequest.__call__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__eq__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__ge__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__gt__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__init__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__le__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__lt__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__ne__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__new__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__or__": true,
- "google.generativeai.protos.CountMessageTokensRequest.__ror__": true,
- "google.generativeai.protos.CountMessageTokensRequest.copy_from": true,
- "google.generativeai.protos.CountMessageTokensRequest.deserialize": true,
- "google.generativeai.protos.CountMessageTokensRequest.from_json": true,
- "google.generativeai.protos.CountMessageTokensRequest.model": true,
- "google.generativeai.protos.CountMessageTokensRequest.mro": true,
- "google.generativeai.protos.CountMessageTokensRequest.pb": true,
- "google.generativeai.protos.CountMessageTokensRequest.prompt": true,
- "google.generativeai.protos.CountMessageTokensRequest.serialize": true,
- "google.generativeai.protos.CountMessageTokensRequest.to_dict": true,
- "google.generativeai.protos.CountMessageTokensRequest.to_json": true,
- "google.generativeai.protos.CountMessageTokensRequest.wrap": true,
- "google.generativeai.protos.CountMessageTokensResponse": false,
- "google.generativeai.protos.CountMessageTokensResponse.__call__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__eq__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__ge__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__gt__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__init__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__le__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__lt__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__ne__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__new__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__or__": true,
- "google.generativeai.protos.CountMessageTokensResponse.__ror__": true,
- "google.generativeai.protos.CountMessageTokensResponse.copy_from": true,
- "google.generativeai.protos.CountMessageTokensResponse.deserialize": true,
- "google.generativeai.protos.CountMessageTokensResponse.from_json": true,
- "google.generativeai.protos.CountMessageTokensResponse.mro": true,
- "google.generativeai.protos.CountMessageTokensResponse.pb": true,
- "google.generativeai.protos.CountMessageTokensResponse.serialize": true,
- "google.generativeai.protos.CountMessageTokensResponse.to_dict": true,
- "google.generativeai.protos.CountMessageTokensResponse.to_json": true,
- "google.generativeai.protos.CountMessageTokensResponse.token_count": true,
- "google.generativeai.protos.CountMessageTokensResponse.wrap": true,
- "google.generativeai.protos.CountTextTokensRequest": false,
- "google.generativeai.protos.CountTextTokensRequest.__call__": true,
- "google.generativeai.protos.CountTextTokensRequest.__eq__": true,
- "google.generativeai.protos.CountTextTokensRequest.__ge__": true,
- "google.generativeai.protos.CountTextTokensRequest.__gt__": true,
- "google.generativeai.protos.CountTextTokensRequest.__init__": true,
- "google.generativeai.protos.CountTextTokensRequest.__le__": true,
- "google.generativeai.protos.CountTextTokensRequest.__lt__": true,
- "google.generativeai.protos.CountTextTokensRequest.__ne__": true,
- "google.generativeai.protos.CountTextTokensRequest.__new__": true,
- "google.generativeai.protos.CountTextTokensRequest.__or__": true,
- "google.generativeai.protos.CountTextTokensRequest.__ror__": true,
- "google.generativeai.protos.CountTextTokensRequest.copy_from": true,
- "google.generativeai.protos.CountTextTokensRequest.deserialize": true,
- "google.generativeai.protos.CountTextTokensRequest.from_json": true,
- "google.generativeai.protos.CountTextTokensRequest.model": true,
- "google.generativeai.protos.CountTextTokensRequest.mro": true,
- "google.generativeai.protos.CountTextTokensRequest.pb": true,
- "google.generativeai.protos.CountTextTokensRequest.prompt": true,
- "google.generativeai.protos.CountTextTokensRequest.serialize": true,
- "google.generativeai.protos.CountTextTokensRequest.to_dict": true,
- "google.generativeai.protos.CountTextTokensRequest.to_json": true,
- "google.generativeai.protos.CountTextTokensRequest.wrap": true,
- "google.generativeai.protos.CountTextTokensResponse": false,
- "google.generativeai.protos.CountTextTokensResponse.__call__": true,
- "google.generativeai.protos.CountTextTokensResponse.__eq__": true,
- "google.generativeai.protos.CountTextTokensResponse.__ge__": true,
- "google.generativeai.protos.CountTextTokensResponse.__gt__": true,
- "google.generativeai.protos.CountTextTokensResponse.__init__": true,
- "google.generativeai.protos.CountTextTokensResponse.__le__": true,
- "google.generativeai.protos.CountTextTokensResponse.__lt__": true,
- "google.generativeai.protos.CountTextTokensResponse.__ne__": true,
- "google.generativeai.protos.CountTextTokensResponse.__new__": true,
- "google.generativeai.protos.CountTextTokensResponse.__or__": true,
- "google.generativeai.protos.CountTextTokensResponse.__ror__": true,
- "google.generativeai.protos.CountTextTokensResponse.copy_from": true,
- "google.generativeai.protos.CountTextTokensResponse.deserialize": true,
- "google.generativeai.protos.CountTextTokensResponse.from_json": true,
- "google.generativeai.protos.CountTextTokensResponse.mro": true,
- "google.generativeai.protos.CountTextTokensResponse.pb": true,
- "google.generativeai.protos.CountTextTokensResponse.serialize": true,
- "google.generativeai.protos.CountTextTokensResponse.to_dict": true,
- "google.generativeai.protos.CountTextTokensResponse.to_json": true,
- "google.generativeai.protos.CountTextTokensResponse.token_count": true,
- "google.generativeai.protos.CountTextTokensResponse.wrap": true,
- "google.generativeai.protos.CountTokensRequest": false,
- "google.generativeai.protos.CountTokensRequest.__call__": true,
- "google.generativeai.protos.CountTokensRequest.__eq__": true,
- "google.generativeai.protos.CountTokensRequest.__ge__": true,
- "google.generativeai.protos.CountTokensRequest.__gt__": true,
- "google.generativeai.protos.CountTokensRequest.__init__": true,
- "google.generativeai.protos.CountTokensRequest.__le__": true,
- "google.generativeai.protos.CountTokensRequest.__lt__": true,
- "google.generativeai.protos.CountTokensRequest.__ne__": true,
- "google.generativeai.protos.CountTokensRequest.__new__": true,
- "google.generativeai.protos.CountTokensRequest.__or__": true,
- "google.generativeai.protos.CountTokensRequest.__ror__": true,
- "google.generativeai.protos.CountTokensRequest.contents": true,
- "google.generativeai.protos.CountTokensRequest.copy_from": true,
- "google.generativeai.protos.CountTokensRequest.deserialize": true,
- "google.generativeai.protos.CountTokensRequest.from_json": true,
- "google.generativeai.protos.CountTokensRequest.generate_content_request": true,
- "google.generativeai.protos.CountTokensRequest.model": true,
- "google.generativeai.protos.CountTokensRequest.mro": true,
- "google.generativeai.protos.CountTokensRequest.pb": true,
- "google.generativeai.protos.CountTokensRequest.serialize": true,
- "google.generativeai.protos.CountTokensRequest.to_dict": true,
- "google.generativeai.protos.CountTokensRequest.to_json": true,
- "google.generativeai.protos.CountTokensRequest.wrap": true,
- "google.generativeai.protos.CountTokensResponse": false,
- "google.generativeai.protos.CountTokensResponse.__call__": true,
- "google.generativeai.protos.CountTokensResponse.__eq__": true,
- "google.generativeai.protos.CountTokensResponse.__ge__": true,
- "google.generativeai.protos.CountTokensResponse.__gt__": true,
- "google.generativeai.protos.CountTokensResponse.__init__": true,
- "google.generativeai.protos.CountTokensResponse.__le__": true,
- "google.generativeai.protos.CountTokensResponse.__lt__": true,
- "google.generativeai.protos.CountTokensResponse.__ne__": true,
- "google.generativeai.protos.CountTokensResponse.__new__": true,
- "google.generativeai.protos.CountTokensResponse.__or__": true,
- "google.generativeai.protos.CountTokensResponse.__ror__": true,
- "google.generativeai.protos.CountTokensResponse.cached_content_token_count": true,
- "google.generativeai.protos.CountTokensResponse.copy_from": true,
- "google.generativeai.protos.CountTokensResponse.deserialize": true,
- "google.generativeai.protos.CountTokensResponse.from_json": true,
- "google.generativeai.protos.CountTokensResponse.mro": true,
- "google.generativeai.protos.CountTokensResponse.pb": true,
- "google.generativeai.protos.CountTokensResponse.serialize": true,
- "google.generativeai.protos.CountTokensResponse.to_dict": true,
- "google.generativeai.protos.CountTokensResponse.to_json": true,
- "google.generativeai.protos.CountTokensResponse.total_tokens": true,
- "google.generativeai.protos.CountTokensResponse.wrap": true,
- "google.generativeai.protos.CreateCachedContentRequest": false,
- "google.generativeai.protos.CreateCachedContentRequest.__call__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__eq__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__ge__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__gt__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__init__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__le__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__lt__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__ne__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__new__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__or__": true,
- "google.generativeai.protos.CreateCachedContentRequest.__ror__": true,
- "google.generativeai.protos.CreateCachedContentRequest.cached_content": true,
- "google.generativeai.protos.CreateCachedContentRequest.copy_from": true,
- "google.generativeai.protos.CreateCachedContentRequest.deserialize": true,
- "google.generativeai.protos.CreateCachedContentRequest.from_json": true,
- "google.generativeai.protos.CreateCachedContentRequest.mro": true,
- "google.generativeai.protos.CreateCachedContentRequest.pb": true,
- "google.generativeai.protos.CreateCachedContentRequest.serialize": true,
- "google.generativeai.protos.CreateCachedContentRequest.to_dict": true,
- "google.generativeai.protos.CreateCachedContentRequest.to_json": true,
- "google.generativeai.protos.CreateCachedContentRequest.wrap": true,
- "google.generativeai.protos.CreateChunkRequest": false,
- "google.generativeai.protos.CreateChunkRequest.__call__": true,
- "google.generativeai.protos.CreateChunkRequest.__eq__": true,
- "google.generativeai.protos.CreateChunkRequest.__ge__": true,
- "google.generativeai.protos.CreateChunkRequest.__gt__": true,
- "google.generativeai.protos.CreateChunkRequest.__init__": true,
- "google.generativeai.protos.CreateChunkRequest.__le__": true,
- "google.generativeai.protos.CreateChunkRequest.__lt__": true,
- "google.generativeai.protos.CreateChunkRequest.__ne__": true,
- "google.generativeai.protos.CreateChunkRequest.__new__": true,
- "google.generativeai.protos.CreateChunkRequest.__or__": true,
- "google.generativeai.protos.CreateChunkRequest.__ror__": true,
- "google.generativeai.protos.CreateChunkRequest.chunk": true,
- "google.generativeai.protos.CreateChunkRequest.copy_from": true,
- "google.generativeai.protos.CreateChunkRequest.deserialize": true,
- "google.generativeai.protos.CreateChunkRequest.from_json": true,
- "google.generativeai.protos.CreateChunkRequest.mro": true,
- "google.generativeai.protos.CreateChunkRequest.parent": true,
- "google.generativeai.protos.CreateChunkRequest.pb": true,
- "google.generativeai.protos.CreateChunkRequest.serialize": true,
- "google.generativeai.protos.CreateChunkRequest.to_dict": true,
- "google.generativeai.protos.CreateChunkRequest.to_json": true,
- "google.generativeai.protos.CreateChunkRequest.wrap": true,
- "google.generativeai.protos.CreateCorpusRequest": false,
- "google.generativeai.protos.CreateCorpusRequest.__call__": true,
- "google.generativeai.protos.CreateCorpusRequest.__eq__": true,
- "google.generativeai.protos.CreateCorpusRequest.__ge__": true,
- "google.generativeai.protos.CreateCorpusRequest.__gt__": true,
- "google.generativeai.protos.CreateCorpusRequest.__init__": true,
- "google.generativeai.protos.CreateCorpusRequest.__le__": true,
- "google.generativeai.protos.CreateCorpusRequest.__lt__": true,
- "google.generativeai.protos.CreateCorpusRequest.__ne__": true,
- "google.generativeai.protos.CreateCorpusRequest.__new__": true,
- "google.generativeai.protos.CreateCorpusRequest.__or__": true,
- "google.generativeai.protos.CreateCorpusRequest.__ror__": true,
- "google.generativeai.protos.CreateCorpusRequest.copy_from": true,
- "google.generativeai.protos.CreateCorpusRequest.corpus": true,
- "google.generativeai.protos.CreateCorpusRequest.deserialize": true,
- "google.generativeai.protos.CreateCorpusRequest.from_json": true,
- "google.generativeai.protos.CreateCorpusRequest.mro": true,
- "google.generativeai.protos.CreateCorpusRequest.pb": true,
- "google.generativeai.protos.CreateCorpusRequest.serialize": true,
- "google.generativeai.protos.CreateCorpusRequest.to_dict": true,
- "google.generativeai.protos.CreateCorpusRequest.to_json": true,
- "google.generativeai.protos.CreateCorpusRequest.wrap": true,
- "google.generativeai.protos.CreateDocumentRequest": false,
- "google.generativeai.protos.CreateDocumentRequest.__call__": true,
- "google.generativeai.protos.CreateDocumentRequest.__eq__": true,
- "google.generativeai.protos.CreateDocumentRequest.__ge__": true,
- "google.generativeai.protos.CreateDocumentRequest.__gt__": true,
- "google.generativeai.protos.CreateDocumentRequest.__init__": true,
- "google.generativeai.protos.CreateDocumentRequest.__le__": true,
- "google.generativeai.protos.CreateDocumentRequest.__lt__": true,
- "google.generativeai.protos.CreateDocumentRequest.__ne__": true,
- "google.generativeai.protos.CreateDocumentRequest.__new__": true,
- "google.generativeai.protos.CreateDocumentRequest.__or__": true,
- "google.generativeai.protos.CreateDocumentRequest.__ror__": true,
- "google.generativeai.protos.CreateDocumentRequest.copy_from": true,
- "google.generativeai.protos.CreateDocumentRequest.deserialize": true,
- "google.generativeai.protos.CreateDocumentRequest.document": true,
- "google.generativeai.protos.CreateDocumentRequest.from_json": true,
- "google.generativeai.protos.CreateDocumentRequest.mro": true,
- "google.generativeai.protos.CreateDocumentRequest.parent": true,
- "google.generativeai.protos.CreateDocumentRequest.pb": true,
- "google.generativeai.protos.CreateDocumentRequest.serialize": true,
- "google.generativeai.protos.CreateDocumentRequest.to_dict": true,
- "google.generativeai.protos.CreateDocumentRequest.to_json": true,
- "google.generativeai.protos.CreateDocumentRequest.wrap": true,
- "google.generativeai.protos.CreateFileRequest": false,
- "google.generativeai.protos.CreateFileRequest.__call__": true,
- "google.generativeai.protos.CreateFileRequest.__eq__": true,
- "google.generativeai.protos.CreateFileRequest.__ge__": true,
- "google.generativeai.protos.CreateFileRequest.__gt__": true,
- "google.generativeai.protos.CreateFileRequest.__init__": true,
- "google.generativeai.protos.CreateFileRequest.__le__": true,
- "google.generativeai.protos.CreateFileRequest.__lt__": true,
- "google.generativeai.protos.CreateFileRequest.__ne__": true,
- "google.generativeai.protos.CreateFileRequest.__new__": true,
- "google.generativeai.protos.CreateFileRequest.__or__": true,
- "google.generativeai.protos.CreateFileRequest.__ror__": true,
- "google.generativeai.protos.CreateFileRequest.copy_from": true,
- "google.generativeai.protos.CreateFileRequest.deserialize": true,
- "google.generativeai.protos.CreateFileRequest.file": true,
- "google.generativeai.protos.CreateFileRequest.from_json": true,
- "google.generativeai.protos.CreateFileRequest.mro": true,
- "google.generativeai.protos.CreateFileRequest.pb": true,
- "google.generativeai.protos.CreateFileRequest.serialize": true,
- "google.generativeai.protos.CreateFileRequest.to_dict": true,
- "google.generativeai.protos.CreateFileRequest.to_json": true,
- "google.generativeai.protos.CreateFileRequest.wrap": true,
- "google.generativeai.protos.CreateFileResponse": false,
- "google.generativeai.protos.CreateFileResponse.__call__": true,
- "google.generativeai.protos.CreateFileResponse.__eq__": true,
- "google.generativeai.protos.CreateFileResponse.__ge__": true,
- "google.generativeai.protos.CreateFileResponse.__gt__": true,
- "google.generativeai.protos.CreateFileResponse.__init__": true,
- "google.generativeai.protos.CreateFileResponse.__le__": true,
- "google.generativeai.protos.CreateFileResponse.__lt__": true,
- "google.generativeai.protos.CreateFileResponse.__ne__": true,
- "google.generativeai.protos.CreateFileResponse.__new__": true,
- "google.generativeai.protos.CreateFileResponse.__or__": true,
- "google.generativeai.protos.CreateFileResponse.__ror__": true,
- "google.generativeai.protos.CreateFileResponse.copy_from": true,
- "google.generativeai.protos.CreateFileResponse.deserialize": true,
- "google.generativeai.protos.CreateFileResponse.file": true,
- "google.generativeai.protos.CreateFileResponse.from_json": true,
- "google.generativeai.protos.CreateFileResponse.mro": true,
- "google.generativeai.protos.CreateFileResponse.pb": true,
- "google.generativeai.protos.CreateFileResponse.serialize": true,
- "google.generativeai.protos.CreateFileResponse.to_dict": true,
- "google.generativeai.protos.CreateFileResponse.to_json": true,
- "google.generativeai.protos.CreateFileResponse.wrap": true,
- "google.generativeai.protos.CreatePermissionRequest": false,
- "google.generativeai.protos.CreatePermissionRequest.__call__": true,
- "google.generativeai.protos.CreatePermissionRequest.__eq__": true,
- "google.generativeai.protos.CreatePermissionRequest.__ge__": true,
- "google.generativeai.protos.CreatePermissionRequest.__gt__": true,
- "google.generativeai.protos.CreatePermissionRequest.__init__": true,
- "google.generativeai.protos.CreatePermissionRequest.__le__": true,
- "google.generativeai.protos.CreatePermissionRequest.__lt__": true,
- "google.generativeai.protos.CreatePermissionRequest.__ne__": true,
- "google.generativeai.protos.CreatePermissionRequest.__new__": true,
- "google.generativeai.protos.CreatePermissionRequest.__or__": true,
- "google.generativeai.protos.CreatePermissionRequest.__ror__": true,
- "google.generativeai.protos.CreatePermissionRequest.copy_from": true,
- "google.generativeai.protos.CreatePermissionRequest.deserialize": true,
- "google.generativeai.protos.CreatePermissionRequest.from_json": true,
- "google.generativeai.protos.CreatePermissionRequest.mro": true,
- "google.generativeai.protos.CreatePermissionRequest.parent": true,
- "google.generativeai.protos.CreatePermissionRequest.pb": true,
- "google.generativeai.protos.CreatePermissionRequest.permission": true,
- "google.generativeai.protos.CreatePermissionRequest.serialize": true,
- "google.generativeai.protos.CreatePermissionRequest.to_dict": true,
- "google.generativeai.protos.CreatePermissionRequest.to_json": true,
- "google.generativeai.protos.CreatePermissionRequest.wrap": true,
- "google.generativeai.protos.CreateTunedModelMetadata": false,
- "google.generativeai.protos.CreateTunedModelMetadata.__call__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__eq__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__ge__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__gt__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__init__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__le__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__lt__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__ne__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__new__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__or__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.__ror__": true,
- "google.generativeai.protos.CreateTunedModelMetadata.completed_percent": true,
- "google.generativeai.protos.CreateTunedModelMetadata.completed_steps": true,
- "google.generativeai.protos.CreateTunedModelMetadata.copy_from": true,
- "google.generativeai.protos.CreateTunedModelMetadata.deserialize": true,
- "google.generativeai.protos.CreateTunedModelMetadata.from_json": true,
- "google.generativeai.protos.CreateTunedModelMetadata.mro": true,
- "google.generativeai.protos.CreateTunedModelMetadata.pb": true,
- "google.generativeai.protos.CreateTunedModelMetadata.serialize": true,
- "google.generativeai.protos.CreateTunedModelMetadata.snapshots": true,
- "google.generativeai.protos.CreateTunedModelMetadata.to_dict": true,
- "google.generativeai.protos.CreateTunedModelMetadata.to_json": true,
- "google.generativeai.protos.CreateTunedModelMetadata.total_steps": true,
- "google.generativeai.protos.CreateTunedModelMetadata.tuned_model": true,
- "google.generativeai.protos.CreateTunedModelMetadata.wrap": true,
- "google.generativeai.protos.CreateTunedModelRequest": false,
- "google.generativeai.protos.CreateTunedModelRequest.__call__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__eq__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__ge__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__gt__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__init__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__le__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__lt__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__ne__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__new__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__or__": true,
- "google.generativeai.protos.CreateTunedModelRequest.__ror__": true,
- "google.generativeai.protos.CreateTunedModelRequest.copy_from": true,
- "google.generativeai.protos.CreateTunedModelRequest.deserialize": true,
- "google.generativeai.protos.CreateTunedModelRequest.from_json": true,
- "google.generativeai.protos.CreateTunedModelRequest.mro": true,
- "google.generativeai.protos.CreateTunedModelRequest.pb": true,
- "google.generativeai.protos.CreateTunedModelRequest.serialize": true,
- "google.generativeai.protos.CreateTunedModelRequest.to_dict": true,
- "google.generativeai.protos.CreateTunedModelRequest.to_json": true,
- "google.generativeai.protos.CreateTunedModelRequest.tuned_model": true,
- "google.generativeai.protos.CreateTunedModelRequest.tuned_model_id": true,
- "google.generativeai.protos.CreateTunedModelRequest.wrap": true,
- "google.generativeai.protos.CustomMetadata": false,
- "google.generativeai.protos.CustomMetadata.__call__": true,
- "google.generativeai.protos.CustomMetadata.__eq__": true,
- "google.generativeai.protos.CustomMetadata.__ge__": true,
- "google.generativeai.protos.CustomMetadata.__gt__": true,
- "google.generativeai.protos.CustomMetadata.__init__": true,
- "google.generativeai.protos.CustomMetadata.__le__": true,
- "google.generativeai.protos.CustomMetadata.__lt__": true,
- "google.generativeai.protos.CustomMetadata.__ne__": true,
- "google.generativeai.protos.CustomMetadata.__new__": true,
- "google.generativeai.protos.CustomMetadata.__or__": true,
- "google.generativeai.protos.CustomMetadata.__ror__": true,
- "google.generativeai.protos.CustomMetadata.copy_from": true,
- "google.generativeai.protos.CustomMetadata.deserialize": true,
- "google.generativeai.protos.CustomMetadata.from_json": true,
- "google.generativeai.protos.CustomMetadata.key": true,
- "google.generativeai.protos.CustomMetadata.mro": true,
- "google.generativeai.protos.CustomMetadata.numeric_value": true,
- "google.generativeai.protos.CustomMetadata.pb": true,
- "google.generativeai.protos.CustomMetadata.serialize": true,
- "google.generativeai.protos.CustomMetadata.string_list_value": true,
- "google.generativeai.protos.CustomMetadata.string_value": true,
- "google.generativeai.protos.CustomMetadata.to_dict": true,
- "google.generativeai.protos.CustomMetadata.to_json": true,
- "google.generativeai.protos.CustomMetadata.wrap": true,
- "google.generativeai.protos.Dataset": false,
- "google.generativeai.protos.Dataset.__call__": true,
- "google.generativeai.protos.Dataset.__eq__": true,
- "google.generativeai.protos.Dataset.__ge__": true,
- "google.generativeai.protos.Dataset.__gt__": true,
- "google.generativeai.protos.Dataset.__init__": true,
- "google.generativeai.protos.Dataset.__le__": true,
- "google.generativeai.protos.Dataset.__lt__": true,
- "google.generativeai.protos.Dataset.__ne__": true,
- "google.generativeai.protos.Dataset.__new__": true,
- "google.generativeai.protos.Dataset.__or__": true,
- "google.generativeai.protos.Dataset.__ror__": true,
- "google.generativeai.protos.Dataset.copy_from": true,
- "google.generativeai.protos.Dataset.deserialize": true,
- "google.generativeai.protos.Dataset.examples": true,
- "google.generativeai.protos.Dataset.from_json": true,
- "google.generativeai.protos.Dataset.mro": true,
- "google.generativeai.protos.Dataset.pb": true,
- "google.generativeai.protos.Dataset.serialize": true,
- "google.generativeai.protos.Dataset.to_dict": true,
- "google.generativeai.protos.Dataset.to_json": true,
- "google.generativeai.protos.Dataset.wrap": true,
- "google.generativeai.protos.DeleteCachedContentRequest": false,
- "google.generativeai.protos.DeleteCachedContentRequest.__call__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__eq__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__ge__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__gt__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__init__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__le__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__lt__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__ne__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__new__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__or__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.__ror__": true,
- "google.generativeai.protos.DeleteCachedContentRequest.copy_from": true,
- "google.generativeai.protos.DeleteCachedContentRequest.deserialize": true,
- "google.generativeai.protos.DeleteCachedContentRequest.from_json": true,
- "google.generativeai.protos.DeleteCachedContentRequest.mro": true,
- "google.generativeai.protos.DeleteCachedContentRequest.name": true,
- "google.generativeai.protos.DeleteCachedContentRequest.pb": true,
- "google.generativeai.protos.DeleteCachedContentRequest.serialize": true,
- "google.generativeai.protos.DeleteCachedContentRequest.to_dict": true,
- "google.generativeai.protos.DeleteCachedContentRequest.to_json": true,
- "google.generativeai.protos.DeleteCachedContentRequest.wrap": true,
- "google.generativeai.protos.DeleteChunkRequest": false,
- "google.generativeai.protos.DeleteChunkRequest.__call__": true,
- "google.generativeai.protos.DeleteChunkRequest.__eq__": true,
- "google.generativeai.protos.DeleteChunkRequest.__ge__": true,
- "google.generativeai.protos.DeleteChunkRequest.__gt__": true,
- "google.generativeai.protos.DeleteChunkRequest.__init__": true,
- "google.generativeai.protos.DeleteChunkRequest.__le__": true,
- "google.generativeai.protos.DeleteChunkRequest.__lt__": true,
- "google.generativeai.protos.DeleteChunkRequest.__ne__": true,
- "google.generativeai.protos.DeleteChunkRequest.__new__": true,
- "google.generativeai.protos.DeleteChunkRequest.__or__": true,
- "google.generativeai.protos.DeleteChunkRequest.__ror__": true,
- "google.generativeai.protos.DeleteChunkRequest.copy_from": true,
- "google.generativeai.protos.DeleteChunkRequest.deserialize": true,
- "google.generativeai.protos.DeleteChunkRequest.from_json": true,
- "google.generativeai.protos.DeleteChunkRequest.mro": true,
- "google.generativeai.protos.DeleteChunkRequest.name": true,
- "google.generativeai.protos.DeleteChunkRequest.pb": true,
- "google.generativeai.protos.DeleteChunkRequest.serialize": true,
- "google.generativeai.protos.DeleteChunkRequest.to_dict": true,
- "google.generativeai.protos.DeleteChunkRequest.to_json": true,
- "google.generativeai.protos.DeleteChunkRequest.wrap": true,
- "google.generativeai.protos.DeleteCorpusRequest": false,
- "google.generativeai.protos.DeleteCorpusRequest.__call__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__eq__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__ge__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__gt__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__init__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__le__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__lt__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__ne__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__new__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__or__": true,
- "google.generativeai.protos.DeleteCorpusRequest.__ror__": true,
- "google.generativeai.protos.DeleteCorpusRequest.copy_from": true,
- "google.generativeai.protos.DeleteCorpusRequest.deserialize": true,
- "google.generativeai.protos.DeleteCorpusRequest.force": true,
- "google.generativeai.protos.DeleteCorpusRequest.from_json": true,
- "google.generativeai.protos.DeleteCorpusRequest.mro": true,
- "google.generativeai.protos.DeleteCorpusRequest.name": true,
- "google.generativeai.protos.DeleteCorpusRequest.pb": true,
- "google.generativeai.protos.DeleteCorpusRequest.serialize": true,
- "google.generativeai.protos.DeleteCorpusRequest.to_dict": true,
- "google.generativeai.protos.DeleteCorpusRequest.to_json": true,
- "google.generativeai.protos.DeleteCorpusRequest.wrap": true,
- "google.generativeai.protos.DeleteDocumentRequest": false,
- "google.generativeai.protos.DeleteDocumentRequest.__call__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__eq__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__ge__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__gt__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__init__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__le__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__lt__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__ne__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__new__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__or__": true,
- "google.generativeai.protos.DeleteDocumentRequest.__ror__": true,
- "google.generativeai.protos.DeleteDocumentRequest.copy_from": true,
- "google.generativeai.protos.DeleteDocumentRequest.deserialize": true,
- "google.generativeai.protos.DeleteDocumentRequest.force": true,
- "google.generativeai.protos.DeleteDocumentRequest.from_json": true,
- "google.generativeai.protos.DeleteDocumentRequest.mro": true,
- "google.generativeai.protos.DeleteDocumentRequest.name": true,
- "google.generativeai.protos.DeleteDocumentRequest.pb": true,
- "google.generativeai.protos.DeleteDocumentRequest.serialize": true,
- "google.generativeai.protos.DeleteDocumentRequest.to_dict": true,
- "google.generativeai.protos.DeleteDocumentRequest.to_json": true,
- "google.generativeai.protos.DeleteDocumentRequest.wrap": true,
- "google.generativeai.protos.DeleteFileRequest": false,
- "google.generativeai.protos.DeleteFileRequest.__call__": true,
- "google.generativeai.protos.DeleteFileRequest.__eq__": true,
- "google.generativeai.protos.DeleteFileRequest.__ge__": true,
- "google.generativeai.protos.DeleteFileRequest.__gt__": true,
- "google.generativeai.protos.DeleteFileRequest.__init__": true,
- "google.generativeai.protos.DeleteFileRequest.__le__": true,
- "google.generativeai.protos.DeleteFileRequest.__lt__": true,
- "google.generativeai.protos.DeleteFileRequest.__ne__": true,
- "google.generativeai.protos.DeleteFileRequest.__new__": true,
- "google.generativeai.protos.DeleteFileRequest.__or__": true,
- "google.generativeai.protos.DeleteFileRequest.__ror__": true,
- "google.generativeai.protos.DeleteFileRequest.copy_from": true,
- "google.generativeai.protos.DeleteFileRequest.deserialize": true,
- "google.generativeai.protos.DeleteFileRequest.from_json": true,
- "google.generativeai.protos.DeleteFileRequest.mro": true,
- "google.generativeai.protos.DeleteFileRequest.name": true,
- "google.generativeai.protos.DeleteFileRequest.pb": true,
- "google.generativeai.protos.DeleteFileRequest.serialize": true,
- "google.generativeai.protos.DeleteFileRequest.to_dict": true,
- "google.generativeai.protos.DeleteFileRequest.to_json": true,
- "google.generativeai.protos.DeleteFileRequest.wrap": true,
- "google.generativeai.protos.DeletePermissionRequest": false,
- "google.generativeai.protos.DeletePermissionRequest.__call__": true,
- "google.generativeai.protos.DeletePermissionRequest.__eq__": true,
- "google.generativeai.protos.DeletePermissionRequest.__ge__": true,
- "google.generativeai.protos.DeletePermissionRequest.__gt__": true,
- "google.generativeai.protos.DeletePermissionRequest.__init__": true,
- "google.generativeai.protos.DeletePermissionRequest.__le__": true,
- "google.generativeai.protos.DeletePermissionRequest.__lt__": true,
- "google.generativeai.protos.DeletePermissionRequest.__ne__": true,
- "google.generativeai.protos.DeletePermissionRequest.__new__": true,
- "google.generativeai.protos.DeletePermissionRequest.__or__": true,
- "google.generativeai.protos.DeletePermissionRequest.__ror__": true,
- "google.generativeai.protos.DeletePermissionRequest.copy_from": true,
- "google.generativeai.protos.DeletePermissionRequest.deserialize": true,
- "google.generativeai.protos.DeletePermissionRequest.from_json": true,
- "google.generativeai.protos.DeletePermissionRequest.mro": true,
- "google.generativeai.protos.DeletePermissionRequest.name": true,
- "google.generativeai.protos.DeletePermissionRequest.pb": true,
- "google.generativeai.protos.DeletePermissionRequest.serialize": true,
- "google.generativeai.protos.DeletePermissionRequest.to_dict": true,
- "google.generativeai.protos.DeletePermissionRequest.to_json": true,
- "google.generativeai.protos.DeletePermissionRequest.wrap": true,
- "google.generativeai.protos.DeleteTunedModelRequest": false,
- "google.generativeai.protos.DeleteTunedModelRequest.__call__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__eq__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__ge__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__gt__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__init__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__le__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__lt__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__ne__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__new__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__or__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.__ror__": true,
- "google.generativeai.protos.DeleteTunedModelRequest.copy_from": true,
- "google.generativeai.protos.DeleteTunedModelRequest.deserialize": true,
- "google.generativeai.protos.DeleteTunedModelRequest.from_json": true,
- "google.generativeai.protos.DeleteTunedModelRequest.mro": true,
- "google.generativeai.protos.DeleteTunedModelRequest.name": true,
- "google.generativeai.protos.DeleteTunedModelRequest.pb": true,
- "google.generativeai.protos.DeleteTunedModelRequest.serialize": true,
- "google.generativeai.protos.DeleteTunedModelRequest.to_dict": true,
- "google.generativeai.protos.DeleteTunedModelRequest.to_json": true,
- "google.generativeai.protos.DeleteTunedModelRequest.wrap": true,
- "google.generativeai.protos.Document": false,
- "google.generativeai.protos.Document.__call__": true,
- "google.generativeai.protos.Document.__eq__": true,
- "google.generativeai.protos.Document.__ge__": true,
- "google.generativeai.protos.Document.__gt__": true,
- "google.generativeai.protos.Document.__init__": true,
- "google.generativeai.protos.Document.__le__": true,
- "google.generativeai.protos.Document.__lt__": true,
- "google.generativeai.protos.Document.__ne__": true,
- "google.generativeai.protos.Document.__new__": true,
- "google.generativeai.protos.Document.__or__": true,
- "google.generativeai.protos.Document.__ror__": true,
- "google.generativeai.protos.Document.copy_from": true,
- "google.generativeai.protos.Document.create_time": true,
- "google.generativeai.protos.Document.custom_metadata": true,
- "google.generativeai.protos.Document.deserialize": true,
- "google.generativeai.protos.Document.display_name": true,
- "google.generativeai.protos.Document.from_json": true,
- "google.generativeai.protos.Document.mro": true,
- "google.generativeai.protos.Document.name": true,
- "google.generativeai.protos.Document.pb": true,
- "google.generativeai.protos.Document.serialize": true,
- "google.generativeai.protos.Document.to_dict": true,
- "google.generativeai.protos.Document.to_json": true,
- "google.generativeai.protos.Document.update_time": true,
- "google.generativeai.protos.Document.wrap": true,
- "google.generativeai.protos.DynamicRetrievalConfig": false,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode": false,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_DYNAMIC": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_UNSPECIFIED": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.from_bytes": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": true,
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__call__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__eq__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__ge__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__gt__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__init__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__le__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__lt__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__ne__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__new__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__or__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.__ror__": true,
- "google.generativeai.protos.DynamicRetrievalConfig.copy_from": true,
- "google.generativeai.protos.DynamicRetrievalConfig.deserialize": true,
- "google.generativeai.protos.DynamicRetrievalConfig.dynamic_threshold": true,
- "google.generativeai.protos.DynamicRetrievalConfig.from_json": true,
- "google.generativeai.protos.DynamicRetrievalConfig.mode": true,
- "google.generativeai.protos.DynamicRetrievalConfig.mro": true,
- "google.generativeai.protos.DynamicRetrievalConfig.pb": true,
- "google.generativeai.protos.DynamicRetrievalConfig.serialize": true,
- "google.generativeai.protos.DynamicRetrievalConfig.to_dict": true,
- "google.generativeai.protos.DynamicRetrievalConfig.to_json": true,
- "google.generativeai.protos.DynamicRetrievalConfig.wrap": true,
- "google.generativeai.protos.EmbedContentRequest": false,
- "google.generativeai.protos.EmbedContentRequest.__call__": true,
- "google.generativeai.protos.EmbedContentRequest.__eq__": true,
- "google.generativeai.protos.EmbedContentRequest.__ge__": true,
- "google.generativeai.protos.EmbedContentRequest.__gt__": true,
- "google.generativeai.protos.EmbedContentRequest.__init__": true,
- "google.generativeai.protos.EmbedContentRequest.__le__": true,
- "google.generativeai.protos.EmbedContentRequest.__lt__": true,
- "google.generativeai.protos.EmbedContentRequest.__ne__": true,
- "google.generativeai.protos.EmbedContentRequest.__new__": true,
- "google.generativeai.protos.EmbedContentRequest.__or__": true,
- "google.generativeai.protos.EmbedContentRequest.__ror__": true,
- "google.generativeai.protos.EmbedContentRequest.content": true,
- "google.generativeai.protos.EmbedContentRequest.copy_from": true,
- "google.generativeai.protos.EmbedContentRequest.deserialize": true,
- "google.generativeai.protos.EmbedContentRequest.from_json": true,
- "google.generativeai.protos.EmbedContentRequest.model": true,
- "google.generativeai.protos.EmbedContentRequest.mro": true,
- "google.generativeai.protos.EmbedContentRequest.output_dimensionality": true,
- "google.generativeai.protos.EmbedContentRequest.pb": true,
- "google.generativeai.protos.EmbedContentRequest.serialize": true,
- "google.generativeai.protos.EmbedContentRequest.task_type": true,
- "google.generativeai.protos.EmbedContentRequest.title": true,
- "google.generativeai.protos.EmbedContentRequest.to_dict": true,
- "google.generativeai.protos.EmbedContentRequest.to_json": true,
- "google.generativeai.protos.EmbedContentRequest.wrap": true,
- "google.generativeai.protos.EmbedContentResponse": false,
- "google.generativeai.protos.EmbedContentResponse.__call__": true,
- "google.generativeai.protos.EmbedContentResponse.__eq__": true,
- "google.generativeai.protos.EmbedContentResponse.__ge__": true,
- "google.generativeai.protos.EmbedContentResponse.__gt__": true,
- "google.generativeai.protos.EmbedContentResponse.__init__": true,
- "google.generativeai.protos.EmbedContentResponse.__le__": true,
- "google.generativeai.protos.EmbedContentResponse.__lt__": true,
- "google.generativeai.protos.EmbedContentResponse.__ne__": true,
- "google.generativeai.protos.EmbedContentResponse.__new__": true,
- "google.generativeai.protos.EmbedContentResponse.__or__": true,
- "google.generativeai.protos.EmbedContentResponse.__ror__": true,
- "google.generativeai.protos.EmbedContentResponse.copy_from": true,
- "google.generativeai.protos.EmbedContentResponse.deserialize": true,
- "google.generativeai.protos.EmbedContentResponse.embedding": true,
- "google.generativeai.protos.EmbedContentResponse.from_json": true,
- "google.generativeai.protos.EmbedContentResponse.mro": true,
- "google.generativeai.protos.EmbedContentResponse.pb": true,
- "google.generativeai.protos.EmbedContentResponse.serialize": true,
- "google.generativeai.protos.EmbedContentResponse.to_dict": true,
- "google.generativeai.protos.EmbedContentResponse.to_json": true,
- "google.generativeai.protos.EmbedContentResponse.wrap": true,
- "google.generativeai.protos.EmbedTextRequest": false,
- "google.generativeai.protos.EmbedTextRequest.__call__": true,
- "google.generativeai.protos.EmbedTextRequest.__eq__": true,
- "google.generativeai.protos.EmbedTextRequest.__ge__": true,
- "google.generativeai.protos.EmbedTextRequest.__gt__": true,
- "google.generativeai.protos.EmbedTextRequest.__init__": true,
- "google.generativeai.protos.EmbedTextRequest.__le__": true,
- "google.generativeai.protos.EmbedTextRequest.__lt__": true,
- "google.generativeai.protos.EmbedTextRequest.__ne__": true,
- "google.generativeai.protos.EmbedTextRequest.__new__": true,
- "google.generativeai.protos.EmbedTextRequest.__or__": true,
- "google.generativeai.protos.EmbedTextRequest.__ror__": true,
- "google.generativeai.protos.EmbedTextRequest.copy_from": true,
- "google.generativeai.protos.EmbedTextRequest.deserialize": true,
- "google.generativeai.protos.EmbedTextRequest.from_json": true,
- "google.generativeai.protos.EmbedTextRequest.model": true,
- "google.generativeai.protos.EmbedTextRequest.mro": true,
- "google.generativeai.protos.EmbedTextRequest.pb": true,
- "google.generativeai.protos.EmbedTextRequest.serialize": true,
- "google.generativeai.protos.EmbedTextRequest.text": true,
- "google.generativeai.protos.EmbedTextRequest.to_dict": true,
- "google.generativeai.protos.EmbedTextRequest.to_json": true,
- "google.generativeai.protos.EmbedTextRequest.wrap": true,
- "google.generativeai.protos.EmbedTextResponse": false,
- "google.generativeai.protos.EmbedTextResponse.__call__": true,
- "google.generativeai.protos.EmbedTextResponse.__eq__": true,
- "google.generativeai.protos.EmbedTextResponse.__ge__": true,
- "google.generativeai.protos.EmbedTextResponse.__gt__": true,
- "google.generativeai.protos.EmbedTextResponse.__init__": true,
- "google.generativeai.protos.EmbedTextResponse.__le__": true,
- "google.generativeai.protos.EmbedTextResponse.__lt__": true,
- "google.generativeai.protos.EmbedTextResponse.__ne__": true,
- "google.generativeai.protos.EmbedTextResponse.__new__": true,
- "google.generativeai.protos.EmbedTextResponse.__or__": true,
- "google.generativeai.protos.EmbedTextResponse.__ror__": true,
- "google.generativeai.protos.EmbedTextResponse.copy_from": true,
- "google.generativeai.protos.EmbedTextResponse.deserialize": true,
- "google.generativeai.protos.EmbedTextResponse.embedding": true,
- "google.generativeai.protos.EmbedTextResponse.from_json": true,
- "google.generativeai.protos.EmbedTextResponse.mro": true,
- "google.generativeai.protos.EmbedTextResponse.pb": true,
- "google.generativeai.protos.EmbedTextResponse.serialize": true,
- "google.generativeai.protos.EmbedTextResponse.to_dict": true,
- "google.generativeai.protos.EmbedTextResponse.to_json": true,
- "google.generativeai.protos.EmbedTextResponse.wrap": true,
- "google.generativeai.protos.Embedding": false,
- "google.generativeai.protos.Embedding.__call__": true,
- "google.generativeai.protos.Embedding.__eq__": true,
- "google.generativeai.protos.Embedding.__ge__": true,
- "google.generativeai.protos.Embedding.__gt__": true,
- "google.generativeai.protos.Embedding.__init__": true,
- "google.generativeai.protos.Embedding.__le__": true,
- "google.generativeai.protos.Embedding.__lt__": true,
- "google.generativeai.protos.Embedding.__ne__": true,
- "google.generativeai.protos.Embedding.__new__": true,
- "google.generativeai.protos.Embedding.__or__": true,
- "google.generativeai.protos.Embedding.__ror__": true,
- "google.generativeai.protos.Embedding.copy_from": true,
- "google.generativeai.protos.Embedding.deserialize": true,
- "google.generativeai.protos.Embedding.from_json": true,
- "google.generativeai.protos.Embedding.mro": true,
- "google.generativeai.protos.Embedding.pb": true,
- "google.generativeai.protos.Embedding.serialize": true,
- "google.generativeai.protos.Embedding.to_dict": true,
- "google.generativeai.protos.Embedding.to_json": true,
- "google.generativeai.protos.Embedding.value": true,
- "google.generativeai.protos.Embedding.wrap": true,
- "google.generativeai.protos.Example": false,
- "google.generativeai.protos.Example.__call__": true,
- "google.generativeai.protos.Example.__eq__": true,
- "google.generativeai.protos.Example.__ge__": true,
- "google.generativeai.protos.Example.__gt__": true,
- "google.generativeai.protos.Example.__init__": true,
- "google.generativeai.protos.Example.__le__": true,
- "google.generativeai.protos.Example.__lt__": true,
- "google.generativeai.protos.Example.__ne__": true,
- "google.generativeai.protos.Example.__new__": true,
- "google.generativeai.protos.Example.__or__": true,
- "google.generativeai.protos.Example.__ror__": true,
- "google.generativeai.protos.Example.copy_from": true,
- "google.generativeai.protos.Example.deserialize": true,
- "google.generativeai.protos.Example.from_json": true,
- "google.generativeai.protos.Example.input": true,
- "google.generativeai.protos.Example.mro": true,
- "google.generativeai.protos.Example.output": true,
- "google.generativeai.protos.Example.pb": true,
- "google.generativeai.protos.Example.serialize": true,
- "google.generativeai.protos.Example.to_dict": true,
- "google.generativeai.protos.Example.to_json": true,
- "google.generativeai.protos.Example.wrap": true,
- "google.generativeai.protos.ExecutableCode": false,
- "google.generativeai.protos.ExecutableCode.Language": false,
- "google.generativeai.protos.ExecutableCode.Language.LANGUAGE_UNSPECIFIED": true,
- "google.generativeai.protos.ExecutableCode.Language.PYTHON": true,
- "google.generativeai.protos.ExecutableCode.Language.__abs__": true,
- "google.generativeai.protos.ExecutableCode.Language.__add__": true,
- "google.generativeai.protos.ExecutableCode.Language.__and__": true,
- "google.generativeai.protos.ExecutableCode.Language.__bool__": true,
- "google.generativeai.protos.ExecutableCode.Language.__contains__": true,
- "google.generativeai.protos.ExecutableCode.Language.__eq__": true,
- "google.generativeai.protos.ExecutableCode.Language.__floordiv__": true,
- "google.generativeai.protos.ExecutableCode.Language.__ge__": true,
- "google.generativeai.protos.ExecutableCode.Language.__getitem__": true,
- "google.generativeai.protos.ExecutableCode.Language.__gt__": true,
- "google.generativeai.protos.ExecutableCode.Language.__init__": true,
- "google.generativeai.protos.ExecutableCode.Language.__invert__": true,
- "google.generativeai.protos.ExecutableCode.Language.__iter__": true,
- "google.generativeai.protos.ExecutableCode.Language.__le__": true,
- "google.generativeai.protos.ExecutableCode.Language.__len__": true,
- "google.generativeai.protos.ExecutableCode.Language.__lshift__": true,
- "google.generativeai.protos.ExecutableCode.Language.__lt__": true,
- "google.generativeai.protos.ExecutableCode.Language.__mod__": true,
- "google.generativeai.protos.ExecutableCode.Language.__mul__": true,
- "google.generativeai.protos.ExecutableCode.Language.__ne__": true,
- "google.generativeai.protos.ExecutableCode.Language.__neg__": true,
- "google.generativeai.protos.ExecutableCode.Language.__new__": true,
- "google.generativeai.protos.ExecutableCode.Language.__or__": true,
- "google.generativeai.protos.ExecutableCode.Language.__pos__": true,
- "google.generativeai.protos.ExecutableCode.Language.__pow__": true,
- "google.generativeai.protos.ExecutableCode.Language.__radd__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rand__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rlshift__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rmod__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rmul__": true,
- "google.generativeai.protos.ExecutableCode.Language.__ror__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rpow__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rrshift__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rshift__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rsub__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": true,
- "google.generativeai.protos.ExecutableCode.Language.__rxor__": true,
- "google.generativeai.protos.ExecutableCode.Language.__sub__": true,
- "google.generativeai.protos.ExecutableCode.Language.__truediv__": true,
- "google.generativeai.protos.ExecutableCode.Language.__xor__": true,
- "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": true,
- "google.generativeai.protos.ExecutableCode.Language.bit_count": true,
- "google.generativeai.protos.ExecutableCode.Language.bit_length": true,
- "google.generativeai.protos.ExecutableCode.Language.conjugate": true,
- "google.generativeai.protos.ExecutableCode.Language.denominator": true,
- "google.generativeai.protos.ExecutableCode.Language.from_bytes": true,
- "google.generativeai.protos.ExecutableCode.Language.imag": true,
- "google.generativeai.protos.ExecutableCode.Language.is_integer": true,
- "google.generativeai.protos.ExecutableCode.Language.numerator": true,
- "google.generativeai.protos.ExecutableCode.Language.real": true,
- "google.generativeai.protos.ExecutableCode.Language.to_bytes": true,
- "google.generativeai.protos.ExecutableCode.__call__": true,
- "google.generativeai.protos.ExecutableCode.__eq__": true,
- "google.generativeai.protos.ExecutableCode.__ge__": true,
- "google.generativeai.protos.ExecutableCode.__gt__": true,
- "google.generativeai.protos.ExecutableCode.__init__": true,
- "google.generativeai.protos.ExecutableCode.__le__": true,
- "google.generativeai.protos.ExecutableCode.__lt__": true,
- "google.generativeai.protos.ExecutableCode.__ne__": true,
- "google.generativeai.protos.ExecutableCode.__new__": true,
- "google.generativeai.protos.ExecutableCode.__or__": true,
- "google.generativeai.protos.ExecutableCode.__ror__": true,
- "google.generativeai.protos.ExecutableCode.code": true,
- "google.generativeai.protos.ExecutableCode.copy_from": true,
- "google.generativeai.protos.ExecutableCode.deserialize": true,
- "google.generativeai.protos.ExecutableCode.from_json": true,
- "google.generativeai.protos.ExecutableCode.language": true,
- "google.generativeai.protos.ExecutableCode.mro": true,
- "google.generativeai.protos.ExecutableCode.pb": true,
- "google.generativeai.protos.ExecutableCode.serialize": true,
- "google.generativeai.protos.ExecutableCode.to_dict": true,
- "google.generativeai.protos.ExecutableCode.to_json": true,
- "google.generativeai.protos.ExecutableCode.wrap": true,
- "google.generativeai.protos.File": false,
- "google.generativeai.protos.File.State": false,
- "google.generativeai.protos.File.State.ACTIVE": true,
- "google.generativeai.protos.File.State.FAILED": true,
- "google.generativeai.protos.File.State.PROCESSING": true,
- "google.generativeai.protos.File.State.STATE_UNSPECIFIED": true,
- "google.generativeai.protos.File.State.__abs__": true,
- "google.generativeai.protos.File.State.__add__": true,
- "google.generativeai.protos.File.State.__and__": true,
- "google.generativeai.protos.File.State.__bool__": true,
- "google.generativeai.protos.File.State.__contains__": true,
- "google.generativeai.protos.File.State.__eq__": true,
- "google.generativeai.protos.File.State.__floordiv__": true,
- "google.generativeai.protos.File.State.__ge__": true,
- "google.generativeai.protos.File.State.__getitem__": true,
- "google.generativeai.protos.File.State.__gt__": true,
- "google.generativeai.protos.File.State.__init__": true,
- "google.generativeai.protos.File.State.__invert__": true,
- "google.generativeai.protos.File.State.__iter__": true,
- "google.generativeai.protos.File.State.__le__": true,
- "google.generativeai.protos.File.State.__len__": true,
- "google.generativeai.protos.File.State.__lshift__": true,
- "google.generativeai.protos.File.State.__lt__": true,
- "google.generativeai.protos.File.State.__mod__": true,
- "google.generativeai.protos.File.State.__mul__": true,
- "google.generativeai.protos.File.State.__ne__": true,
- "google.generativeai.protos.File.State.__neg__": true,
- "google.generativeai.protos.File.State.__new__": true,
- "google.generativeai.protos.File.State.__or__": true,
- "google.generativeai.protos.File.State.__pos__": true,
- "google.generativeai.protos.File.State.__pow__": true,
- "google.generativeai.protos.File.State.__radd__": true,
- "google.generativeai.protos.File.State.__rand__": true,
- "google.generativeai.protos.File.State.__rfloordiv__": true,
- "google.generativeai.protos.File.State.__rlshift__": true,
- "google.generativeai.protos.File.State.__rmod__": true,
- "google.generativeai.protos.File.State.__rmul__": true,
- "google.generativeai.protos.File.State.__ror__": true,
- "google.generativeai.protos.File.State.__rpow__": true,
- "google.generativeai.protos.File.State.__rrshift__": true,
- "google.generativeai.protos.File.State.__rshift__": true,
- "google.generativeai.protos.File.State.__rsub__": true,
- "google.generativeai.protos.File.State.__rtruediv__": true,
- "google.generativeai.protos.File.State.__rxor__": true,
- "google.generativeai.protos.File.State.__sub__": true,
- "google.generativeai.protos.File.State.__truediv__": true,
- "google.generativeai.protos.File.State.__xor__": true,
- "google.generativeai.protos.File.State.as_integer_ratio": true,
- "google.generativeai.protos.File.State.bit_count": true,
- "google.generativeai.protos.File.State.bit_length": true,
- "google.generativeai.protos.File.State.conjugate": true,
- "google.generativeai.protos.File.State.denominator": true,
- "google.generativeai.protos.File.State.from_bytes": true,
- "google.generativeai.protos.File.State.imag": true,
- "google.generativeai.protos.File.State.is_integer": true,
- "google.generativeai.protos.File.State.numerator": true,
- "google.generativeai.protos.File.State.real": true,
- "google.generativeai.protos.File.State.to_bytes": true,
- "google.generativeai.protos.File.__call__": true,
- "google.generativeai.protos.File.__eq__": true,
- "google.generativeai.protos.File.__ge__": true,
- "google.generativeai.protos.File.__gt__": true,
- "google.generativeai.protos.File.__init__": true,
- "google.generativeai.protos.File.__le__": true,
- "google.generativeai.protos.File.__lt__": true,
- "google.generativeai.protos.File.__ne__": true,
- "google.generativeai.protos.File.__new__": true,
- "google.generativeai.protos.File.__or__": true,
- "google.generativeai.protos.File.__ror__": true,
- "google.generativeai.protos.File.copy_from": true,
- "google.generativeai.protos.File.create_time": true,
- "google.generativeai.protos.File.deserialize": true,
- "google.generativeai.protos.File.display_name": true,
- "google.generativeai.protos.File.error": true,
- "google.generativeai.protos.File.expiration_time": true,
- "google.generativeai.protos.File.from_json": true,
- "google.generativeai.protos.File.mime_type": true,
- "google.generativeai.protos.File.mro": true,
- "google.generativeai.protos.File.name": true,
- "google.generativeai.protos.File.pb": true,
- "google.generativeai.protos.File.serialize": true,
- "google.generativeai.protos.File.sha256_hash": true,
- "google.generativeai.protos.File.size_bytes": true,
- "google.generativeai.protos.File.state": true,
- "google.generativeai.protos.File.to_dict": true,
- "google.generativeai.protos.File.to_json": true,
- "google.generativeai.protos.File.update_time": true,
- "google.generativeai.protos.File.uri": true,
- "google.generativeai.protos.File.video_metadata": true,
- "google.generativeai.protos.File.wrap": true,
- "google.generativeai.protos.FileData": false,
- "google.generativeai.protos.FileData.__call__": true,
- "google.generativeai.protos.FileData.__eq__": true,
- "google.generativeai.protos.FileData.__ge__": true,
- "google.generativeai.protos.FileData.__gt__": true,
- "google.generativeai.protos.FileData.__init__": true,
- "google.generativeai.protos.FileData.__le__": true,
- "google.generativeai.protos.FileData.__lt__": true,
- "google.generativeai.protos.FileData.__ne__": true,
- "google.generativeai.protos.FileData.__new__": true,
- "google.generativeai.protos.FileData.__or__": true,
- "google.generativeai.protos.FileData.__ror__": true,
- "google.generativeai.protos.FileData.copy_from": true,
- "google.generativeai.protos.FileData.deserialize": true,
- "google.generativeai.protos.FileData.file_uri": true,
- "google.generativeai.protos.FileData.from_json": true,
- "google.generativeai.protos.FileData.mime_type": true,
- "google.generativeai.protos.FileData.mro": true,
- "google.generativeai.protos.FileData.pb": true,
- "google.generativeai.protos.FileData.serialize": true,
- "google.generativeai.protos.FileData.to_dict": true,
- "google.generativeai.protos.FileData.to_json": true,
- "google.generativeai.protos.FileData.wrap": true,
- "google.generativeai.protos.FunctionCall": false,
- "google.generativeai.protos.FunctionCall.__call__": true,
- "google.generativeai.protos.FunctionCall.__eq__": true,
- "google.generativeai.protos.FunctionCall.__ge__": true,
- "google.generativeai.protos.FunctionCall.__gt__": true,
- "google.generativeai.protos.FunctionCall.__init__": true,
- "google.generativeai.protos.FunctionCall.__le__": true,
- "google.generativeai.protos.FunctionCall.__lt__": true,
- "google.generativeai.protos.FunctionCall.__ne__": true,
- "google.generativeai.protos.FunctionCall.__new__": true,
- "google.generativeai.protos.FunctionCall.__or__": true,
- "google.generativeai.protos.FunctionCall.__ror__": true,
- "google.generativeai.protos.FunctionCall.args": true,
- "google.generativeai.protos.FunctionCall.copy_from": true,
- "google.generativeai.protos.FunctionCall.deserialize": true,
- "google.generativeai.protos.FunctionCall.from_json": true,
- "google.generativeai.protos.FunctionCall.mro": true,
- "google.generativeai.protos.FunctionCall.name": true,
- "google.generativeai.protos.FunctionCall.pb": true,
- "google.generativeai.protos.FunctionCall.serialize": true,
- "google.generativeai.protos.FunctionCall.to_dict": true,
- "google.generativeai.protos.FunctionCall.to_json": true,
- "google.generativeai.protos.FunctionCall.wrap": true,
- "google.generativeai.protos.FunctionCallingConfig": false,
- "google.generativeai.protos.FunctionCallingConfig.Mode": false,
- "google.generativeai.protos.FunctionCallingConfig.Mode.ANY": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.AUTO": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.MODE_UNSPECIFIED": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.NONE": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.from_bytes": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.imag": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.real": true,
- "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": true,
- "google.generativeai.protos.FunctionCallingConfig.__call__": true,
- "google.generativeai.protos.FunctionCallingConfig.__eq__": true,
- "google.generativeai.protos.FunctionCallingConfig.__ge__": true,
- "google.generativeai.protos.FunctionCallingConfig.__gt__": true,
- "google.generativeai.protos.FunctionCallingConfig.__init__": true,
- "google.generativeai.protos.FunctionCallingConfig.__le__": true,
- "google.generativeai.protos.FunctionCallingConfig.__lt__": true,
- "google.generativeai.protos.FunctionCallingConfig.__ne__": true,
- "google.generativeai.protos.FunctionCallingConfig.__new__": true,
- "google.generativeai.protos.FunctionCallingConfig.__or__": true,
- "google.generativeai.protos.FunctionCallingConfig.__ror__": true,
- "google.generativeai.protos.FunctionCallingConfig.allowed_function_names": true,
- "google.generativeai.protos.FunctionCallingConfig.copy_from": true,
- "google.generativeai.protos.FunctionCallingConfig.deserialize": true,
- "google.generativeai.protos.FunctionCallingConfig.from_json": true,
- "google.generativeai.protos.FunctionCallingConfig.mode": true,
- "google.generativeai.protos.FunctionCallingConfig.mro": true,
- "google.generativeai.protos.FunctionCallingConfig.pb": true,
- "google.generativeai.protos.FunctionCallingConfig.serialize": true,
- "google.generativeai.protos.FunctionCallingConfig.to_dict": true,
- "google.generativeai.protos.FunctionCallingConfig.to_json": true,
- "google.generativeai.protos.FunctionCallingConfig.wrap": true,
- "google.generativeai.protos.FunctionDeclaration": false,
- "google.generativeai.protos.FunctionDeclaration.__call__": true,
- "google.generativeai.protos.FunctionDeclaration.__eq__": true,
- "google.generativeai.protos.FunctionDeclaration.__ge__": true,
- "google.generativeai.protos.FunctionDeclaration.__gt__": true,
- "google.generativeai.protos.FunctionDeclaration.__init__": true,
- "google.generativeai.protos.FunctionDeclaration.__le__": true,
- "google.generativeai.protos.FunctionDeclaration.__lt__": true,
- "google.generativeai.protos.FunctionDeclaration.__ne__": true,
- "google.generativeai.protos.FunctionDeclaration.__new__": true,
- "google.generativeai.protos.FunctionDeclaration.__or__": true,
- "google.generativeai.protos.FunctionDeclaration.__ror__": true,
- "google.generativeai.protos.FunctionDeclaration.copy_from": true,
- "google.generativeai.protos.FunctionDeclaration.description": true,
- "google.generativeai.protos.FunctionDeclaration.deserialize": true,
- "google.generativeai.protos.FunctionDeclaration.from_json": true,
- "google.generativeai.protos.FunctionDeclaration.mro": true,
- "google.generativeai.protos.FunctionDeclaration.name": true,
- "google.generativeai.protos.FunctionDeclaration.parameters": true,
- "google.generativeai.protos.FunctionDeclaration.pb": true,
- "google.generativeai.protos.FunctionDeclaration.serialize": true,
- "google.generativeai.protos.FunctionDeclaration.to_dict": true,
- "google.generativeai.protos.FunctionDeclaration.to_json": true,
- "google.generativeai.protos.FunctionDeclaration.wrap": true,
- "google.generativeai.protos.FunctionResponse": false,
- "google.generativeai.protos.FunctionResponse.__call__": true,
- "google.generativeai.protos.FunctionResponse.__eq__": true,
- "google.generativeai.protos.FunctionResponse.__ge__": true,
- "google.generativeai.protos.FunctionResponse.__gt__": true,
- "google.generativeai.protos.FunctionResponse.__init__": true,
- "google.generativeai.protos.FunctionResponse.__le__": true,
- "google.generativeai.protos.FunctionResponse.__lt__": true,
- "google.generativeai.protos.FunctionResponse.__ne__": true,
- "google.generativeai.protos.FunctionResponse.__new__": true,
- "google.generativeai.protos.FunctionResponse.__or__": true,
- "google.generativeai.protos.FunctionResponse.__ror__": true,
- "google.generativeai.protos.FunctionResponse.copy_from": true,
- "google.generativeai.protos.FunctionResponse.deserialize": true,
- "google.generativeai.protos.FunctionResponse.from_json": true,
- "google.generativeai.protos.FunctionResponse.mro": true,
- "google.generativeai.protos.FunctionResponse.name": true,
- "google.generativeai.protos.FunctionResponse.pb": true,
- "google.generativeai.protos.FunctionResponse.response": true,
- "google.generativeai.protos.FunctionResponse.serialize": true,
- "google.generativeai.protos.FunctionResponse.to_dict": true,
- "google.generativeai.protos.FunctionResponse.to_json": true,
- "google.generativeai.protos.FunctionResponse.wrap": true,
- "google.generativeai.protos.GenerateAnswerRequest": false,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": false,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ANSWER_STYLE_UNSPECIFIED": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.VERBOSE": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.from_bytes": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": true,
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": true,
- "google.generativeai.protos.GenerateAnswerRequest.__call__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__eq__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__ge__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__gt__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__init__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__le__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__lt__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__ne__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__new__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__or__": true,
- "google.generativeai.protos.GenerateAnswerRequest.__ror__": true,
- "google.generativeai.protos.GenerateAnswerRequest.answer_style": true,
- "google.generativeai.protos.GenerateAnswerRequest.contents": true,
- "google.generativeai.protos.GenerateAnswerRequest.copy_from": true,
- "google.generativeai.protos.GenerateAnswerRequest.deserialize": true,
- "google.generativeai.protos.GenerateAnswerRequest.from_json": true,
- "google.generativeai.protos.GenerateAnswerRequest.inline_passages": true,
- "google.generativeai.protos.GenerateAnswerRequest.model": true,
- "google.generativeai.protos.GenerateAnswerRequest.mro": true,
- "google.generativeai.protos.GenerateAnswerRequest.pb": true,
- "google.generativeai.protos.GenerateAnswerRequest.safety_settings": true,
- "google.generativeai.protos.GenerateAnswerRequest.semantic_retriever": true,
- "google.generativeai.protos.GenerateAnswerRequest.serialize": true,
- "google.generativeai.protos.GenerateAnswerRequest.temperature": true,
- "google.generativeai.protos.GenerateAnswerRequest.to_dict": true,
- "google.generativeai.protos.GenerateAnswerRequest.to_json": true,
- "google.generativeai.protos.GenerateAnswerRequest.wrap": true,
- "google.generativeai.protos.GenerateAnswerResponse": false,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": false,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": false,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.OTHER": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.SAFETY": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.from_bytes": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__call__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__or__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ror__": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.block_reason": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.mro": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.safety_ratings": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": true,
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": true,
- "google.generativeai.protos.GenerateAnswerResponse.__call__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__eq__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__ge__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__gt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__init__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__le__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__lt__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__ne__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__new__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__or__": true,
- "google.generativeai.protos.GenerateAnswerResponse.__ror__": true,
- "google.generativeai.protos.GenerateAnswerResponse.answer": true,
- "google.generativeai.protos.GenerateAnswerResponse.answerable_probability": true,
- "google.generativeai.protos.GenerateAnswerResponse.copy_from": true,
- "google.generativeai.protos.GenerateAnswerResponse.deserialize": true,
- "google.generativeai.protos.GenerateAnswerResponse.from_json": true,
- "google.generativeai.protos.GenerateAnswerResponse.input_feedback": true,
- "google.generativeai.protos.GenerateAnswerResponse.mro": true,
- "google.generativeai.protos.GenerateAnswerResponse.pb": true,
- "google.generativeai.protos.GenerateAnswerResponse.serialize": true,
- "google.generativeai.protos.GenerateAnswerResponse.to_dict": true,
- "google.generativeai.protos.GenerateAnswerResponse.to_json": true,
- "google.generativeai.protos.GenerateAnswerResponse.wrap": true,
- "google.generativeai.protos.GenerateContentRequest": false,
- "google.generativeai.protos.GenerateContentRequest.__call__": true,
- "google.generativeai.protos.GenerateContentRequest.__eq__": true,
- "google.generativeai.protos.GenerateContentRequest.__ge__": true,
- "google.generativeai.protos.GenerateContentRequest.__gt__": true,
- "google.generativeai.protos.GenerateContentRequest.__init__": true,
- "google.generativeai.protos.GenerateContentRequest.__le__": true,
- "google.generativeai.protos.GenerateContentRequest.__lt__": true,
- "google.generativeai.protos.GenerateContentRequest.__ne__": true,
- "google.generativeai.protos.GenerateContentRequest.__new__": true,
- "google.generativeai.protos.GenerateContentRequest.__or__": true,
- "google.generativeai.protos.GenerateContentRequest.__ror__": true,
- "google.generativeai.protos.GenerateContentRequest.cached_content": true,
- "google.generativeai.protos.GenerateContentRequest.contents": true,
- "google.generativeai.protos.GenerateContentRequest.copy_from": true,
- "google.generativeai.protos.GenerateContentRequest.deserialize": true,
- "google.generativeai.protos.GenerateContentRequest.from_json": true,
- "google.generativeai.protos.GenerateContentRequest.generation_config": true,
- "google.generativeai.protos.GenerateContentRequest.model": true,
- "google.generativeai.protos.GenerateContentRequest.mro": true,
- "google.generativeai.protos.GenerateContentRequest.pb": true,
- "google.generativeai.protos.GenerateContentRequest.safety_settings": true,
- "google.generativeai.protos.GenerateContentRequest.serialize": true,
- "google.generativeai.protos.GenerateContentRequest.system_instruction": true,
- "google.generativeai.protos.GenerateContentRequest.to_dict": true,
- "google.generativeai.protos.GenerateContentRequest.to_json": true,
- "google.generativeai.protos.GenerateContentRequest.tool_config": true,
- "google.generativeai.protos.GenerateContentRequest.tools": true,
- "google.generativeai.protos.GenerateContentRequest.wrap": true,
- "google.generativeai.protos.GenerateContentResponse": false,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback": false,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": false,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCKLIST": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.OTHER": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.PROHIBITED_CONTENT": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.SAFETY": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.from_bytes": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__call__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__or__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ror__": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.block_reason": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.mro": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.safety_ratings": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": true,
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata": false,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__call__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__or__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ror__": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.cached_content_token_count": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.candidates_token_count": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.mro": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.prompt_token_count": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.total_token_count": true,
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": true,
- "google.generativeai.protos.GenerateContentResponse.__call__": true,
- "google.generativeai.protos.GenerateContentResponse.__eq__": true,
- "google.generativeai.protos.GenerateContentResponse.__ge__": true,
- "google.generativeai.protos.GenerateContentResponse.__gt__": true,
- "google.generativeai.protos.GenerateContentResponse.__init__": true,
- "google.generativeai.protos.GenerateContentResponse.__le__": true,
- "google.generativeai.protos.GenerateContentResponse.__lt__": true,
- "google.generativeai.protos.GenerateContentResponse.__ne__": true,
- "google.generativeai.protos.GenerateContentResponse.__new__": true,
- "google.generativeai.protos.GenerateContentResponse.__or__": true,
- "google.generativeai.protos.GenerateContentResponse.__ror__": true,
- "google.generativeai.protos.GenerateContentResponse.candidates": true,
- "google.generativeai.protos.GenerateContentResponse.copy_from": true,
- "google.generativeai.protos.GenerateContentResponse.deserialize": true,
- "google.generativeai.protos.GenerateContentResponse.from_json": true,
- "google.generativeai.protos.GenerateContentResponse.mro": true,
- "google.generativeai.protos.GenerateContentResponse.pb": true,
- "google.generativeai.protos.GenerateContentResponse.prompt_feedback": true,
- "google.generativeai.protos.GenerateContentResponse.serialize": true,
- "google.generativeai.protos.GenerateContentResponse.to_dict": true,
- "google.generativeai.protos.GenerateContentResponse.to_json": true,
- "google.generativeai.protos.GenerateContentResponse.usage_metadata": true,
- "google.generativeai.protos.GenerateContentResponse.wrap": true,
- "google.generativeai.protos.GenerateMessageRequest": false,
- "google.generativeai.protos.GenerateMessageRequest.__call__": true,
- "google.generativeai.protos.GenerateMessageRequest.__eq__": true,
- "google.generativeai.protos.GenerateMessageRequest.__ge__": true,
- "google.generativeai.protos.GenerateMessageRequest.__gt__": true,
- "google.generativeai.protos.GenerateMessageRequest.__init__": true,
- "google.generativeai.protos.GenerateMessageRequest.__le__": true,
- "google.generativeai.protos.GenerateMessageRequest.__lt__": true,
- "google.generativeai.protos.GenerateMessageRequest.__ne__": true,
- "google.generativeai.protos.GenerateMessageRequest.__new__": true,
- "google.generativeai.protos.GenerateMessageRequest.__or__": true,
- "google.generativeai.protos.GenerateMessageRequest.__ror__": true,
- "google.generativeai.protos.GenerateMessageRequest.candidate_count": true,
- "google.generativeai.protos.GenerateMessageRequest.copy_from": true,
- "google.generativeai.protos.GenerateMessageRequest.deserialize": true,
- "google.generativeai.protos.GenerateMessageRequest.from_json": true,
- "google.generativeai.protos.GenerateMessageRequest.model": true,
- "google.generativeai.protos.GenerateMessageRequest.mro": true,
- "google.generativeai.protos.GenerateMessageRequest.pb": true,
- "google.generativeai.protos.GenerateMessageRequest.prompt": true,
- "google.generativeai.protos.GenerateMessageRequest.serialize": true,
- "google.generativeai.protos.GenerateMessageRequest.temperature": true,
- "google.generativeai.protos.GenerateMessageRequest.to_dict": true,
- "google.generativeai.protos.GenerateMessageRequest.to_json": true,
- "google.generativeai.protos.GenerateMessageRequest.top_k": true,
- "google.generativeai.protos.GenerateMessageRequest.top_p": true,
- "google.generativeai.protos.GenerateMessageRequest.wrap": true,
- "google.generativeai.protos.GenerateMessageResponse": false,
- "google.generativeai.protos.GenerateMessageResponse.__call__": true,
- "google.generativeai.protos.GenerateMessageResponse.__eq__": true,
- "google.generativeai.protos.GenerateMessageResponse.__ge__": true,
- "google.generativeai.protos.GenerateMessageResponse.__gt__": true,
- "google.generativeai.protos.GenerateMessageResponse.__init__": true,
- "google.generativeai.protos.GenerateMessageResponse.__le__": true,
- "google.generativeai.protos.GenerateMessageResponse.__lt__": true,
- "google.generativeai.protos.GenerateMessageResponse.__ne__": true,
- "google.generativeai.protos.GenerateMessageResponse.__new__": true,
- "google.generativeai.protos.GenerateMessageResponse.__or__": true,
- "google.generativeai.protos.GenerateMessageResponse.__ror__": true,
- "google.generativeai.protos.GenerateMessageResponse.candidates": true,
- "google.generativeai.protos.GenerateMessageResponse.copy_from": true,
- "google.generativeai.protos.GenerateMessageResponse.deserialize": true,
- "google.generativeai.protos.GenerateMessageResponse.filters": true,
- "google.generativeai.protos.GenerateMessageResponse.from_json": true,
- "google.generativeai.protos.GenerateMessageResponse.messages": true,
- "google.generativeai.protos.GenerateMessageResponse.mro": true,
- "google.generativeai.protos.GenerateMessageResponse.pb": true,
- "google.generativeai.protos.GenerateMessageResponse.serialize": true,
- "google.generativeai.protos.GenerateMessageResponse.to_dict": true,
- "google.generativeai.protos.GenerateMessageResponse.to_json": true,
- "google.generativeai.protos.GenerateMessageResponse.wrap": true,
- "google.generativeai.protos.GenerateTextRequest": false,
- "google.generativeai.protos.GenerateTextRequest.__call__": true,
- "google.generativeai.protos.GenerateTextRequest.__eq__": true,
- "google.generativeai.protos.GenerateTextRequest.__ge__": true,
- "google.generativeai.protos.GenerateTextRequest.__gt__": true,
- "google.generativeai.protos.GenerateTextRequest.__init__": true,
- "google.generativeai.protos.GenerateTextRequest.__le__": true,
- "google.generativeai.protos.GenerateTextRequest.__lt__": true,
- "google.generativeai.protos.GenerateTextRequest.__ne__": true,
- "google.generativeai.protos.GenerateTextRequest.__new__": true,
- "google.generativeai.protos.GenerateTextRequest.__or__": true,
- "google.generativeai.protos.GenerateTextRequest.__ror__": true,
- "google.generativeai.protos.GenerateTextRequest.candidate_count": true,
- "google.generativeai.protos.GenerateTextRequest.copy_from": true,
- "google.generativeai.protos.GenerateTextRequest.deserialize": true,
- "google.generativeai.protos.GenerateTextRequest.from_json": true,
- "google.generativeai.protos.GenerateTextRequest.max_output_tokens": true,
- "google.generativeai.protos.GenerateTextRequest.model": true,
- "google.generativeai.protos.GenerateTextRequest.mro": true,
- "google.generativeai.protos.GenerateTextRequest.pb": true,
- "google.generativeai.protos.GenerateTextRequest.prompt": true,
- "google.generativeai.protos.GenerateTextRequest.safety_settings": true,
- "google.generativeai.protos.GenerateTextRequest.serialize": true,
- "google.generativeai.protos.GenerateTextRequest.stop_sequences": true,
- "google.generativeai.protos.GenerateTextRequest.temperature": true,
- "google.generativeai.protos.GenerateTextRequest.to_dict": true,
- "google.generativeai.protos.GenerateTextRequest.to_json": true,
- "google.generativeai.protos.GenerateTextRequest.top_k": true,
- "google.generativeai.protos.GenerateTextRequest.top_p": true,
- "google.generativeai.protos.GenerateTextRequest.wrap": true,
- "google.generativeai.protos.GenerateTextResponse": false,
- "google.generativeai.protos.GenerateTextResponse.__call__": true,
- "google.generativeai.protos.GenerateTextResponse.__eq__": true,
- "google.generativeai.protos.GenerateTextResponse.__ge__": true,
- "google.generativeai.protos.GenerateTextResponse.__gt__": true,
- "google.generativeai.protos.GenerateTextResponse.__init__": true,
- "google.generativeai.protos.GenerateTextResponse.__le__": true,
- "google.generativeai.protos.GenerateTextResponse.__lt__": true,
- "google.generativeai.protos.GenerateTextResponse.__ne__": true,
- "google.generativeai.protos.GenerateTextResponse.__new__": true,
- "google.generativeai.protos.GenerateTextResponse.__or__": true,
- "google.generativeai.protos.GenerateTextResponse.__ror__": true,
- "google.generativeai.protos.GenerateTextResponse.candidates": true,
- "google.generativeai.protos.GenerateTextResponse.copy_from": true,
- "google.generativeai.protos.GenerateTextResponse.deserialize": true,
- "google.generativeai.protos.GenerateTextResponse.filters": true,
- "google.generativeai.protos.GenerateTextResponse.from_json": true,
- "google.generativeai.protos.GenerateTextResponse.mro": true,
- "google.generativeai.protos.GenerateTextResponse.pb": true,
- "google.generativeai.protos.GenerateTextResponse.safety_feedback": true,
- "google.generativeai.protos.GenerateTextResponse.serialize": true,
- "google.generativeai.protos.GenerateTextResponse.to_dict": true,
- "google.generativeai.protos.GenerateTextResponse.to_json": true,
- "google.generativeai.protos.GenerateTextResponse.wrap": true,
- "google.generativeai.protos.GenerationConfig": false,
- "google.generativeai.protos.GenerationConfig.__call__": true,
- "google.generativeai.protos.GenerationConfig.__eq__": true,
- "google.generativeai.protos.GenerationConfig.__ge__": true,
- "google.generativeai.protos.GenerationConfig.__gt__": true,
- "google.generativeai.protos.GenerationConfig.__init__": true,
- "google.generativeai.protos.GenerationConfig.__le__": true,
- "google.generativeai.protos.GenerationConfig.__lt__": true,
- "google.generativeai.protos.GenerationConfig.__ne__": true,
- "google.generativeai.protos.GenerationConfig.__new__": true,
- "google.generativeai.protos.GenerationConfig.__or__": true,
- "google.generativeai.protos.GenerationConfig.__ror__": true,
- "google.generativeai.protos.GenerationConfig.candidate_count": true,
- "google.generativeai.protos.GenerationConfig.copy_from": true,
- "google.generativeai.protos.GenerationConfig.deserialize": true,
- "google.generativeai.protos.GenerationConfig.frequency_penalty": true,
- "google.generativeai.protos.GenerationConfig.from_json": true,
- "google.generativeai.protos.GenerationConfig.logprobs": true,
- "google.generativeai.protos.GenerationConfig.max_output_tokens": true,
- "google.generativeai.protos.GenerationConfig.mro": true,
- "google.generativeai.protos.GenerationConfig.pb": true,
- "google.generativeai.protos.GenerationConfig.presence_penalty": true,
- "google.generativeai.protos.GenerationConfig.response_logprobs": true,
- "google.generativeai.protos.GenerationConfig.response_mime_type": true,
- "google.generativeai.protos.GenerationConfig.response_schema": true,
- "google.generativeai.protos.GenerationConfig.serialize": true,
- "google.generativeai.protos.GenerationConfig.stop_sequences": true,
- "google.generativeai.protos.GenerationConfig.temperature": true,
- "google.generativeai.protos.GenerationConfig.to_dict": true,
- "google.generativeai.protos.GenerationConfig.to_json": true,
- "google.generativeai.protos.GenerationConfig.top_k": true,
- "google.generativeai.protos.GenerationConfig.top_p": true,
- "google.generativeai.protos.GenerationConfig.wrap": true,
- "google.generativeai.protos.GetCachedContentRequest": false,
- "google.generativeai.protos.GetCachedContentRequest.__call__": true,
- "google.generativeai.protos.GetCachedContentRequest.__eq__": true,
- "google.generativeai.protos.GetCachedContentRequest.__ge__": true,
- "google.generativeai.protos.GetCachedContentRequest.__gt__": true,
- "google.generativeai.protos.GetCachedContentRequest.__init__": true,
- "google.generativeai.protos.GetCachedContentRequest.__le__": true,
- "google.generativeai.protos.GetCachedContentRequest.__lt__": true,
- "google.generativeai.protos.GetCachedContentRequest.__ne__": true,
- "google.generativeai.protos.GetCachedContentRequest.__new__": true,
- "google.generativeai.protos.GetCachedContentRequest.__or__": true,
- "google.generativeai.protos.GetCachedContentRequest.__ror__": true,
- "google.generativeai.protos.GetCachedContentRequest.copy_from": true,
- "google.generativeai.protos.GetCachedContentRequest.deserialize": true,
- "google.generativeai.protos.GetCachedContentRequest.from_json": true,
- "google.generativeai.protos.GetCachedContentRequest.mro": true,
- "google.generativeai.protos.GetCachedContentRequest.name": true,
- "google.generativeai.protos.GetCachedContentRequest.pb": true,
- "google.generativeai.protos.GetCachedContentRequest.serialize": true,
- "google.generativeai.protos.GetCachedContentRequest.to_dict": true,
- "google.generativeai.protos.GetCachedContentRequest.to_json": true,
- "google.generativeai.protos.GetCachedContentRequest.wrap": true,
- "google.generativeai.protos.GetChunkRequest": false,
- "google.generativeai.protos.GetChunkRequest.__call__": true,
- "google.generativeai.protos.GetChunkRequest.__eq__": true,
- "google.generativeai.protos.GetChunkRequest.__ge__": true,
- "google.generativeai.protos.GetChunkRequest.__gt__": true,
- "google.generativeai.protos.GetChunkRequest.__init__": true,
- "google.generativeai.protos.GetChunkRequest.__le__": true,
- "google.generativeai.protos.GetChunkRequest.__lt__": true,
- "google.generativeai.protos.GetChunkRequest.__ne__": true,
- "google.generativeai.protos.GetChunkRequest.__new__": true,
- "google.generativeai.protos.GetChunkRequest.__or__": true,
- "google.generativeai.protos.GetChunkRequest.__ror__": true,
- "google.generativeai.protos.GetChunkRequest.copy_from": true,
- "google.generativeai.protos.GetChunkRequest.deserialize": true,
- "google.generativeai.protos.GetChunkRequest.from_json": true,
- "google.generativeai.protos.GetChunkRequest.mro": true,
- "google.generativeai.protos.GetChunkRequest.name": true,
- "google.generativeai.protos.GetChunkRequest.pb": true,
- "google.generativeai.protos.GetChunkRequest.serialize": true,
- "google.generativeai.protos.GetChunkRequest.to_dict": true,
- "google.generativeai.protos.GetChunkRequest.to_json": true,
- "google.generativeai.protos.GetChunkRequest.wrap": true,
- "google.generativeai.protos.GetCorpusRequest": false,
- "google.generativeai.protos.GetCorpusRequest.__call__": true,
- "google.generativeai.protos.GetCorpusRequest.__eq__": true,
- "google.generativeai.protos.GetCorpusRequest.__ge__": true,
- "google.generativeai.protos.GetCorpusRequest.__gt__": true,
- "google.generativeai.protos.GetCorpusRequest.__init__": true,
- "google.generativeai.protos.GetCorpusRequest.__le__": true,
- "google.generativeai.protos.GetCorpusRequest.__lt__": true,
- "google.generativeai.protos.GetCorpusRequest.__ne__": true,
- "google.generativeai.protos.GetCorpusRequest.__new__": true,
- "google.generativeai.protos.GetCorpusRequest.__or__": true,
- "google.generativeai.protos.GetCorpusRequest.__ror__": true,
- "google.generativeai.protos.GetCorpusRequest.copy_from": true,
- "google.generativeai.protos.GetCorpusRequest.deserialize": true,
- "google.generativeai.protos.GetCorpusRequest.from_json": true,
- "google.generativeai.protos.GetCorpusRequest.mro": true,
- "google.generativeai.protos.GetCorpusRequest.name": true,
- "google.generativeai.protos.GetCorpusRequest.pb": true,
- "google.generativeai.protos.GetCorpusRequest.serialize": true,
- "google.generativeai.protos.GetCorpusRequest.to_dict": true,
- "google.generativeai.protos.GetCorpusRequest.to_json": true,
- "google.generativeai.protos.GetCorpusRequest.wrap": true,
- "google.generativeai.protos.GetDocumentRequest": false,
- "google.generativeai.protos.GetDocumentRequest.__call__": true,
- "google.generativeai.protos.GetDocumentRequest.__eq__": true,
- "google.generativeai.protos.GetDocumentRequest.__ge__": true,
- "google.generativeai.protos.GetDocumentRequest.__gt__": true,
- "google.generativeai.protos.GetDocumentRequest.__init__": true,
- "google.generativeai.protos.GetDocumentRequest.__le__": true,
- "google.generativeai.protos.GetDocumentRequest.__lt__": true,
- "google.generativeai.protos.GetDocumentRequest.__ne__": true,
- "google.generativeai.protos.GetDocumentRequest.__new__": true,
- "google.generativeai.protos.GetDocumentRequest.__or__": true,
- "google.generativeai.protos.GetDocumentRequest.__ror__": true,
- "google.generativeai.protos.GetDocumentRequest.copy_from": true,
- "google.generativeai.protos.GetDocumentRequest.deserialize": true,
- "google.generativeai.protos.GetDocumentRequest.from_json": true,
- "google.generativeai.protos.GetDocumentRequest.mro": true,
- "google.generativeai.protos.GetDocumentRequest.name": true,
- "google.generativeai.protos.GetDocumentRequest.pb": true,
- "google.generativeai.protos.GetDocumentRequest.serialize": true,
- "google.generativeai.protos.GetDocumentRequest.to_dict": true,
- "google.generativeai.protos.GetDocumentRequest.to_json": true,
- "google.generativeai.protos.GetDocumentRequest.wrap": true,
- "google.generativeai.protos.GetFileRequest": false,
- "google.generativeai.protos.GetFileRequest.__call__": true,
- "google.generativeai.protos.GetFileRequest.__eq__": true,
- "google.generativeai.protos.GetFileRequest.__ge__": true,
- "google.generativeai.protos.GetFileRequest.__gt__": true,
- "google.generativeai.protos.GetFileRequest.__init__": true,
- "google.generativeai.protos.GetFileRequest.__le__": true,
- "google.generativeai.protos.GetFileRequest.__lt__": true,
- "google.generativeai.protos.GetFileRequest.__ne__": true,
- "google.generativeai.protos.GetFileRequest.__new__": true,
- "google.generativeai.protos.GetFileRequest.__or__": true,
- "google.generativeai.protos.GetFileRequest.__ror__": true,
- "google.generativeai.protos.GetFileRequest.copy_from": true,
- "google.generativeai.protos.GetFileRequest.deserialize": true,
- "google.generativeai.protos.GetFileRequest.from_json": true,
- "google.generativeai.protos.GetFileRequest.mro": true,
- "google.generativeai.protos.GetFileRequest.name": true,
- "google.generativeai.protos.GetFileRequest.pb": true,
- "google.generativeai.protos.GetFileRequest.serialize": true,
- "google.generativeai.protos.GetFileRequest.to_dict": true,
- "google.generativeai.protos.GetFileRequest.to_json": true,
- "google.generativeai.protos.GetFileRequest.wrap": true,
- "google.generativeai.protos.GetModelRequest": false,
- "google.generativeai.protos.GetModelRequest.__call__": true,
- "google.generativeai.protos.GetModelRequest.__eq__": true,
- "google.generativeai.protos.GetModelRequest.__ge__": true,
- "google.generativeai.protos.GetModelRequest.__gt__": true,
- "google.generativeai.protos.GetModelRequest.__init__": true,
- "google.generativeai.protos.GetModelRequest.__le__": true,
- "google.generativeai.protos.GetModelRequest.__lt__": true,
- "google.generativeai.protos.GetModelRequest.__ne__": true,
- "google.generativeai.protos.GetModelRequest.__new__": true,
- "google.generativeai.protos.GetModelRequest.__or__": true,
- "google.generativeai.protos.GetModelRequest.__ror__": true,
- "google.generativeai.protos.GetModelRequest.copy_from": true,
- "google.generativeai.protos.GetModelRequest.deserialize": true,
- "google.generativeai.protos.GetModelRequest.from_json": true,
- "google.generativeai.protos.GetModelRequest.mro": true,
- "google.generativeai.protos.GetModelRequest.name": true,
- "google.generativeai.protos.GetModelRequest.pb": true,
- "google.generativeai.protos.GetModelRequest.serialize": true,
- "google.generativeai.protos.GetModelRequest.to_dict": true,
- "google.generativeai.protos.GetModelRequest.to_json": true,
- "google.generativeai.protos.GetModelRequest.wrap": true,
- "google.generativeai.protos.GetPermissionRequest": false,
- "google.generativeai.protos.GetPermissionRequest.__call__": true,
- "google.generativeai.protos.GetPermissionRequest.__eq__": true,
- "google.generativeai.protos.GetPermissionRequest.__ge__": true,
- "google.generativeai.protos.GetPermissionRequest.__gt__": true,
- "google.generativeai.protos.GetPermissionRequest.__init__": true,
- "google.generativeai.protos.GetPermissionRequest.__le__": true,
- "google.generativeai.protos.GetPermissionRequest.__lt__": true,
- "google.generativeai.protos.GetPermissionRequest.__ne__": true,
- "google.generativeai.protos.GetPermissionRequest.__new__": true,
- "google.generativeai.protos.GetPermissionRequest.__or__": true,
- "google.generativeai.protos.GetPermissionRequest.__ror__": true,
- "google.generativeai.protos.GetPermissionRequest.copy_from": true,
- "google.generativeai.protos.GetPermissionRequest.deserialize": true,
- "google.generativeai.protos.GetPermissionRequest.from_json": true,
- "google.generativeai.protos.GetPermissionRequest.mro": true,
- "google.generativeai.protos.GetPermissionRequest.name": true,
- "google.generativeai.protos.GetPermissionRequest.pb": true,
- "google.generativeai.protos.GetPermissionRequest.serialize": true,
- "google.generativeai.protos.GetPermissionRequest.to_dict": true,
- "google.generativeai.protos.GetPermissionRequest.to_json": true,
- "google.generativeai.protos.GetPermissionRequest.wrap": true,
- "google.generativeai.protos.GetTunedModelRequest": false,
- "google.generativeai.protos.GetTunedModelRequest.__call__": true,
- "google.generativeai.protos.GetTunedModelRequest.__eq__": true,
- "google.generativeai.protos.GetTunedModelRequest.__ge__": true,
- "google.generativeai.protos.GetTunedModelRequest.__gt__": true,
- "google.generativeai.protos.GetTunedModelRequest.__init__": true,
- "google.generativeai.protos.GetTunedModelRequest.__le__": true,
- "google.generativeai.protos.GetTunedModelRequest.__lt__": true,
- "google.generativeai.protos.GetTunedModelRequest.__ne__": true,
- "google.generativeai.protos.GetTunedModelRequest.__new__": true,
- "google.generativeai.protos.GetTunedModelRequest.__or__": true,
- "google.generativeai.protos.GetTunedModelRequest.__ror__": true,
- "google.generativeai.protos.GetTunedModelRequest.copy_from": true,
- "google.generativeai.protos.GetTunedModelRequest.deserialize": true,
- "google.generativeai.protos.GetTunedModelRequest.from_json": true,
- "google.generativeai.protos.GetTunedModelRequest.mro": true,
- "google.generativeai.protos.GetTunedModelRequest.name": true,
- "google.generativeai.protos.GetTunedModelRequest.pb": true,
- "google.generativeai.protos.GetTunedModelRequest.serialize": true,
- "google.generativeai.protos.GetTunedModelRequest.to_dict": true,
- "google.generativeai.protos.GetTunedModelRequest.to_json": true,
- "google.generativeai.protos.GetTunedModelRequest.wrap": true,
- "google.generativeai.protos.GoogleSearchRetrieval": false,
- "google.generativeai.protos.GoogleSearchRetrieval.__call__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__eq__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__ge__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__gt__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__init__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__le__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__lt__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__ne__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__new__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__or__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.__ror__": true,
- "google.generativeai.protos.GoogleSearchRetrieval.copy_from": true,
- "google.generativeai.protos.GoogleSearchRetrieval.deserialize": true,
- "google.generativeai.protos.GoogleSearchRetrieval.dynamic_retrieval_config": true,
- "google.generativeai.protos.GoogleSearchRetrieval.from_json": true,
- "google.generativeai.protos.GoogleSearchRetrieval.mro": true,
- "google.generativeai.protos.GoogleSearchRetrieval.pb": true,
- "google.generativeai.protos.GoogleSearchRetrieval.serialize": true,
- "google.generativeai.protos.GoogleSearchRetrieval.to_dict": true,
- "google.generativeai.protos.GoogleSearchRetrieval.to_json": true,
- "google.generativeai.protos.GoogleSearchRetrieval.wrap": true,
- "google.generativeai.protos.GroundingAttribution": false,
- "google.generativeai.protos.GroundingAttribution.__call__": true,
- "google.generativeai.protos.GroundingAttribution.__eq__": true,
- "google.generativeai.protos.GroundingAttribution.__ge__": true,
- "google.generativeai.protos.GroundingAttribution.__gt__": true,
- "google.generativeai.protos.GroundingAttribution.__init__": true,
- "google.generativeai.protos.GroundingAttribution.__le__": true,
- "google.generativeai.protos.GroundingAttribution.__lt__": true,
- "google.generativeai.protos.GroundingAttribution.__ne__": true,
- "google.generativeai.protos.GroundingAttribution.__new__": true,
- "google.generativeai.protos.GroundingAttribution.__or__": true,
- "google.generativeai.protos.GroundingAttribution.__ror__": true,
- "google.generativeai.protos.GroundingAttribution.content": true,
- "google.generativeai.protos.GroundingAttribution.copy_from": true,
- "google.generativeai.protos.GroundingAttribution.deserialize": true,
- "google.generativeai.protos.GroundingAttribution.from_json": true,
- "google.generativeai.protos.GroundingAttribution.mro": true,
- "google.generativeai.protos.GroundingAttribution.pb": true,
- "google.generativeai.protos.GroundingAttribution.serialize": true,
- "google.generativeai.protos.GroundingAttribution.source_id": true,
- "google.generativeai.protos.GroundingAttribution.to_dict": true,
- "google.generativeai.protos.GroundingAttribution.to_json": true,
- "google.generativeai.protos.GroundingAttribution.wrap": true,
- "google.generativeai.protos.GroundingChunk": false,
- "google.generativeai.protos.GroundingChunk.Web": false,
- "google.generativeai.protos.GroundingChunk.Web.__call__": true,
- "google.generativeai.protos.GroundingChunk.Web.__eq__": true,
- "google.generativeai.protos.GroundingChunk.Web.__ge__": true,
- "google.generativeai.protos.GroundingChunk.Web.__gt__": true,
- "google.generativeai.protos.GroundingChunk.Web.__init__": true,
- "google.generativeai.protos.GroundingChunk.Web.__le__": true,
- "google.generativeai.protos.GroundingChunk.Web.__lt__": true,
- "google.generativeai.protos.GroundingChunk.Web.__ne__": true,
- "google.generativeai.protos.GroundingChunk.Web.__new__": true,
- "google.generativeai.protos.GroundingChunk.Web.__or__": true,
- "google.generativeai.protos.GroundingChunk.Web.__ror__": true,
- "google.generativeai.protos.GroundingChunk.Web.copy_from": true,
- "google.generativeai.protos.GroundingChunk.Web.deserialize": true,
- "google.generativeai.protos.GroundingChunk.Web.from_json": true,
- "google.generativeai.protos.GroundingChunk.Web.mro": true,
- "google.generativeai.protos.GroundingChunk.Web.pb": true,
- "google.generativeai.protos.GroundingChunk.Web.serialize": true,
- "google.generativeai.protos.GroundingChunk.Web.title": true,
- "google.generativeai.protos.GroundingChunk.Web.to_dict": true,
- "google.generativeai.protos.GroundingChunk.Web.to_json": true,
- "google.generativeai.protos.GroundingChunk.Web.uri": true,
- "google.generativeai.protos.GroundingChunk.Web.wrap": true,
- "google.generativeai.protos.GroundingChunk.__call__": true,
- "google.generativeai.protos.GroundingChunk.__eq__": true,
- "google.generativeai.protos.GroundingChunk.__ge__": true,
- "google.generativeai.protos.GroundingChunk.__gt__": true,
- "google.generativeai.protos.GroundingChunk.__init__": true,
- "google.generativeai.protos.GroundingChunk.__le__": true,
- "google.generativeai.protos.GroundingChunk.__lt__": true,
- "google.generativeai.protos.GroundingChunk.__ne__": true,
- "google.generativeai.protos.GroundingChunk.__new__": true,
- "google.generativeai.protos.GroundingChunk.__or__": true,
- "google.generativeai.protos.GroundingChunk.__ror__": true,
- "google.generativeai.protos.GroundingChunk.copy_from": true,
- "google.generativeai.protos.GroundingChunk.deserialize": true,
- "google.generativeai.protos.GroundingChunk.from_json": true,
- "google.generativeai.protos.GroundingChunk.mro": true,
- "google.generativeai.protos.GroundingChunk.pb": true,
- "google.generativeai.protos.GroundingChunk.serialize": true,
- "google.generativeai.protos.GroundingChunk.to_dict": true,
- "google.generativeai.protos.GroundingChunk.to_json": true,
- "google.generativeai.protos.GroundingChunk.web": true,
- "google.generativeai.protos.GroundingChunk.wrap": true,
- "google.generativeai.protos.GroundingMetadata": false,
- "google.generativeai.protos.GroundingMetadata.__call__": true,
- "google.generativeai.protos.GroundingMetadata.__eq__": true,
- "google.generativeai.protos.GroundingMetadata.__ge__": true,
- "google.generativeai.protos.GroundingMetadata.__gt__": true,
- "google.generativeai.protos.GroundingMetadata.__init__": true,
- "google.generativeai.protos.GroundingMetadata.__le__": true,
- "google.generativeai.protos.GroundingMetadata.__lt__": true,
- "google.generativeai.protos.GroundingMetadata.__ne__": true,
- "google.generativeai.protos.GroundingMetadata.__new__": true,
- "google.generativeai.protos.GroundingMetadata.__or__": true,
- "google.generativeai.protos.GroundingMetadata.__ror__": true,
- "google.generativeai.protos.GroundingMetadata.copy_from": true,
- "google.generativeai.protos.GroundingMetadata.deserialize": true,
- "google.generativeai.protos.GroundingMetadata.from_json": true,
- "google.generativeai.protos.GroundingMetadata.grounding_chunks": true,
- "google.generativeai.protos.GroundingMetadata.grounding_supports": true,
- "google.generativeai.protos.GroundingMetadata.mro": true,
- "google.generativeai.protos.GroundingMetadata.pb": true,
- "google.generativeai.protos.GroundingMetadata.retrieval_metadata": true,
- "google.generativeai.protos.GroundingMetadata.search_entry_point": true,
- "google.generativeai.protos.GroundingMetadata.serialize": true,
- "google.generativeai.protos.GroundingMetadata.to_dict": true,
- "google.generativeai.protos.GroundingMetadata.to_json": true,
- "google.generativeai.protos.GroundingMetadata.wrap": true,
- "google.generativeai.protos.GroundingPassage": false,
- "google.generativeai.protos.GroundingPassage.__call__": true,
- "google.generativeai.protos.GroundingPassage.__eq__": true,
- "google.generativeai.protos.GroundingPassage.__ge__": true,
- "google.generativeai.protos.GroundingPassage.__gt__": true,
- "google.generativeai.protos.GroundingPassage.__init__": true,
- "google.generativeai.protos.GroundingPassage.__le__": true,
- "google.generativeai.protos.GroundingPassage.__lt__": true,
- "google.generativeai.protos.GroundingPassage.__ne__": true,
- "google.generativeai.protos.GroundingPassage.__new__": true,
- "google.generativeai.protos.GroundingPassage.__or__": true,
- "google.generativeai.protos.GroundingPassage.__ror__": true,
- "google.generativeai.protos.GroundingPassage.content": true,
- "google.generativeai.protos.GroundingPassage.copy_from": true,
- "google.generativeai.protos.GroundingPassage.deserialize": true,
- "google.generativeai.protos.GroundingPassage.from_json": true,
- "google.generativeai.protos.GroundingPassage.id": true,
- "google.generativeai.protos.GroundingPassage.mro": true,
- "google.generativeai.protos.GroundingPassage.pb": true,
- "google.generativeai.protos.GroundingPassage.serialize": true,
- "google.generativeai.protos.GroundingPassage.to_dict": true,
- "google.generativeai.protos.GroundingPassage.to_json": true,
- "google.generativeai.protos.GroundingPassage.wrap": true,
- "google.generativeai.protos.GroundingPassages": false,
- "google.generativeai.protos.GroundingPassages.__call__": true,
- "google.generativeai.protos.GroundingPassages.__eq__": true,
- "google.generativeai.protos.GroundingPassages.__ge__": true,
- "google.generativeai.protos.GroundingPassages.__gt__": true,
- "google.generativeai.protos.GroundingPassages.__init__": true,
- "google.generativeai.protos.GroundingPassages.__le__": true,
- "google.generativeai.protos.GroundingPassages.__lt__": true,
- "google.generativeai.protos.GroundingPassages.__ne__": true,
- "google.generativeai.protos.GroundingPassages.__new__": true,
- "google.generativeai.protos.GroundingPassages.__or__": true,
- "google.generativeai.protos.GroundingPassages.__ror__": true,
- "google.generativeai.protos.GroundingPassages.copy_from": true,
- "google.generativeai.protos.GroundingPassages.deserialize": true,
- "google.generativeai.protos.GroundingPassages.from_json": true,
- "google.generativeai.protos.GroundingPassages.mro": true,
- "google.generativeai.protos.GroundingPassages.passages": true,
- "google.generativeai.protos.GroundingPassages.pb": true,
- "google.generativeai.protos.GroundingPassages.serialize": true,
- "google.generativeai.protos.GroundingPassages.to_dict": true,
- "google.generativeai.protos.GroundingPassages.to_json": true,
- "google.generativeai.protos.GroundingPassages.wrap": true,
- "google.generativeai.protos.GroundingSupport": false,
- "google.generativeai.protos.GroundingSupport.__call__": true,
- "google.generativeai.protos.GroundingSupport.__eq__": true,
- "google.generativeai.protos.GroundingSupport.__ge__": true,
- "google.generativeai.protos.GroundingSupport.__gt__": true,
- "google.generativeai.protos.GroundingSupport.__init__": true,
- "google.generativeai.protos.GroundingSupport.__le__": true,
- "google.generativeai.protos.GroundingSupport.__lt__": true,
- "google.generativeai.protos.GroundingSupport.__ne__": true,
- "google.generativeai.protos.GroundingSupport.__new__": true,
- "google.generativeai.protos.GroundingSupport.__or__": true,
- "google.generativeai.protos.GroundingSupport.__ror__": true,
- "google.generativeai.protos.GroundingSupport.confidence_scores": true,
- "google.generativeai.protos.GroundingSupport.copy_from": true,
- "google.generativeai.protos.GroundingSupport.deserialize": true,
- "google.generativeai.protos.GroundingSupport.from_json": true,
- "google.generativeai.protos.GroundingSupport.grounding_chunk_indices": true,
- "google.generativeai.protos.GroundingSupport.mro": true,
- "google.generativeai.protos.GroundingSupport.pb": true,
- "google.generativeai.protos.GroundingSupport.segment": true,
- "google.generativeai.protos.GroundingSupport.serialize": true,
- "google.generativeai.protos.GroundingSupport.to_dict": true,
- "google.generativeai.protos.GroundingSupport.to_json": true,
- "google.generativeai.protos.GroundingSupport.wrap": true,
- "google.generativeai.protos.HarmCategory": false,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DEROGATORY": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_MEDICAL": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUAL": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_TOXICITY": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
- "google.generativeai.protos.HarmCategory.HARM_CATEGORY_VIOLENCE": true,
- "google.generativeai.protos.HarmCategory.__abs__": true,
- "google.generativeai.protos.HarmCategory.__add__": true,
- "google.generativeai.protos.HarmCategory.__and__": true,
- "google.generativeai.protos.HarmCategory.__bool__": true,
- "google.generativeai.protos.HarmCategory.__contains__": true,
- "google.generativeai.protos.HarmCategory.__eq__": true,
- "google.generativeai.protos.HarmCategory.__floordiv__": true,
- "google.generativeai.protos.HarmCategory.__ge__": true,
- "google.generativeai.protos.HarmCategory.__getitem__": true,
- "google.generativeai.protos.HarmCategory.__gt__": true,
- "google.generativeai.protos.HarmCategory.__init__": true,
- "google.generativeai.protos.HarmCategory.__invert__": true,
- "google.generativeai.protos.HarmCategory.__iter__": true,
- "google.generativeai.protos.HarmCategory.__le__": true,
- "google.generativeai.protos.HarmCategory.__len__": true,
- "google.generativeai.protos.HarmCategory.__lshift__": true,
- "google.generativeai.protos.HarmCategory.__lt__": true,
- "google.generativeai.protos.HarmCategory.__mod__": true,
- "google.generativeai.protos.HarmCategory.__mul__": true,
- "google.generativeai.protos.HarmCategory.__ne__": true,
- "google.generativeai.protos.HarmCategory.__neg__": true,
- "google.generativeai.protos.HarmCategory.__new__": true,
- "google.generativeai.protos.HarmCategory.__or__": true,
- "google.generativeai.protos.HarmCategory.__pos__": true,
- "google.generativeai.protos.HarmCategory.__pow__": true,
- "google.generativeai.protos.HarmCategory.__radd__": true,
- "google.generativeai.protos.HarmCategory.__rand__": true,
- "google.generativeai.protos.HarmCategory.__rfloordiv__": true,
- "google.generativeai.protos.HarmCategory.__rlshift__": true,
- "google.generativeai.protos.HarmCategory.__rmod__": true,
- "google.generativeai.protos.HarmCategory.__rmul__": true,
- "google.generativeai.protos.HarmCategory.__ror__": true,
- "google.generativeai.protos.HarmCategory.__rpow__": true,
- "google.generativeai.protos.HarmCategory.__rrshift__": true,
- "google.generativeai.protos.HarmCategory.__rshift__": true,
- "google.generativeai.protos.HarmCategory.__rsub__": true,
- "google.generativeai.protos.HarmCategory.__rtruediv__": true,
- "google.generativeai.protos.HarmCategory.__rxor__": true,
- "google.generativeai.protos.HarmCategory.__sub__": true,
- "google.generativeai.protos.HarmCategory.__truediv__": true,
- "google.generativeai.protos.HarmCategory.__xor__": true,
- "google.generativeai.protos.HarmCategory.as_integer_ratio": true,
- "google.generativeai.protos.HarmCategory.bit_count": true,
- "google.generativeai.protos.HarmCategory.bit_length": true,
- "google.generativeai.protos.HarmCategory.conjugate": true,
- "google.generativeai.protos.HarmCategory.denominator": true,
- "google.generativeai.protos.HarmCategory.from_bytes": true,
- "google.generativeai.protos.HarmCategory.imag": true,
- "google.generativeai.protos.HarmCategory.is_integer": true,
- "google.generativeai.protos.HarmCategory.numerator": true,
- "google.generativeai.protos.HarmCategory.real": true,
- "google.generativeai.protos.HarmCategory.to_bytes": true,
- "google.generativeai.protos.Hyperparameters": false,
- "google.generativeai.protos.Hyperparameters.__call__": true,
- "google.generativeai.protos.Hyperparameters.__eq__": true,
- "google.generativeai.protos.Hyperparameters.__ge__": true,
- "google.generativeai.protos.Hyperparameters.__gt__": true,
- "google.generativeai.protos.Hyperparameters.__init__": true,
- "google.generativeai.protos.Hyperparameters.__le__": true,
- "google.generativeai.protos.Hyperparameters.__lt__": true,
- "google.generativeai.protos.Hyperparameters.__ne__": true,
- "google.generativeai.protos.Hyperparameters.__new__": true,
- "google.generativeai.protos.Hyperparameters.__or__": true,
- "google.generativeai.protos.Hyperparameters.__ror__": true,
- "google.generativeai.protos.Hyperparameters.batch_size": true,
- "google.generativeai.protos.Hyperparameters.copy_from": true,
- "google.generativeai.protos.Hyperparameters.deserialize": true,
- "google.generativeai.protos.Hyperparameters.epoch_count": true,
- "google.generativeai.protos.Hyperparameters.from_json": true,
- "google.generativeai.protos.Hyperparameters.learning_rate": true,
- "google.generativeai.protos.Hyperparameters.learning_rate_multiplier": true,
- "google.generativeai.protos.Hyperparameters.mro": true,
- "google.generativeai.protos.Hyperparameters.pb": true,
- "google.generativeai.protos.Hyperparameters.serialize": true,
- "google.generativeai.protos.Hyperparameters.to_dict": true,
- "google.generativeai.protos.Hyperparameters.to_json": true,
- "google.generativeai.protos.Hyperparameters.wrap": true,
- "google.generativeai.protos.ListCachedContentsRequest": false,
- "google.generativeai.protos.ListCachedContentsRequest.__call__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__eq__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__ge__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__gt__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__init__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__le__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__lt__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__ne__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__new__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__or__": true,
- "google.generativeai.protos.ListCachedContentsRequest.__ror__": true,
- "google.generativeai.protos.ListCachedContentsRequest.copy_from": true,
- "google.generativeai.protos.ListCachedContentsRequest.deserialize": true,
- "google.generativeai.protos.ListCachedContentsRequest.from_json": true,
- "google.generativeai.protos.ListCachedContentsRequest.mro": true,
- "google.generativeai.protos.ListCachedContentsRequest.page_size": true,
- "google.generativeai.protos.ListCachedContentsRequest.page_token": true,
- "google.generativeai.protos.ListCachedContentsRequest.pb": true,
- "google.generativeai.protos.ListCachedContentsRequest.serialize": true,
- "google.generativeai.protos.ListCachedContentsRequest.to_dict": true,
- "google.generativeai.protos.ListCachedContentsRequest.to_json": true,
- "google.generativeai.protos.ListCachedContentsRequest.wrap": true,
- "google.generativeai.protos.ListCachedContentsResponse": false,
- "google.generativeai.protos.ListCachedContentsResponse.__call__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__eq__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__ge__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__gt__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__init__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__le__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__lt__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__ne__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__new__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__or__": true,
- "google.generativeai.protos.ListCachedContentsResponse.__ror__": true,
- "google.generativeai.protos.ListCachedContentsResponse.cached_contents": true,
- "google.generativeai.protos.ListCachedContentsResponse.copy_from": true,
- "google.generativeai.protos.ListCachedContentsResponse.deserialize": true,
- "google.generativeai.protos.ListCachedContentsResponse.from_json": true,
- "google.generativeai.protos.ListCachedContentsResponse.mro": true,
- "google.generativeai.protos.ListCachedContentsResponse.next_page_token": true,
- "google.generativeai.protos.ListCachedContentsResponse.pb": true,
- "google.generativeai.protos.ListCachedContentsResponse.serialize": true,
- "google.generativeai.protos.ListCachedContentsResponse.to_dict": true,
- "google.generativeai.protos.ListCachedContentsResponse.to_json": true,
- "google.generativeai.protos.ListCachedContentsResponse.wrap": true,
- "google.generativeai.protos.ListChunksRequest": false,
- "google.generativeai.protos.ListChunksRequest.__call__": true,
- "google.generativeai.protos.ListChunksRequest.__eq__": true,
- "google.generativeai.protos.ListChunksRequest.__ge__": true,
- "google.generativeai.protos.ListChunksRequest.__gt__": true,
- "google.generativeai.protos.ListChunksRequest.__init__": true,
- "google.generativeai.protos.ListChunksRequest.__le__": true,
- "google.generativeai.protos.ListChunksRequest.__lt__": true,
- "google.generativeai.protos.ListChunksRequest.__ne__": true,
- "google.generativeai.protos.ListChunksRequest.__new__": true,
- "google.generativeai.protos.ListChunksRequest.__or__": true,
- "google.generativeai.protos.ListChunksRequest.__ror__": true,
- "google.generativeai.protos.ListChunksRequest.copy_from": true,
- "google.generativeai.protos.ListChunksRequest.deserialize": true,
- "google.generativeai.protos.ListChunksRequest.from_json": true,
- "google.generativeai.protos.ListChunksRequest.mro": true,
- "google.generativeai.protos.ListChunksRequest.page_size": true,
- "google.generativeai.protos.ListChunksRequest.page_token": true,
- "google.generativeai.protos.ListChunksRequest.parent": true,
- "google.generativeai.protos.ListChunksRequest.pb": true,
- "google.generativeai.protos.ListChunksRequest.serialize": true,
- "google.generativeai.protos.ListChunksRequest.to_dict": true,
- "google.generativeai.protos.ListChunksRequest.to_json": true,
- "google.generativeai.protos.ListChunksRequest.wrap": true,
- "google.generativeai.protos.ListChunksResponse": false,
- "google.generativeai.protos.ListChunksResponse.__call__": true,
- "google.generativeai.protos.ListChunksResponse.__eq__": true,
- "google.generativeai.protos.ListChunksResponse.__ge__": true,
- "google.generativeai.protos.ListChunksResponse.__gt__": true,
- "google.generativeai.protos.ListChunksResponse.__init__": true,
- "google.generativeai.protos.ListChunksResponse.__le__": true,
- "google.generativeai.protos.ListChunksResponse.__lt__": true,
- "google.generativeai.protos.ListChunksResponse.__ne__": true,
- "google.generativeai.protos.ListChunksResponse.__new__": true,
- "google.generativeai.protos.ListChunksResponse.__or__": true,
- "google.generativeai.protos.ListChunksResponse.__ror__": true,
- "google.generativeai.protos.ListChunksResponse.chunks": true,
- "google.generativeai.protos.ListChunksResponse.copy_from": true,
- "google.generativeai.protos.ListChunksResponse.deserialize": true,
- "google.generativeai.protos.ListChunksResponse.from_json": true,
- "google.generativeai.protos.ListChunksResponse.mro": true,
- "google.generativeai.protos.ListChunksResponse.next_page_token": true,
- "google.generativeai.protos.ListChunksResponse.pb": true,
- "google.generativeai.protos.ListChunksResponse.serialize": true,
- "google.generativeai.protos.ListChunksResponse.to_dict": true,
- "google.generativeai.protos.ListChunksResponse.to_json": true,
- "google.generativeai.protos.ListChunksResponse.wrap": true,
- "google.generativeai.protos.ListCorporaRequest": false,
- "google.generativeai.protos.ListCorporaRequest.__call__": true,
- "google.generativeai.protos.ListCorporaRequest.__eq__": true,
- "google.generativeai.protos.ListCorporaRequest.__ge__": true,
- "google.generativeai.protos.ListCorporaRequest.__gt__": true,
- "google.generativeai.protos.ListCorporaRequest.__init__": true,
- "google.generativeai.protos.ListCorporaRequest.__le__": true,
- "google.generativeai.protos.ListCorporaRequest.__lt__": true,
- "google.generativeai.protos.ListCorporaRequest.__ne__": true,
- "google.generativeai.protos.ListCorporaRequest.__new__": true,
- "google.generativeai.protos.ListCorporaRequest.__or__": true,
- "google.generativeai.protos.ListCorporaRequest.__ror__": true,
- "google.generativeai.protos.ListCorporaRequest.copy_from": true,
- "google.generativeai.protos.ListCorporaRequest.deserialize": true,
- "google.generativeai.protos.ListCorporaRequest.from_json": true,
- "google.generativeai.protos.ListCorporaRequest.mro": true,
- "google.generativeai.protos.ListCorporaRequest.page_size": true,
- "google.generativeai.protos.ListCorporaRequest.page_token": true,
- "google.generativeai.protos.ListCorporaRequest.pb": true,
- "google.generativeai.protos.ListCorporaRequest.serialize": true,
- "google.generativeai.protos.ListCorporaRequest.to_dict": true,
- "google.generativeai.protos.ListCorporaRequest.to_json": true,
- "google.generativeai.protos.ListCorporaRequest.wrap": true,
- "google.generativeai.protos.ListCorporaResponse": false,
- "google.generativeai.protos.ListCorporaResponse.__call__": true,
- "google.generativeai.protos.ListCorporaResponse.__eq__": true,
- "google.generativeai.protos.ListCorporaResponse.__ge__": true,
- "google.generativeai.protos.ListCorporaResponse.__gt__": true,
- "google.generativeai.protos.ListCorporaResponse.__init__": true,
- "google.generativeai.protos.ListCorporaResponse.__le__": true,
- "google.generativeai.protos.ListCorporaResponse.__lt__": true,
- "google.generativeai.protos.ListCorporaResponse.__ne__": true,
- "google.generativeai.protos.ListCorporaResponse.__new__": true,
- "google.generativeai.protos.ListCorporaResponse.__or__": true,
- "google.generativeai.protos.ListCorporaResponse.__ror__": true,
- "google.generativeai.protos.ListCorporaResponse.copy_from": true,
- "google.generativeai.protos.ListCorporaResponse.corpora": true,
- "google.generativeai.protos.ListCorporaResponse.deserialize": true,
- "google.generativeai.protos.ListCorporaResponse.from_json": true,
- "google.generativeai.protos.ListCorporaResponse.mro": true,
- "google.generativeai.protos.ListCorporaResponse.next_page_token": true,
- "google.generativeai.protos.ListCorporaResponse.pb": true,
- "google.generativeai.protos.ListCorporaResponse.serialize": true,
- "google.generativeai.protos.ListCorporaResponse.to_dict": true,
- "google.generativeai.protos.ListCorporaResponse.to_json": true,
- "google.generativeai.protos.ListCorporaResponse.wrap": true,
- "google.generativeai.protos.ListDocumentsRequest": false,
- "google.generativeai.protos.ListDocumentsRequest.__call__": true,
- "google.generativeai.protos.ListDocumentsRequest.__eq__": true,
- "google.generativeai.protos.ListDocumentsRequest.__ge__": true,
- "google.generativeai.protos.ListDocumentsRequest.__gt__": true,
- "google.generativeai.protos.ListDocumentsRequest.__init__": true,
- "google.generativeai.protos.ListDocumentsRequest.__le__": true,
- "google.generativeai.protos.ListDocumentsRequest.__lt__": true,
- "google.generativeai.protos.ListDocumentsRequest.__ne__": true,
- "google.generativeai.protos.ListDocumentsRequest.__new__": true,
- "google.generativeai.protos.ListDocumentsRequest.__or__": true,
- "google.generativeai.protos.ListDocumentsRequest.__ror__": true,
- "google.generativeai.protos.ListDocumentsRequest.copy_from": true,
- "google.generativeai.protos.ListDocumentsRequest.deserialize": true,
- "google.generativeai.protos.ListDocumentsRequest.from_json": true,
- "google.generativeai.protos.ListDocumentsRequest.mro": true,
- "google.generativeai.protos.ListDocumentsRequest.page_size": true,
- "google.generativeai.protos.ListDocumentsRequest.page_token": true,
- "google.generativeai.protos.ListDocumentsRequest.parent": true,
- "google.generativeai.protos.ListDocumentsRequest.pb": true,
- "google.generativeai.protos.ListDocumentsRequest.serialize": true,
- "google.generativeai.protos.ListDocumentsRequest.to_dict": true,
- "google.generativeai.protos.ListDocumentsRequest.to_json": true,
- "google.generativeai.protos.ListDocumentsRequest.wrap": true,
- "google.generativeai.protos.ListDocumentsResponse": false,
- "google.generativeai.protos.ListDocumentsResponse.__call__": true,
- "google.generativeai.protos.ListDocumentsResponse.__eq__": true,
- "google.generativeai.protos.ListDocumentsResponse.__ge__": true,
- "google.generativeai.protos.ListDocumentsResponse.__gt__": true,
- "google.generativeai.protos.ListDocumentsResponse.__init__": true,
- "google.generativeai.protos.ListDocumentsResponse.__le__": true,
- "google.generativeai.protos.ListDocumentsResponse.__lt__": true,
- "google.generativeai.protos.ListDocumentsResponse.__ne__": true,
- "google.generativeai.protos.ListDocumentsResponse.__new__": true,
- "google.generativeai.protos.ListDocumentsResponse.__or__": true,
- "google.generativeai.protos.ListDocumentsResponse.__ror__": true,
- "google.generativeai.protos.ListDocumentsResponse.copy_from": true,
- "google.generativeai.protos.ListDocumentsResponse.deserialize": true,
- "google.generativeai.protos.ListDocumentsResponse.documents": true,
- "google.generativeai.protos.ListDocumentsResponse.from_json": true,
- "google.generativeai.protos.ListDocumentsResponse.mro": true,
- "google.generativeai.protos.ListDocumentsResponse.next_page_token": true,
- "google.generativeai.protos.ListDocumentsResponse.pb": true,
- "google.generativeai.protos.ListDocumentsResponse.serialize": true,
- "google.generativeai.protos.ListDocumentsResponse.to_dict": true,
- "google.generativeai.protos.ListDocumentsResponse.to_json": true,
- "google.generativeai.protos.ListDocumentsResponse.wrap": true,
- "google.generativeai.protos.ListFilesRequest": false,
- "google.generativeai.protos.ListFilesRequest.__call__": true,
- "google.generativeai.protos.ListFilesRequest.__eq__": true,
- "google.generativeai.protos.ListFilesRequest.__ge__": true,
- "google.generativeai.protos.ListFilesRequest.__gt__": true,
- "google.generativeai.protos.ListFilesRequest.__init__": true,
- "google.generativeai.protos.ListFilesRequest.__le__": true,
- "google.generativeai.protos.ListFilesRequest.__lt__": true,
- "google.generativeai.protos.ListFilesRequest.__ne__": true,
- "google.generativeai.protos.ListFilesRequest.__new__": true,
- "google.generativeai.protos.ListFilesRequest.__or__": true,
- "google.generativeai.protos.ListFilesRequest.__ror__": true,
- "google.generativeai.protos.ListFilesRequest.copy_from": true,
- "google.generativeai.protos.ListFilesRequest.deserialize": true,
- "google.generativeai.protos.ListFilesRequest.from_json": true,
- "google.generativeai.protos.ListFilesRequest.mro": true,
- "google.generativeai.protos.ListFilesRequest.page_size": true,
- "google.generativeai.protos.ListFilesRequest.page_token": true,
- "google.generativeai.protos.ListFilesRequest.pb": true,
- "google.generativeai.protos.ListFilesRequest.serialize": true,
- "google.generativeai.protos.ListFilesRequest.to_dict": true,
- "google.generativeai.protos.ListFilesRequest.to_json": true,
- "google.generativeai.protos.ListFilesRequest.wrap": true,
- "google.generativeai.protos.ListFilesResponse": false,
- "google.generativeai.protos.ListFilesResponse.__call__": true,
- "google.generativeai.protos.ListFilesResponse.__eq__": true,
- "google.generativeai.protos.ListFilesResponse.__ge__": true,
- "google.generativeai.protos.ListFilesResponse.__gt__": true,
- "google.generativeai.protos.ListFilesResponse.__init__": true,
- "google.generativeai.protos.ListFilesResponse.__le__": true,
- "google.generativeai.protos.ListFilesResponse.__lt__": true,
- "google.generativeai.protos.ListFilesResponse.__ne__": true,
- "google.generativeai.protos.ListFilesResponse.__new__": true,
- "google.generativeai.protos.ListFilesResponse.__or__": true,
- "google.generativeai.protos.ListFilesResponse.__ror__": true,
- "google.generativeai.protos.ListFilesResponse.copy_from": true,
- "google.generativeai.protos.ListFilesResponse.deserialize": true,
- "google.generativeai.protos.ListFilesResponse.files": true,
- "google.generativeai.protos.ListFilesResponse.from_json": true,
- "google.generativeai.protos.ListFilesResponse.mro": true,
- "google.generativeai.protos.ListFilesResponse.next_page_token": true,
- "google.generativeai.protos.ListFilesResponse.pb": true,
- "google.generativeai.protos.ListFilesResponse.serialize": true,
- "google.generativeai.protos.ListFilesResponse.to_dict": true,
- "google.generativeai.protos.ListFilesResponse.to_json": true,
- "google.generativeai.protos.ListFilesResponse.wrap": true,
- "google.generativeai.protos.ListModelsRequest": false,
- "google.generativeai.protos.ListModelsRequest.__call__": true,
- "google.generativeai.protos.ListModelsRequest.__eq__": true,
- "google.generativeai.protos.ListModelsRequest.__ge__": true,
- "google.generativeai.protos.ListModelsRequest.__gt__": true,
- "google.generativeai.protos.ListModelsRequest.__init__": true,
- "google.generativeai.protos.ListModelsRequest.__le__": true,
- "google.generativeai.protos.ListModelsRequest.__lt__": true,
- "google.generativeai.protos.ListModelsRequest.__ne__": true,
- "google.generativeai.protos.ListModelsRequest.__new__": true,
- "google.generativeai.protos.ListModelsRequest.__or__": true,
- "google.generativeai.protos.ListModelsRequest.__ror__": true,
- "google.generativeai.protos.ListModelsRequest.copy_from": true,
- "google.generativeai.protos.ListModelsRequest.deserialize": true,
- "google.generativeai.protos.ListModelsRequest.from_json": true,
- "google.generativeai.protos.ListModelsRequest.mro": true,
- "google.generativeai.protos.ListModelsRequest.page_size": true,
- "google.generativeai.protos.ListModelsRequest.page_token": true,
- "google.generativeai.protos.ListModelsRequest.pb": true,
- "google.generativeai.protos.ListModelsRequest.serialize": true,
- "google.generativeai.protos.ListModelsRequest.to_dict": true,
- "google.generativeai.protos.ListModelsRequest.to_json": true,
- "google.generativeai.protos.ListModelsRequest.wrap": true,
- "google.generativeai.protos.ListModelsResponse": false,
- "google.generativeai.protos.ListModelsResponse.__call__": true,
- "google.generativeai.protos.ListModelsResponse.__eq__": true,
- "google.generativeai.protos.ListModelsResponse.__ge__": true,
- "google.generativeai.protos.ListModelsResponse.__gt__": true,
- "google.generativeai.protos.ListModelsResponse.__init__": true,
- "google.generativeai.protos.ListModelsResponse.__le__": true,
- "google.generativeai.protos.ListModelsResponse.__lt__": true,
- "google.generativeai.protos.ListModelsResponse.__ne__": true,
- "google.generativeai.protos.ListModelsResponse.__new__": true,
- "google.generativeai.protos.ListModelsResponse.__or__": true,
- "google.generativeai.protos.ListModelsResponse.__ror__": true,
- "google.generativeai.protos.ListModelsResponse.copy_from": true,
- "google.generativeai.protos.ListModelsResponse.deserialize": true,
- "google.generativeai.protos.ListModelsResponse.from_json": true,
- "google.generativeai.protos.ListModelsResponse.models": true,
- "google.generativeai.protos.ListModelsResponse.mro": true,
- "google.generativeai.protos.ListModelsResponse.next_page_token": true,
- "google.generativeai.protos.ListModelsResponse.pb": true,
- "google.generativeai.protos.ListModelsResponse.serialize": true,
- "google.generativeai.protos.ListModelsResponse.to_dict": true,
- "google.generativeai.protos.ListModelsResponse.to_json": true,
- "google.generativeai.protos.ListModelsResponse.wrap": true,
- "google.generativeai.protos.ListPermissionsRequest": false,
- "google.generativeai.protos.ListPermissionsRequest.__call__": true,
- "google.generativeai.protos.ListPermissionsRequest.__eq__": true,
- "google.generativeai.protos.ListPermissionsRequest.__ge__": true,
- "google.generativeai.protos.ListPermissionsRequest.__gt__": true,
- "google.generativeai.protos.ListPermissionsRequest.__init__": true,
- "google.generativeai.protos.ListPermissionsRequest.__le__": true,
- "google.generativeai.protos.ListPermissionsRequest.__lt__": true,
- "google.generativeai.protos.ListPermissionsRequest.__ne__": true,
- "google.generativeai.protos.ListPermissionsRequest.__new__": true,
- "google.generativeai.protos.ListPermissionsRequest.__or__": true,
- "google.generativeai.protos.ListPermissionsRequest.__ror__": true,
- "google.generativeai.protos.ListPermissionsRequest.copy_from": true,
- "google.generativeai.protos.ListPermissionsRequest.deserialize": true,
- "google.generativeai.protos.ListPermissionsRequest.from_json": true,
- "google.generativeai.protos.ListPermissionsRequest.mro": true,
- "google.generativeai.protos.ListPermissionsRequest.page_size": true,
- "google.generativeai.protos.ListPermissionsRequest.page_token": true,
- "google.generativeai.protos.ListPermissionsRequest.parent": true,
- "google.generativeai.protos.ListPermissionsRequest.pb": true,
- "google.generativeai.protos.ListPermissionsRequest.serialize": true,
- "google.generativeai.protos.ListPermissionsRequest.to_dict": true,
- "google.generativeai.protos.ListPermissionsRequest.to_json": true,
- "google.generativeai.protos.ListPermissionsRequest.wrap": true,
- "google.generativeai.protos.ListPermissionsResponse": false,
- "google.generativeai.protos.ListPermissionsResponse.__call__": true,
- "google.generativeai.protos.ListPermissionsResponse.__eq__": true,
- "google.generativeai.protos.ListPermissionsResponse.__ge__": true,
- "google.generativeai.protos.ListPermissionsResponse.__gt__": true,
- "google.generativeai.protos.ListPermissionsResponse.__init__": true,
- "google.generativeai.protos.ListPermissionsResponse.__le__": true,
- "google.generativeai.protos.ListPermissionsResponse.__lt__": true,
- "google.generativeai.protos.ListPermissionsResponse.__ne__": true,
- "google.generativeai.protos.ListPermissionsResponse.__new__": true,
- "google.generativeai.protos.ListPermissionsResponse.__or__": true,
- "google.generativeai.protos.ListPermissionsResponse.__ror__": true,
- "google.generativeai.protos.ListPermissionsResponse.copy_from": true,
- "google.generativeai.protos.ListPermissionsResponse.deserialize": true,
- "google.generativeai.protos.ListPermissionsResponse.from_json": true,
- "google.generativeai.protos.ListPermissionsResponse.mro": true,
- "google.generativeai.protos.ListPermissionsResponse.next_page_token": true,
- "google.generativeai.protos.ListPermissionsResponse.pb": true,
- "google.generativeai.protos.ListPermissionsResponse.permissions": true,
- "google.generativeai.protos.ListPermissionsResponse.serialize": true,
- "google.generativeai.protos.ListPermissionsResponse.to_dict": true,
- "google.generativeai.protos.ListPermissionsResponse.to_json": true,
- "google.generativeai.protos.ListPermissionsResponse.wrap": true,
- "google.generativeai.protos.ListTunedModelsRequest": false,
- "google.generativeai.protos.ListTunedModelsRequest.__call__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__eq__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__ge__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__gt__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__init__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__le__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__lt__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__ne__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__new__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__or__": true,
- "google.generativeai.protos.ListTunedModelsRequest.__ror__": true,
- "google.generativeai.protos.ListTunedModelsRequest.copy_from": true,
- "google.generativeai.protos.ListTunedModelsRequest.deserialize": true,
- "google.generativeai.protos.ListTunedModelsRequest.filter": true,
- "google.generativeai.protos.ListTunedModelsRequest.from_json": true,
- "google.generativeai.protos.ListTunedModelsRequest.mro": true,
- "google.generativeai.protos.ListTunedModelsRequest.page_size": true,
- "google.generativeai.protos.ListTunedModelsRequest.page_token": true,
- "google.generativeai.protos.ListTunedModelsRequest.pb": true,
- "google.generativeai.protos.ListTunedModelsRequest.serialize": true,
- "google.generativeai.protos.ListTunedModelsRequest.to_dict": true,
- "google.generativeai.protos.ListTunedModelsRequest.to_json": true,
- "google.generativeai.protos.ListTunedModelsRequest.wrap": true,
- "google.generativeai.protos.ListTunedModelsResponse": false,
- "google.generativeai.protos.ListTunedModelsResponse.__call__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__eq__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__ge__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__gt__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__init__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__le__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__lt__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__ne__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__new__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__or__": true,
- "google.generativeai.protos.ListTunedModelsResponse.__ror__": true,
- "google.generativeai.protos.ListTunedModelsResponse.copy_from": true,
- "google.generativeai.protos.ListTunedModelsResponse.deserialize": true,
- "google.generativeai.protos.ListTunedModelsResponse.from_json": true,
- "google.generativeai.protos.ListTunedModelsResponse.mro": true,
- "google.generativeai.protos.ListTunedModelsResponse.next_page_token": true,
- "google.generativeai.protos.ListTunedModelsResponse.pb": true,
- "google.generativeai.protos.ListTunedModelsResponse.serialize": true,
- "google.generativeai.protos.ListTunedModelsResponse.to_dict": true,
- "google.generativeai.protos.ListTunedModelsResponse.to_json": true,
- "google.generativeai.protos.ListTunedModelsResponse.tuned_models": true,
- "google.generativeai.protos.ListTunedModelsResponse.wrap": true,
- "google.generativeai.protos.LogprobsResult": false,
- "google.generativeai.protos.LogprobsResult.Candidate": false,
- "google.generativeai.protos.LogprobsResult.Candidate.__call__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__eq__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__ge__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__gt__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__init__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__le__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__lt__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__ne__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__new__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__or__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.__ror__": true,
- "google.generativeai.protos.LogprobsResult.Candidate.copy_from": true,
- "google.generativeai.protos.LogprobsResult.Candidate.deserialize": true,
- "google.generativeai.protos.LogprobsResult.Candidate.from_json": true,
- "google.generativeai.protos.LogprobsResult.Candidate.log_probability": true,
- "google.generativeai.protos.LogprobsResult.Candidate.mro": true,
- "google.generativeai.protos.LogprobsResult.Candidate.pb": true,
- "google.generativeai.protos.LogprobsResult.Candidate.serialize": true,
- "google.generativeai.protos.LogprobsResult.Candidate.to_dict": true,
- "google.generativeai.protos.LogprobsResult.Candidate.to_json": true,
- "google.generativeai.protos.LogprobsResult.Candidate.token": true,
- "google.generativeai.protos.LogprobsResult.Candidate.token_id": true,
- "google.generativeai.protos.LogprobsResult.Candidate.wrap": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates": false,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__call__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__or__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.__ror__": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.candidates": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.mro": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.pb": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": true,
- "google.generativeai.protos.LogprobsResult.TopCandidates.wrap": true,
- "google.generativeai.protos.LogprobsResult.__call__": true,
- "google.generativeai.protos.LogprobsResult.__eq__": true,
- "google.generativeai.protos.LogprobsResult.__ge__": true,
- "google.generativeai.protos.LogprobsResult.__gt__": true,
- "google.generativeai.protos.LogprobsResult.__init__": true,
- "google.generativeai.protos.LogprobsResult.__le__": true,
- "google.generativeai.protos.LogprobsResult.__lt__": true,
- "google.generativeai.protos.LogprobsResult.__ne__": true,
- "google.generativeai.protos.LogprobsResult.__new__": true,
- "google.generativeai.protos.LogprobsResult.__or__": true,
- "google.generativeai.protos.LogprobsResult.__ror__": true,
- "google.generativeai.protos.LogprobsResult.chosen_candidates": true,
- "google.generativeai.protos.LogprobsResult.copy_from": true,
- "google.generativeai.protos.LogprobsResult.deserialize": true,
- "google.generativeai.protos.LogprobsResult.from_json": true,
- "google.generativeai.protos.LogprobsResult.mro": true,
- "google.generativeai.protos.LogprobsResult.pb": true,
- "google.generativeai.protos.LogprobsResult.serialize": true,
- "google.generativeai.protos.LogprobsResult.to_dict": true,
- "google.generativeai.protos.LogprobsResult.to_json": true,
- "google.generativeai.protos.LogprobsResult.top_candidates": true,
- "google.generativeai.protos.LogprobsResult.wrap": true,
- "google.generativeai.protos.Message": false,
- "google.generativeai.protos.Message.__call__": true,
- "google.generativeai.protos.Message.__eq__": true,
- "google.generativeai.protos.Message.__ge__": true,
- "google.generativeai.protos.Message.__gt__": true,
- "google.generativeai.protos.Message.__init__": true,
- "google.generativeai.protos.Message.__le__": true,
- "google.generativeai.protos.Message.__lt__": true,
- "google.generativeai.protos.Message.__ne__": true,
- "google.generativeai.protos.Message.__new__": true,
- "google.generativeai.protos.Message.__or__": true,
- "google.generativeai.protos.Message.__ror__": true,
- "google.generativeai.protos.Message.author": true,
- "google.generativeai.protos.Message.citation_metadata": true,
- "google.generativeai.protos.Message.content": true,
- "google.generativeai.protos.Message.copy_from": true,
- "google.generativeai.protos.Message.deserialize": true,
- "google.generativeai.protos.Message.from_json": true,
- "google.generativeai.protos.Message.mro": true,
- "google.generativeai.protos.Message.pb": true,
- "google.generativeai.protos.Message.serialize": true,
- "google.generativeai.protos.Message.to_dict": true,
- "google.generativeai.protos.Message.to_json": true,
- "google.generativeai.protos.Message.wrap": true,
- "google.generativeai.protos.MessagePrompt": false,
- "google.generativeai.protos.MessagePrompt.__call__": true,
- "google.generativeai.protos.MessagePrompt.__eq__": true,
- "google.generativeai.protos.MessagePrompt.__ge__": true,
- "google.generativeai.protos.MessagePrompt.__gt__": true,
- "google.generativeai.protos.MessagePrompt.__init__": true,
- "google.generativeai.protos.MessagePrompt.__le__": true,
- "google.generativeai.protos.MessagePrompt.__lt__": true,
- "google.generativeai.protos.MessagePrompt.__ne__": true,
- "google.generativeai.protos.MessagePrompt.__new__": true,
- "google.generativeai.protos.MessagePrompt.__or__": true,
- "google.generativeai.protos.MessagePrompt.__ror__": true,
- "google.generativeai.protos.MessagePrompt.context": true,
- "google.generativeai.protos.MessagePrompt.copy_from": true,
- "google.generativeai.protos.MessagePrompt.deserialize": true,
- "google.generativeai.protos.MessagePrompt.examples": true,
- "google.generativeai.protos.MessagePrompt.from_json": true,
- "google.generativeai.protos.MessagePrompt.messages": true,
- "google.generativeai.protos.MessagePrompt.mro": true,
- "google.generativeai.protos.MessagePrompt.pb": true,
- "google.generativeai.protos.MessagePrompt.serialize": true,
- "google.generativeai.protos.MessagePrompt.to_dict": true,
- "google.generativeai.protos.MessagePrompt.to_json": true,
- "google.generativeai.protos.MessagePrompt.wrap": true,
- "google.generativeai.protos.MetadataFilter": false,
- "google.generativeai.protos.MetadataFilter.__call__": true,
- "google.generativeai.protos.MetadataFilter.__eq__": true,
- "google.generativeai.protos.MetadataFilter.__ge__": true,
- "google.generativeai.protos.MetadataFilter.__gt__": true,
- "google.generativeai.protos.MetadataFilter.__init__": true,
- "google.generativeai.protos.MetadataFilter.__le__": true,
- "google.generativeai.protos.MetadataFilter.__lt__": true,
- "google.generativeai.protos.MetadataFilter.__ne__": true,
- "google.generativeai.protos.MetadataFilter.__new__": true,
- "google.generativeai.protos.MetadataFilter.__or__": true,
- "google.generativeai.protos.MetadataFilter.__ror__": true,
- "google.generativeai.protos.MetadataFilter.conditions": true,
- "google.generativeai.protos.MetadataFilter.copy_from": true,
- "google.generativeai.protos.MetadataFilter.deserialize": true,
- "google.generativeai.protos.MetadataFilter.from_json": true,
- "google.generativeai.protos.MetadataFilter.key": true,
- "google.generativeai.protos.MetadataFilter.mro": true,
- "google.generativeai.protos.MetadataFilter.pb": true,
- "google.generativeai.protos.MetadataFilter.serialize": true,
- "google.generativeai.protos.MetadataFilter.to_dict": true,
- "google.generativeai.protos.MetadataFilter.to_json": true,
- "google.generativeai.protos.MetadataFilter.wrap": true,
- "google.generativeai.protos.Model": false,
- "google.generativeai.protos.Model.__call__": true,
- "google.generativeai.protos.Model.__eq__": true,
- "google.generativeai.protos.Model.__ge__": true,
- "google.generativeai.protos.Model.__gt__": true,
- "google.generativeai.protos.Model.__init__": true,
- "google.generativeai.protos.Model.__le__": true,
- "google.generativeai.protos.Model.__lt__": true,
- "google.generativeai.protos.Model.__ne__": true,
- "google.generativeai.protos.Model.__new__": true,
- "google.generativeai.protos.Model.__or__": true,
- "google.generativeai.protos.Model.__ror__": true,
- "google.generativeai.protos.Model.base_model_id": true,
- "google.generativeai.protos.Model.copy_from": true,
- "google.generativeai.protos.Model.description": true,
- "google.generativeai.protos.Model.deserialize": true,
- "google.generativeai.protos.Model.display_name": true,
- "google.generativeai.protos.Model.from_json": true,
- "google.generativeai.protos.Model.input_token_limit": true,
- "google.generativeai.protos.Model.max_temperature": true,
- "google.generativeai.protos.Model.mro": true,
- "google.generativeai.protos.Model.name": true,
- "google.generativeai.protos.Model.output_token_limit": true,
- "google.generativeai.protos.Model.pb": true,
- "google.generativeai.protos.Model.serialize": true,
- "google.generativeai.protos.Model.supported_generation_methods": true,
- "google.generativeai.protos.Model.temperature": true,
- "google.generativeai.protos.Model.to_dict": true,
- "google.generativeai.protos.Model.to_json": true,
- "google.generativeai.protos.Model.top_k": true,
- "google.generativeai.protos.Model.top_p": true,
- "google.generativeai.protos.Model.version": true,
- "google.generativeai.protos.Model.wrap": true,
- "google.generativeai.protos.Part": false,
- "google.generativeai.protos.Part.__call__": true,
- "google.generativeai.protos.Part.__eq__": true,
- "google.generativeai.protos.Part.__ge__": true,
- "google.generativeai.protos.Part.__gt__": true,
- "google.generativeai.protos.Part.__init__": true,
- "google.generativeai.protos.Part.__le__": true,
- "google.generativeai.protos.Part.__lt__": true,
- "google.generativeai.protos.Part.__ne__": true,
- "google.generativeai.protos.Part.__new__": true,
- "google.generativeai.protos.Part.__or__": true,
- "google.generativeai.protos.Part.__ror__": true,
- "google.generativeai.protos.Part.code_execution_result": true,
- "google.generativeai.protos.Part.copy_from": true,
- "google.generativeai.protos.Part.deserialize": true,
- "google.generativeai.protos.Part.executable_code": true,
- "google.generativeai.protos.Part.file_data": true,
- "google.generativeai.protos.Part.from_json": true,
- "google.generativeai.protos.Part.function_call": true,
- "google.generativeai.protos.Part.function_response": true,
- "google.generativeai.protos.Part.inline_data": true,
- "google.generativeai.protos.Part.mro": true,
- "google.generativeai.protos.Part.pb": true,
- "google.generativeai.protos.Part.serialize": true,
- "google.generativeai.protos.Part.text": true,
- "google.generativeai.protos.Part.to_dict": true,
- "google.generativeai.protos.Part.to_json": true,
- "google.generativeai.protos.Part.wrap": true,
- "google.generativeai.protos.Permission": false,
- "google.generativeai.protos.Permission.GranteeType": false,
- "google.generativeai.protos.Permission.GranteeType.EVERYONE": true,
- "google.generativeai.protos.Permission.GranteeType.GRANTEE_TYPE_UNSPECIFIED": true,
- "google.generativeai.protos.Permission.GranteeType.GROUP": true,
- "google.generativeai.protos.Permission.GranteeType.USER": true,
- "google.generativeai.protos.Permission.GranteeType.__abs__": true,
- "google.generativeai.protos.Permission.GranteeType.__add__": true,
- "google.generativeai.protos.Permission.GranteeType.__and__": true,
- "google.generativeai.protos.Permission.GranteeType.__bool__": true,
- "google.generativeai.protos.Permission.GranteeType.__contains__": true,
- "google.generativeai.protos.Permission.GranteeType.__eq__": true,
- "google.generativeai.protos.Permission.GranteeType.__floordiv__": true,
- "google.generativeai.protos.Permission.GranteeType.__ge__": true,
- "google.generativeai.protos.Permission.GranteeType.__getitem__": true,
- "google.generativeai.protos.Permission.GranteeType.__gt__": true,
- "google.generativeai.protos.Permission.GranteeType.__init__": true,
- "google.generativeai.protos.Permission.GranteeType.__invert__": true,
- "google.generativeai.protos.Permission.GranteeType.__iter__": true,
- "google.generativeai.protos.Permission.GranteeType.__le__": true,
- "google.generativeai.protos.Permission.GranteeType.__len__": true,
- "google.generativeai.protos.Permission.GranteeType.__lshift__": true,
- "google.generativeai.protos.Permission.GranteeType.__lt__": true,
- "google.generativeai.protos.Permission.GranteeType.__mod__": true,
- "google.generativeai.protos.Permission.GranteeType.__mul__": true,
- "google.generativeai.protos.Permission.GranteeType.__ne__": true,
- "google.generativeai.protos.Permission.GranteeType.__neg__": true,
- "google.generativeai.protos.Permission.GranteeType.__new__": true,
- "google.generativeai.protos.Permission.GranteeType.__or__": true,
- "google.generativeai.protos.Permission.GranteeType.__pos__": true,
- "google.generativeai.protos.Permission.GranteeType.__pow__": true,
- "google.generativeai.protos.Permission.GranteeType.__radd__": true,
- "google.generativeai.protos.Permission.GranteeType.__rand__": true,
- "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": true,
- "google.generativeai.protos.Permission.GranteeType.__rlshift__": true,
- "google.generativeai.protos.Permission.GranteeType.__rmod__": true,
- "google.generativeai.protos.Permission.GranteeType.__rmul__": true,
- "google.generativeai.protos.Permission.GranteeType.__ror__": true,
- "google.generativeai.protos.Permission.GranteeType.__rpow__": true,
- "google.generativeai.protos.Permission.GranteeType.__rrshift__": true,
- "google.generativeai.protos.Permission.GranteeType.__rshift__": true,
- "google.generativeai.protos.Permission.GranteeType.__rsub__": true,
- "google.generativeai.protos.Permission.GranteeType.__rtruediv__": true,
- "google.generativeai.protos.Permission.GranteeType.__rxor__": true,
- "google.generativeai.protos.Permission.GranteeType.__sub__": true,
- "google.generativeai.protos.Permission.GranteeType.__truediv__": true,
- "google.generativeai.protos.Permission.GranteeType.__xor__": true,
- "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": true,
- "google.generativeai.protos.Permission.GranteeType.bit_count": true,
- "google.generativeai.protos.Permission.GranteeType.bit_length": true,
- "google.generativeai.protos.Permission.GranteeType.conjugate": true,
- "google.generativeai.protos.Permission.GranteeType.denominator": true,
- "google.generativeai.protos.Permission.GranteeType.from_bytes": true,
- "google.generativeai.protos.Permission.GranteeType.imag": true,
- "google.generativeai.protos.Permission.GranteeType.is_integer": true,
- "google.generativeai.protos.Permission.GranteeType.numerator": true,
- "google.generativeai.protos.Permission.GranteeType.real": true,
- "google.generativeai.protos.Permission.GranteeType.to_bytes": true,
- "google.generativeai.protos.Permission.Role": false,
- "google.generativeai.protos.Permission.Role.OWNER": true,
- "google.generativeai.protos.Permission.Role.READER": true,
- "google.generativeai.protos.Permission.Role.ROLE_UNSPECIFIED": true,
- "google.generativeai.protos.Permission.Role.WRITER": true,
- "google.generativeai.protos.Permission.Role.__abs__": true,
- "google.generativeai.protos.Permission.Role.__add__": true,
- "google.generativeai.protos.Permission.Role.__and__": true,
- "google.generativeai.protos.Permission.Role.__bool__": true,
- "google.generativeai.protos.Permission.Role.__contains__": true,
- "google.generativeai.protos.Permission.Role.__eq__": true,
- "google.generativeai.protos.Permission.Role.__floordiv__": true,
- "google.generativeai.protos.Permission.Role.__ge__": true,
- "google.generativeai.protos.Permission.Role.__getitem__": true,
- "google.generativeai.protos.Permission.Role.__gt__": true,
- "google.generativeai.protos.Permission.Role.__init__": true,
- "google.generativeai.protos.Permission.Role.__invert__": true,
- "google.generativeai.protos.Permission.Role.__iter__": true,
- "google.generativeai.protos.Permission.Role.__le__": true,
- "google.generativeai.protos.Permission.Role.__len__": true,
- "google.generativeai.protos.Permission.Role.__lshift__": true,
- "google.generativeai.protos.Permission.Role.__lt__": true,
- "google.generativeai.protos.Permission.Role.__mod__": true,
- "google.generativeai.protos.Permission.Role.__mul__": true,
- "google.generativeai.protos.Permission.Role.__ne__": true,
- "google.generativeai.protos.Permission.Role.__neg__": true,
- "google.generativeai.protos.Permission.Role.__new__": true,
- "google.generativeai.protos.Permission.Role.__or__": true,
- "google.generativeai.protos.Permission.Role.__pos__": true,
- "google.generativeai.protos.Permission.Role.__pow__": true,
- "google.generativeai.protos.Permission.Role.__radd__": true,
- "google.generativeai.protos.Permission.Role.__rand__": true,
- "google.generativeai.protos.Permission.Role.__rfloordiv__": true,
- "google.generativeai.protos.Permission.Role.__rlshift__": true,
- "google.generativeai.protos.Permission.Role.__rmod__": true,
- "google.generativeai.protos.Permission.Role.__rmul__": true,
- "google.generativeai.protos.Permission.Role.__ror__": true,
- "google.generativeai.protos.Permission.Role.__rpow__": true,
- "google.generativeai.protos.Permission.Role.__rrshift__": true,
- "google.generativeai.protos.Permission.Role.__rshift__": true,
- "google.generativeai.protos.Permission.Role.__rsub__": true,
- "google.generativeai.protos.Permission.Role.__rtruediv__": true,
- "google.generativeai.protos.Permission.Role.__rxor__": true,
- "google.generativeai.protos.Permission.Role.__sub__": true,
- "google.generativeai.protos.Permission.Role.__truediv__": true,
- "google.generativeai.protos.Permission.Role.__xor__": true,
- "google.generativeai.protos.Permission.Role.as_integer_ratio": true,
- "google.generativeai.protos.Permission.Role.bit_count": true,
- "google.generativeai.protos.Permission.Role.bit_length": true,
- "google.generativeai.protos.Permission.Role.conjugate": true,
- "google.generativeai.protos.Permission.Role.denominator": true,
- "google.generativeai.protos.Permission.Role.from_bytes": true,
- "google.generativeai.protos.Permission.Role.imag": true,
- "google.generativeai.protos.Permission.Role.is_integer": true,
- "google.generativeai.protos.Permission.Role.numerator": true,
- "google.generativeai.protos.Permission.Role.real": true,
- "google.generativeai.protos.Permission.Role.to_bytes": true,
- "google.generativeai.protos.Permission.__call__": true,
- "google.generativeai.protos.Permission.__eq__": true,
- "google.generativeai.protos.Permission.__ge__": true,
- "google.generativeai.protos.Permission.__gt__": true,
- "google.generativeai.protos.Permission.__init__": true,
- "google.generativeai.protos.Permission.__le__": true,
- "google.generativeai.protos.Permission.__lt__": true,
- "google.generativeai.protos.Permission.__ne__": true,
- "google.generativeai.protos.Permission.__new__": true,
- "google.generativeai.protos.Permission.__or__": true,
- "google.generativeai.protos.Permission.__ror__": true,
- "google.generativeai.protos.Permission.copy_from": true,
- "google.generativeai.protos.Permission.deserialize": true,
- "google.generativeai.protos.Permission.email_address": true,
- "google.generativeai.protos.Permission.from_json": true,
- "google.generativeai.protos.Permission.grantee_type": true,
- "google.generativeai.protos.Permission.mro": true,
- "google.generativeai.protos.Permission.name": true,
- "google.generativeai.protos.Permission.pb": true,
- "google.generativeai.protos.Permission.role": true,
- "google.generativeai.protos.Permission.serialize": true,
- "google.generativeai.protos.Permission.to_dict": true,
- "google.generativeai.protos.Permission.to_json": true,
- "google.generativeai.protos.Permission.wrap": true,
- "google.generativeai.protos.PredictRequest": false,
- "google.generativeai.protos.PredictRequest.__call__": true,
- "google.generativeai.protos.PredictRequest.__eq__": true,
- "google.generativeai.protos.PredictRequest.__ge__": true,
- "google.generativeai.protos.PredictRequest.__gt__": true,
- "google.generativeai.protos.PredictRequest.__init__": true,
- "google.generativeai.protos.PredictRequest.__le__": true,
- "google.generativeai.protos.PredictRequest.__lt__": true,
- "google.generativeai.protos.PredictRequest.__ne__": true,
- "google.generativeai.protos.PredictRequest.__new__": true,
- "google.generativeai.protos.PredictRequest.__or__": true,
- "google.generativeai.protos.PredictRequest.__ror__": true,
- "google.generativeai.protos.PredictRequest.copy_from": true,
- "google.generativeai.protos.PredictRequest.deserialize": true,
- "google.generativeai.protos.PredictRequest.from_json": true,
- "google.generativeai.protos.PredictRequest.instances": true,
- "google.generativeai.protos.PredictRequest.model": true,
- "google.generativeai.protos.PredictRequest.mro": true,
- "google.generativeai.protos.PredictRequest.parameters": true,
- "google.generativeai.protos.PredictRequest.pb": true,
- "google.generativeai.protos.PredictRequest.serialize": true,
- "google.generativeai.protos.PredictRequest.to_dict": true,
- "google.generativeai.protos.PredictRequest.to_json": true,
- "google.generativeai.protos.PredictRequest.wrap": true,
- "google.generativeai.protos.PredictResponse": false,
- "google.generativeai.protos.PredictResponse.__call__": true,
- "google.generativeai.protos.PredictResponse.__eq__": true,
- "google.generativeai.protos.PredictResponse.__ge__": true,
- "google.generativeai.protos.PredictResponse.__gt__": true,
- "google.generativeai.protos.PredictResponse.__init__": true,
- "google.generativeai.protos.PredictResponse.__le__": true,
- "google.generativeai.protos.PredictResponse.__lt__": true,
- "google.generativeai.protos.PredictResponse.__ne__": true,
- "google.generativeai.protos.PredictResponse.__new__": true,
- "google.generativeai.protos.PredictResponse.__or__": true,
- "google.generativeai.protos.PredictResponse.__ror__": true,
- "google.generativeai.protos.PredictResponse.copy_from": true,
- "google.generativeai.protos.PredictResponse.deserialize": true,
- "google.generativeai.protos.PredictResponse.from_json": true,
- "google.generativeai.protos.PredictResponse.mro": true,
- "google.generativeai.protos.PredictResponse.pb": true,
- "google.generativeai.protos.PredictResponse.predictions": true,
- "google.generativeai.protos.PredictResponse.serialize": true,
- "google.generativeai.protos.PredictResponse.to_dict": true,
- "google.generativeai.protos.PredictResponse.to_json": true,
- "google.generativeai.protos.PredictResponse.wrap": true,
- "google.generativeai.protos.QueryCorpusRequest": false,
- "google.generativeai.protos.QueryCorpusRequest.__call__": true,
- "google.generativeai.protos.QueryCorpusRequest.__eq__": true,
- "google.generativeai.protos.QueryCorpusRequest.__ge__": true,
- "google.generativeai.protos.QueryCorpusRequest.__gt__": true,
- "google.generativeai.protos.QueryCorpusRequest.__init__": true,
- "google.generativeai.protos.QueryCorpusRequest.__le__": true,
- "google.generativeai.protos.QueryCorpusRequest.__lt__": true,
- "google.generativeai.protos.QueryCorpusRequest.__ne__": true,
- "google.generativeai.protos.QueryCorpusRequest.__new__": true,
- "google.generativeai.protos.QueryCorpusRequest.__or__": true,
- "google.generativeai.protos.QueryCorpusRequest.__ror__": true,
- "google.generativeai.protos.QueryCorpusRequest.copy_from": true,
- "google.generativeai.protos.QueryCorpusRequest.deserialize": true,
- "google.generativeai.protos.QueryCorpusRequest.from_json": true,
- "google.generativeai.protos.QueryCorpusRequest.metadata_filters": true,
- "google.generativeai.protos.QueryCorpusRequest.mro": true,
- "google.generativeai.protos.QueryCorpusRequest.name": true,
- "google.generativeai.protos.QueryCorpusRequest.pb": true,
- "google.generativeai.protos.QueryCorpusRequest.query": true,
- "google.generativeai.protos.QueryCorpusRequest.results_count": true,
- "google.generativeai.protos.QueryCorpusRequest.serialize": true,
- "google.generativeai.protos.QueryCorpusRequest.to_dict": true,
- "google.generativeai.protos.QueryCorpusRequest.to_json": true,
- "google.generativeai.protos.QueryCorpusRequest.wrap": true,
- "google.generativeai.protos.QueryCorpusResponse": false,
- "google.generativeai.protos.QueryCorpusResponse.__call__": true,
- "google.generativeai.protos.QueryCorpusResponse.__eq__": true,
- "google.generativeai.protos.QueryCorpusResponse.__ge__": true,
- "google.generativeai.protos.QueryCorpusResponse.__gt__": true,
- "google.generativeai.protos.QueryCorpusResponse.__init__": true,
- "google.generativeai.protos.QueryCorpusResponse.__le__": true,
- "google.generativeai.protos.QueryCorpusResponse.__lt__": true,
- "google.generativeai.protos.QueryCorpusResponse.__ne__": true,
- "google.generativeai.protos.QueryCorpusResponse.__new__": true,
- "google.generativeai.protos.QueryCorpusResponse.__or__": true,
- "google.generativeai.protos.QueryCorpusResponse.__ror__": true,
- "google.generativeai.protos.QueryCorpusResponse.copy_from": true,
- "google.generativeai.protos.QueryCorpusResponse.deserialize": true,
- "google.generativeai.protos.QueryCorpusResponse.from_json": true,
- "google.generativeai.protos.QueryCorpusResponse.mro": true,
- "google.generativeai.protos.QueryCorpusResponse.pb": true,
- "google.generativeai.protos.QueryCorpusResponse.relevant_chunks": true,
- "google.generativeai.protos.QueryCorpusResponse.serialize": true,
- "google.generativeai.protos.QueryCorpusResponse.to_dict": true,
- "google.generativeai.protos.QueryCorpusResponse.to_json": true,
- "google.generativeai.protos.QueryCorpusResponse.wrap": true,
- "google.generativeai.protos.QueryDocumentRequest": false,
- "google.generativeai.protos.QueryDocumentRequest.__call__": true,
- "google.generativeai.protos.QueryDocumentRequest.__eq__": true,
- "google.generativeai.protos.QueryDocumentRequest.__ge__": true,
- "google.generativeai.protos.QueryDocumentRequest.__gt__": true,
- "google.generativeai.protos.QueryDocumentRequest.__init__": true,
- "google.generativeai.protos.QueryDocumentRequest.__le__": true,
- "google.generativeai.protos.QueryDocumentRequest.__lt__": true,
- "google.generativeai.protos.QueryDocumentRequest.__ne__": true,
- "google.generativeai.protos.QueryDocumentRequest.__new__": true,
- "google.generativeai.protos.QueryDocumentRequest.__or__": true,
- "google.generativeai.protos.QueryDocumentRequest.__ror__": true,
- "google.generativeai.protos.QueryDocumentRequest.copy_from": true,
- "google.generativeai.protos.QueryDocumentRequest.deserialize": true,
- "google.generativeai.protos.QueryDocumentRequest.from_json": true,
- "google.generativeai.protos.QueryDocumentRequest.metadata_filters": true,
- "google.generativeai.protos.QueryDocumentRequest.mro": true,
- "google.generativeai.protos.QueryDocumentRequest.name": true,
- "google.generativeai.protos.QueryDocumentRequest.pb": true,
- "google.generativeai.protos.QueryDocumentRequest.query": true,
- "google.generativeai.protos.QueryDocumentRequest.results_count": true,
- "google.generativeai.protos.QueryDocumentRequest.serialize": true,
- "google.generativeai.protos.QueryDocumentRequest.to_dict": true,
- "google.generativeai.protos.QueryDocumentRequest.to_json": true,
- "google.generativeai.protos.QueryDocumentRequest.wrap": true,
- "google.generativeai.protos.QueryDocumentResponse": false,
- "google.generativeai.protos.QueryDocumentResponse.__call__": true,
- "google.generativeai.protos.QueryDocumentResponse.__eq__": true,
- "google.generativeai.protos.QueryDocumentResponse.__ge__": true,
- "google.generativeai.protos.QueryDocumentResponse.__gt__": true,
- "google.generativeai.protos.QueryDocumentResponse.__init__": true,
- "google.generativeai.protos.QueryDocumentResponse.__le__": true,
- "google.generativeai.protos.QueryDocumentResponse.__lt__": true,
- "google.generativeai.protos.QueryDocumentResponse.__ne__": true,
- "google.generativeai.protos.QueryDocumentResponse.__new__": true,
- "google.generativeai.protos.QueryDocumentResponse.__or__": true,
- "google.generativeai.protos.QueryDocumentResponse.__ror__": true,
- "google.generativeai.protos.QueryDocumentResponse.copy_from": true,
- "google.generativeai.protos.QueryDocumentResponse.deserialize": true,
- "google.generativeai.protos.QueryDocumentResponse.from_json": true,
- "google.generativeai.protos.QueryDocumentResponse.mro": true,
- "google.generativeai.protos.QueryDocumentResponse.pb": true,
- "google.generativeai.protos.QueryDocumentResponse.relevant_chunks": true,
- "google.generativeai.protos.QueryDocumentResponse.serialize": true,
- "google.generativeai.protos.QueryDocumentResponse.to_dict": true,
- "google.generativeai.protos.QueryDocumentResponse.to_json": true,
- "google.generativeai.protos.QueryDocumentResponse.wrap": true,
- "google.generativeai.protos.RelevantChunk": false,
- "google.generativeai.protos.RelevantChunk.__call__": true,
- "google.generativeai.protos.RelevantChunk.__eq__": true,
- "google.generativeai.protos.RelevantChunk.__ge__": true,
- "google.generativeai.protos.RelevantChunk.__gt__": true,
- "google.generativeai.protos.RelevantChunk.__init__": true,
- "google.generativeai.protos.RelevantChunk.__le__": true,
- "google.generativeai.protos.RelevantChunk.__lt__": true,
- "google.generativeai.protos.RelevantChunk.__ne__": true,
- "google.generativeai.protos.RelevantChunk.__new__": true,
- "google.generativeai.protos.RelevantChunk.__or__": true,
- "google.generativeai.protos.RelevantChunk.__ror__": true,
- "google.generativeai.protos.RelevantChunk.chunk": true,
- "google.generativeai.protos.RelevantChunk.chunk_relevance_score": true,
- "google.generativeai.protos.RelevantChunk.copy_from": true,
- "google.generativeai.protos.RelevantChunk.deserialize": true,
- "google.generativeai.protos.RelevantChunk.from_json": true,
- "google.generativeai.protos.RelevantChunk.mro": true,
- "google.generativeai.protos.RelevantChunk.pb": true,
- "google.generativeai.protos.RelevantChunk.serialize": true,
- "google.generativeai.protos.RelevantChunk.to_dict": true,
- "google.generativeai.protos.RelevantChunk.to_json": true,
- "google.generativeai.protos.RelevantChunk.wrap": true,
- "google.generativeai.protos.RetrievalMetadata": false,
- "google.generativeai.protos.RetrievalMetadata.__call__": true,
- "google.generativeai.protos.RetrievalMetadata.__eq__": true,
- "google.generativeai.protos.RetrievalMetadata.__ge__": true,
- "google.generativeai.protos.RetrievalMetadata.__gt__": true,
- "google.generativeai.protos.RetrievalMetadata.__init__": true,
- "google.generativeai.protos.RetrievalMetadata.__le__": true,
- "google.generativeai.protos.RetrievalMetadata.__lt__": true,
- "google.generativeai.protos.RetrievalMetadata.__ne__": true,
- "google.generativeai.protos.RetrievalMetadata.__new__": true,
- "google.generativeai.protos.RetrievalMetadata.__or__": true,
- "google.generativeai.protos.RetrievalMetadata.__ror__": true,
- "google.generativeai.protos.RetrievalMetadata.copy_from": true,
- "google.generativeai.protos.RetrievalMetadata.deserialize": true,
- "google.generativeai.protos.RetrievalMetadata.from_json": true,
- "google.generativeai.protos.RetrievalMetadata.google_search_dynamic_retrieval_score": true,
- "google.generativeai.protos.RetrievalMetadata.mro": true,
- "google.generativeai.protos.RetrievalMetadata.pb": true,
- "google.generativeai.protos.RetrievalMetadata.serialize": true,
- "google.generativeai.protos.RetrievalMetadata.to_dict": true,
- "google.generativeai.protos.RetrievalMetadata.to_json": true,
- "google.generativeai.protos.RetrievalMetadata.wrap": true,
- "google.generativeai.protos.SafetyFeedback": false,
- "google.generativeai.protos.SafetyFeedback.__call__": true,
- "google.generativeai.protos.SafetyFeedback.__eq__": true,
- "google.generativeai.protos.SafetyFeedback.__ge__": true,
- "google.generativeai.protos.SafetyFeedback.__gt__": true,
- "google.generativeai.protos.SafetyFeedback.__init__": true,
- "google.generativeai.protos.SafetyFeedback.__le__": true,
- "google.generativeai.protos.SafetyFeedback.__lt__": true,
- "google.generativeai.protos.SafetyFeedback.__ne__": true,
- "google.generativeai.protos.SafetyFeedback.__new__": true,
- "google.generativeai.protos.SafetyFeedback.__or__": true,
- "google.generativeai.protos.SafetyFeedback.__ror__": true,
- "google.generativeai.protos.SafetyFeedback.copy_from": true,
- "google.generativeai.protos.SafetyFeedback.deserialize": true,
- "google.generativeai.protos.SafetyFeedback.from_json": true,
- "google.generativeai.protos.SafetyFeedback.mro": true,
- "google.generativeai.protos.SafetyFeedback.pb": true,
- "google.generativeai.protos.SafetyFeedback.rating": true,
- "google.generativeai.protos.SafetyFeedback.serialize": true,
- "google.generativeai.protos.SafetyFeedback.setting": true,
- "google.generativeai.protos.SafetyFeedback.to_dict": true,
- "google.generativeai.protos.SafetyFeedback.to_json": true,
- "google.generativeai.protos.SafetyFeedback.wrap": true,
- "google.generativeai.protos.SafetyRating": false,
- "google.generativeai.protos.SafetyRating.HarmProbability": false,
- "google.generativeai.protos.SafetyRating.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.HIGH": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.LOW": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.MEDIUM": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.NEGLIGIBLE": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__add__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__and__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__contains__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__init__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__iter__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__le__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__len__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__new__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__or__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.denominator": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.imag": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.numerator": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.real": true,
- "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": true,
- "google.generativeai.protos.SafetyRating.__call__": true,
- "google.generativeai.protos.SafetyRating.__eq__": true,
- "google.generativeai.protos.SafetyRating.__ge__": true,
- "google.generativeai.protos.SafetyRating.__gt__": true,
- "google.generativeai.protos.SafetyRating.__init__": true,
- "google.generativeai.protos.SafetyRating.__le__": true,
- "google.generativeai.protos.SafetyRating.__lt__": true,
- "google.generativeai.protos.SafetyRating.__ne__": true,
- "google.generativeai.protos.SafetyRating.__new__": true,
- "google.generativeai.protos.SafetyRating.__or__": true,
- "google.generativeai.protos.SafetyRating.__ror__": true,
- "google.generativeai.protos.SafetyRating.blocked": true,
- "google.generativeai.protos.SafetyRating.category": true,
- "google.generativeai.protos.SafetyRating.copy_from": true,
- "google.generativeai.protos.SafetyRating.deserialize": true,
- "google.generativeai.protos.SafetyRating.from_json": true,
- "google.generativeai.protos.SafetyRating.mro": true,
- "google.generativeai.protos.SafetyRating.pb": true,
- "google.generativeai.protos.SafetyRating.probability": true,
- "google.generativeai.protos.SafetyRating.serialize": true,
- "google.generativeai.protos.SafetyRating.to_dict": true,
- "google.generativeai.protos.SafetyRating.to_json": true,
- "google.generativeai.protos.SafetyRating.wrap": true,
- "google.generativeai.protos.SafetySetting": false,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold": false,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_NONE": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.OFF": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": true,
- "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": true,
- "google.generativeai.protos.SafetySetting.__call__": true,
- "google.generativeai.protos.SafetySetting.__eq__": true,
- "google.generativeai.protos.SafetySetting.__ge__": true,
- "google.generativeai.protos.SafetySetting.__gt__": true,
- "google.generativeai.protos.SafetySetting.__init__": true,
- "google.generativeai.protos.SafetySetting.__le__": true,
- "google.generativeai.protos.SafetySetting.__lt__": true,
- "google.generativeai.protos.SafetySetting.__ne__": true,
- "google.generativeai.protos.SafetySetting.__new__": true,
- "google.generativeai.protos.SafetySetting.__or__": true,
- "google.generativeai.protos.SafetySetting.__ror__": true,
- "google.generativeai.protos.SafetySetting.category": true,
- "google.generativeai.protos.SafetySetting.copy_from": true,
- "google.generativeai.protos.SafetySetting.deserialize": true,
- "google.generativeai.protos.SafetySetting.from_json": true,
- "google.generativeai.protos.SafetySetting.mro": true,
- "google.generativeai.protos.SafetySetting.pb": true,
- "google.generativeai.protos.SafetySetting.serialize": true,
- "google.generativeai.protos.SafetySetting.threshold": true,
- "google.generativeai.protos.SafetySetting.to_dict": true,
- "google.generativeai.protos.SafetySetting.to_json": true,
- "google.generativeai.protos.SafetySetting.wrap": true,
- "google.generativeai.protos.Schema": false,
- "google.generativeai.protos.Schema.PropertiesEntry": false,
- "google.generativeai.protos.Schema.PropertiesEntry.__call__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__eq__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__ge__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__gt__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__init__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__le__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__lt__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__ne__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__new__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__or__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.__ror__": true,
- "google.generativeai.protos.Schema.PropertiesEntry.copy_from": true,
- "google.generativeai.protos.Schema.PropertiesEntry.deserialize": true,
- "google.generativeai.protos.Schema.PropertiesEntry.from_json": true,
- "google.generativeai.protos.Schema.PropertiesEntry.key": true,
- "google.generativeai.protos.Schema.PropertiesEntry.mro": true,
- "google.generativeai.protos.Schema.PropertiesEntry.pb": true,
- "google.generativeai.protos.Schema.PropertiesEntry.serialize": true,
- "google.generativeai.protos.Schema.PropertiesEntry.to_dict": true,
- "google.generativeai.protos.Schema.PropertiesEntry.to_json": true,
- "google.generativeai.protos.Schema.PropertiesEntry.value": true,
- "google.generativeai.protos.Schema.PropertiesEntry.wrap": true,
- "google.generativeai.protos.Schema.__call__": true,
- "google.generativeai.protos.Schema.__eq__": true,
- "google.generativeai.protos.Schema.__ge__": true,
- "google.generativeai.protos.Schema.__gt__": true,
- "google.generativeai.protos.Schema.__init__": true,
- "google.generativeai.protos.Schema.__le__": true,
- "google.generativeai.protos.Schema.__lt__": true,
- "google.generativeai.protos.Schema.__ne__": true,
- "google.generativeai.protos.Schema.__new__": true,
- "google.generativeai.protos.Schema.__or__": true,
- "google.generativeai.protos.Schema.__ror__": true,
- "google.generativeai.protos.Schema.copy_from": true,
- "google.generativeai.protos.Schema.description": true,
- "google.generativeai.protos.Schema.deserialize": true,
- "google.generativeai.protos.Schema.enum": true,
- "google.generativeai.protos.Schema.format_": true,
- "google.generativeai.protos.Schema.from_json": true,
- "google.generativeai.protos.Schema.items": true,
- "google.generativeai.protos.Schema.max_items": true,
- "google.generativeai.protos.Schema.min_items": true,
- "google.generativeai.protos.Schema.mro": true,
- "google.generativeai.protos.Schema.nullable": true,
- "google.generativeai.protos.Schema.pb": true,
- "google.generativeai.protos.Schema.properties": true,
- "google.generativeai.protos.Schema.required": true,
- "google.generativeai.protos.Schema.serialize": true,
- "google.generativeai.protos.Schema.to_dict": true,
- "google.generativeai.protos.Schema.to_json": true,
- "google.generativeai.protos.Schema.type_": true,
- "google.generativeai.protos.Schema.wrap": true,
- "google.generativeai.protos.SearchEntryPoint": false,
- "google.generativeai.protos.SearchEntryPoint.__call__": true,
- "google.generativeai.protos.SearchEntryPoint.__eq__": true,
- "google.generativeai.protos.SearchEntryPoint.__ge__": true,
- "google.generativeai.protos.SearchEntryPoint.__gt__": true,
- "google.generativeai.protos.SearchEntryPoint.__init__": true,
- "google.generativeai.protos.SearchEntryPoint.__le__": true,
- "google.generativeai.protos.SearchEntryPoint.__lt__": true,
- "google.generativeai.protos.SearchEntryPoint.__ne__": true,
- "google.generativeai.protos.SearchEntryPoint.__new__": true,
- "google.generativeai.protos.SearchEntryPoint.__or__": true,
- "google.generativeai.protos.SearchEntryPoint.__ror__": true,
- "google.generativeai.protos.SearchEntryPoint.copy_from": true,
- "google.generativeai.protos.SearchEntryPoint.deserialize": true,
- "google.generativeai.protos.SearchEntryPoint.from_json": true,
- "google.generativeai.protos.SearchEntryPoint.mro": true,
- "google.generativeai.protos.SearchEntryPoint.pb": true,
- "google.generativeai.protos.SearchEntryPoint.rendered_content": true,
- "google.generativeai.protos.SearchEntryPoint.sdk_blob": true,
- "google.generativeai.protos.SearchEntryPoint.serialize": true,
- "google.generativeai.protos.SearchEntryPoint.to_dict": true,
- "google.generativeai.protos.SearchEntryPoint.to_json": true,
- "google.generativeai.protos.SearchEntryPoint.wrap": true,
- "google.generativeai.protos.Segment": false,
- "google.generativeai.protos.Segment.__call__": true,
- "google.generativeai.protos.Segment.__eq__": true,
- "google.generativeai.protos.Segment.__ge__": true,
- "google.generativeai.protos.Segment.__gt__": true,
- "google.generativeai.protos.Segment.__init__": true,
- "google.generativeai.protos.Segment.__le__": true,
- "google.generativeai.protos.Segment.__lt__": true,
- "google.generativeai.protos.Segment.__ne__": true,
- "google.generativeai.protos.Segment.__new__": true,
- "google.generativeai.protos.Segment.__or__": true,
- "google.generativeai.protos.Segment.__ror__": true,
- "google.generativeai.protos.Segment.copy_from": true,
- "google.generativeai.protos.Segment.deserialize": true,
- "google.generativeai.protos.Segment.end_index": true,
- "google.generativeai.protos.Segment.from_json": true,
- "google.generativeai.protos.Segment.mro": true,
- "google.generativeai.protos.Segment.part_index": true,
- "google.generativeai.protos.Segment.pb": true,
- "google.generativeai.protos.Segment.serialize": true,
- "google.generativeai.protos.Segment.start_index": true,
- "google.generativeai.protos.Segment.text": true,
- "google.generativeai.protos.Segment.to_dict": true,
- "google.generativeai.protos.Segment.to_json": true,
- "google.generativeai.protos.Segment.wrap": true,
- "google.generativeai.protos.SemanticRetrieverConfig": false,
- "google.generativeai.protos.SemanticRetrieverConfig.__call__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__eq__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__ge__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__gt__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__init__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__le__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__lt__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__ne__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__new__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__or__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.__ror__": true,
- "google.generativeai.protos.SemanticRetrieverConfig.copy_from": true,
- "google.generativeai.protos.SemanticRetrieverConfig.deserialize": true,
- "google.generativeai.protos.SemanticRetrieverConfig.from_json": true,
- "google.generativeai.protos.SemanticRetrieverConfig.max_chunks_count": true,
- "google.generativeai.protos.SemanticRetrieverConfig.metadata_filters": true,
- "google.generativeai.protos.SemanticRetrieverConfig.minimum_relevance_score": true,
- "google.generativeai.protos.SemanticRetrieverConfig.mro": true,
- "google.generativeai.protos.SemanticRetrieverConfig.pb": true,
- "google.generativeai.protos.SemanticRetrieverConfig.query": true,
- "google.generativeai.protos.SemanticRetrieverConfig.serialize": true,
- "google.generativeai.protos.SemanticRetrieverConfig.source": true,
- "google.generativeai.protos.SemanticRetrieverConfig.to_dict": true,
- "google.generativeai.protos.SemanticRetrieverConfig.to_json": true,
- "google.generativeai.protos.SemanticRetrieverConfig.wrap": true,
- "google.generativeai.protos.StringList": false,
- "google.generativeai.protos.StringList.__call__": true,
- "google.generativeai.protos.StringList.__eq__": true,
- "google.generativeai.protos.StringList.__ge__": true,
- "google.generativeai.protos.StringList.__gt__": true,
- "google.generativeai.protos.StringList.__init__": true,
- "google.generativeai.protos.StringList.__le__": true,
- "google.generativeai.protos.StringList.__lt__": true,
- "google.generativeai.protos.StringList.__ne__": true,
- "google.generativeai.protos.StringList.__new__": true,
- "google.generativeai.protos.StringList.__or__": true,
- "google.generativeai.protos.StringList.__ror__": true,
- "google.generativeai.protos.StringList.copy_from": true,
- "google.generativeai.protos.StringList.deserialize": true,
- "google.generativeai.protos.StringList.from_json": true,
- "google.generativeai.protos.StringList.mro": true,
- "google.generativeai.protos.StringList.pb": true,
- "google.generativeai.protos.StringList.serialize": true,
- "google.generativeai.protos.StringList.to_dict": true,
- "google.generativeai.protos.StringList.to_json": true,
- "google.generativeai.protos.StringList.values": true,
- "google.generativeai.protos.StringList.wrap": true,
- "google.generativeai.protos.TaskType": false,
- "google.generativeai.protos.TaskType.CLASSIFICATION": true,
- "google.generativeai.protos.TaskType.CLUSTERING": true,
- "google.generativeai.protos.TaskType.FACT_VERIFICATION": true,
- "google.generativeai.protos.TaskType.QUESTION_ANSWERING": true,
- "google.generativeai.protos.TaskType.RETRIEVAL_DOCUMENT": true,
- "google.generativeai.protos.TaskType.RETRIEVAL_QUERY": true,
- "google.generativeai.protos.TaskType.SEMANTIC_SIMILARITY": true,
- "google.generativeai.protos.TaskType.TASK_TYPE_UNSPECIFIED": true,
- "google.generativeai.protos.TaskType.__abs__": true,
- "google.generativeai.protos.TaskType.__add__": true,
- "google.generativeai.protos.TaskType.__and__": true,
- "google.generativeai.protos.TaskType.__bool__": true,
- "google.generativeai.protos.TaskType.__contains__": true,
- "google.generativeai.protos.TaskType.__eq__": true,
- "google.generativeai.protos.TaskType.__floordiv__": true,
- "google.generativeai.protos.TaskType.__ge__": true,
- "google.generativeai.protos.TaskType.__getitem__": true,
- "google.generativeai.protos.TaskType.__gt__": true,
- "google.generativeai.protos.TaskType.__init__": true,
- "google.generativeai.protos.TaskType.__invert__": true,
- "google.generativeai.protos.TaskType.__iter__": true,
- "google.generativeai.protos.TaskType.__le__": true,
- "google.generativeai.protos.TaskType.__len__": true,
- "google.generativeai.protos.TaskType.__lshift__": true,
- "google.generativeai.protos.TaskType.__lt__": true,
- "google.generativeai.protos.TaskType.__mod__": true,
- "google.generativeai.protos.TaskType.__mul__": true,
- "google.generativeai.protos.TaskType.__ne__": true,
- "google.generativeai.protos.TaskType.__neg__": true,
- "google.generativeai.protos.TaskType.__new__": true,
- "google.generativeai.protos.TaskType.__or__": true,
- "google.generativeai.protos.TaskType.__pos__": true,
- "google.generativeai.protos.TaskType.__pow__": true,
- "google.generativeai.protos.TaskType.__radd__": true,
- "google.generativeai.protos.TaskType.__rand__": true,
- "google.generativeai.protos.TaskType.__rfloordiv__": true,
- "google.generativeai.protos.TaskType.__rlshift__": true,
- "google.generativeai.protos.TaskType.__rmod__": true,
- "google.generativeai.protos.TaskType.__rmul__": true,
- "google.generativeai.protos.TaskType.__ror__": true,
- "google.generativeai.protos.TaskType.__rpow__": true,
- "google.generativeai.protos.TaskType.__rrshift__": true,
- "google.generativeai.protos.TaskType.__rshift__": true,
- "google.generativeai.protos.TaskType.__rsub__": true,
- "google.generativeai.protos.TaskType.__rtruediv__": true,
- "google.generativeai.protos.TaskType.__rxor__": true,
- "google.generativeai.protos.TaskType.__sub__": true,
- "google.generativeai.protos.TaskType.__truediv__": true,
- "google.generativeai.protos.TaskType.__xor__": true,
- "google.generativeai.protos.TaskType.as_integer_ratio": true,
- "google.generativeai.protos.TaskType.bit_count": true,
- "google.generativeai.protos.TaskType.bit_length": true,
- "google.generativeai.protos.TaskType.conjugate": true,
- "google.generativeai.protos.TaskType.denominator": true,
- "google.generativeai.protos.TaskType.from_bytes": true,
- "google.generativeai.protos.TaskType.imag": true,
- "google.generativeai.protos.TaskType.is_integer": true,
- "google.generativeai.protos.TaskType.numerator": true,
- "google.generativeai.protos.TaskType.real": true,
- "google.generativeai.protos.TaskType.to_bytes": true,
- "google.generativeai.protos.TextCompletion": false,
- "google.generativeai.protos.TextCompletion.__call__": true,
- "google.generativeai.protos.TextCompletion.__eq__": true,
- "google.generativeai.protos.TextCompletion.__ge__": true,
- "google.generativeai.protos.TextCompletion.__gt__": true,
- "google.generativeai.protos.TextCompletion.__init__": true,
- "google.generativeai.protos.TextCompletion.__le__": true,
- "google.generativeai.protos.TextCompletion.__lt__": true,
- "google.generativeai.protos.TextCompletion.__ne__": true,
- "google.generativeai.protos.TextCompletion.__new__": true,
- "google.generativeai.protos.TextCompletion.__or__": true,
- "google.generativeai.protos.TextCompletion.__ror__": true,
- "google.generativeai.protos.TextCompletion.citation_metadata": true,
- "google.generativeai.protos.TextCompletion.copy_from": true,
- "google.generativeai.protos.TextCompletion.deserialize": true,
- "google.generativeai.protos.TextCompletion.from_json": true,
- "google.generativeai.protos.TextCompletion.mro": true,
- "google.generativeai.protos.TextCompletion.output": true,
- "google.generativeai.protos.TextCompletion.pb": true,
- "google.generativeai.protos.TextCompletion.safety_ratings": true,
- "google.generativeai.protos.TextCompletion.serialize": true,
- "google.generativeai.protos.TextCompletion.to_dict": true,
- "google.generativeai.protos.TextCompletion.to_json": true,
- "google.generativeai.protos.TextCompletion.wrap": true,
- "google.generativeai.protos.TextPrompt": false,
- "google.generativeai.protos.TextPrompt.__call__": true,
- "google.generativeai.protos.TextPrompt.__eq__": true,
- "google.generativeai.protos.TextPrompt.__ge__": true,
- "google.generativeai.protos.TextPrompt.__gt__": true,
- "google.generativeai.protos.TextPrompt.__init__": true,
- "google.generativeai.protos.TextPrompt.__le__": true,
- "google.generativeai.protos.TextPrompt.__lt__": true,
- "google.generativeai.protos.TextPrompt.__ne__": true,
- "google.generativeai.protos.TextPrompt.__new__": true,
- "google.generativeai.protos.TextPrompt.__or__": true,
- "google.generativeai.protos.TextPrompt.__ror__": true,
- "google.generativeai.protos.TextPrompt.copy_from": true,
- "google.generativeai.protos.TextPrompt.deserialize": true,
- "google.generativeai.protos.TextPrompt.from_json": true,
- "google.generativeai.protos.TextPrompt.mro": true,
- "google.generativeai.protos.TextPrompt.pb": true,
- "google.generativeai.protos.TextPrompt.serialize": true,
- "google.generativeai.protos.TextPrompt.text": true,
- "google.generativeai.protos.TextPrompt.to_dict": true,
- "google.generativeai.protos.TextPrompt.to_json": true,
- "google.generativeai.protos.TextPrompt.wrap": true,
- "google.generativeai.protos.Tool": false,
- "google.generativeai.protos.Tool.__call__": true,
- "google.generativeai.protos.Tool.__eq__": true,
- "google.generativeai.protos.Tool.__ge__": true,
- "google.generativeai.protos.Tool.__gt__": true,
- "google.generativeai.protos.Tool.__init__": true,
- "google.generativeai.protos.Tool.__le__": true,
- "google.generativeai.protos.Tool.__lt__": true,
- "google.generativeai.protos.Tool.__ne__": true,
- "google.generativeai.protos.Tool.__new__": true,
- "google.generativeai.protos.Tool.__or__": true,
- "google.generativeai.protos.Tool.__ror__": true,
- "google.generativeai.protos.Tool.code_execution": true,
- "google.generativeai.protos.Tool.copy_from": true,
- "google.generativeai.protos.Tool.deserialize": true,
- "google.generativeai.protos.Tool.from_json": true,
- "google.generativeai.protos.Tool.function_declarations": true,
- "google.generativeai.protos.Tool.google_search_retrieval": true,
- "google.generativeai.protos.Tool.mro": true,
- "google.generativeai.protos.Tool.pb": true,
- "google.generativeai.protos.Tool.serialize": true,
- "google.generativeai.protos.Tool.to_dict": true,
- "google.generativeai.protos.Tool.to_json": true,
- "google.generativeai.protos.Tool.wrap": true,
- "google.generativeai.protos.ToolConfig": false,
- "google.generativeai.protos.ToolConfig.__call__": true,
- "google.generativeai.protos.ToolConfig.__eq__": true,
- "google.generativeai.protos.ToolConfig.__ge__": true,
- "google.generativeai.protos.ToolConfig.__gt__": true,
- "google.generativeai.protos.ToolConfig.__init__": true,
- "google.generativeai.protos.ToolConfig.__le__": true,
- "google.generativeai.protos.ToolConfig.__lt__": true,
- "google.generativeai.protos.ToolConfig.__ne__": true,
- "google.generativeai.protos.ToolConfig.__new__": true,
- "google.generativeai.protos.ToolConfig.__or__": true,
- "google.generativeai.protos.ToolConfig.__ror__": true,
- "google.generativeai.protos.ToolConfig.copy_from": true,
- "google.generativeai.protos.ToolConfig.deserialize": true,
- "google.generativeai.protos.ToolConfig.from_json": true,
- "google.generativeai.protos.ToolConfig.function_calling_config": true,
- "google.generativeai.protos.ToolConfig.mro": true,
- "google.generativeai.protos.ToolConfig.pb": true,
- "google.generativeai.protos.ToolConfig.serialize": true,
- "google.generativeai.protos.ToolConfig.to_dict": true,
- "google.generativeai.protos.ToolConfig.to_json": true,
- "google.generativeai.protos.ToolConfig.wrap": true,
- "google.generativeai.protos.TransferOwnershipRequest": false,
- "google.generativeai.protos.TransferOwnershipRequest.__call__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__eq__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__ge__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__gt__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__init__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__le__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__lt__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__ne__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__new__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__or__": true,
- "google.generativeai.protos.TransferOwnershipRequest.__ror__": true,
- "google.generativeai.protos.TransferOwnershipRequest.copy_from": true,
- "google.generativeai.protos.TransferOwnershipRequest.deserialize": true,
- "google.generativeai.protos.TransferOwnershipRequest.email_address": true,
- "google.generativeai.protos.TransferOwnershipRequest.from_json": true,
- "google.generativeai.protos.TransferOwnershipRequest.mro": true,
- "google.generativeai.protos.TransferOwnershipRequest.name": true,
- "google.generativeai.protos.TransferOwnershipRequest.pb": true,
- "google.generativeai.protos.TransferOwnershipRequest.serialize": true,
- "google.generativeai.protos.TransferOwnershipRequest.to_dict": true,
- "google.generativeai.protos.TransferOwnershipRequest.to_json": true,
- "google.generativeai.protos.TransferOwnershipRequest.wrap": true,
- "google.generativeai.protos.TransferOwnershipResponse": false,
- "google.generativeai.protos.TransferOwnershipResponse.__call__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__eq__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__ge__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__gt__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__init__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__le__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__lt__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__ne__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__new__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__or__": true,
- "google.generativeai.protos.TransferOwnershipResponse.__ror__": true,
- "google.generativeai.protos.TransferOwnershipResponse.copy_from": true,
- "google.generativeai.protos.TransferOwnershipResponse.deserialize": true,
- "google.generativeai.protos.TransferOwnershipResponse.from_json": true,
- "google.generativeai.protos.TransferOwnershipResponse.mro": true,
- "google.generativeai.protos.TransferOwnershipResponse.pb": true,
- "google.generativeai.protos.TransferOwnershipResponse.serialize": true,
- "google.generativeai.protos.TransferOwnershipResponse.to_dict": true,
- "google.generativeai.protos.TransferOwnershipResponse.to_json": true,
- "google.generativeai.protos.TransferOwnershipResponse.wrap": true,
- "google.generativeai.protos.TunedModel": false,
- "google.generativeai.protos.TunedModel.State": false,
- "google.generativeai.protos.TunedModel.State.ACTIVE": true,
- "google.generativeai.protos.TunedModel.State.CREATING": true,
- "google.generativeai.protos.TunedModel.State.FAILED": true,
- "google.generativeai.protos.TunedModel.State.STATE_UNSPECIFIED": true,
- "google.generativeai.protos.TunedModel.State.__abs__": true,
- "google.generativeai.protos.TunedModel.State.__add__": true,
- "google.generativeai.protos.TunedModel.State.__and__": true,
- "google.generativeai.protos.TunedModel.State.__bool__": true,
- "google.generativeai.protos.TunedModel.State.__contains__": true,
- "google.generativeai.protos.TunedModel.State.__eq__": true,
- "google.generativeai.protos.TunedModel.State.__floordiv__": true,
- "google.generativeai.protos.TunedModel.State.__ge__": true,
- "google.generativeai.protos.TunedModel.State.__getitem__": true,
- "google.generativeai.protos.TunedModel.State.__gt__": true,
- "google.generativeai.protos.TunedModel.State.__init__": true,
- "google.generativeai.protos.TunedModel.State.__invert__": true,
- "google.generativeai.protos.TunedModel.State.__iter__": true,
- "google.generativeai.protos.TunedModel.State.__le__": true,
- "google.generativeai.protos.TunedModel.State.__len__": true,
- "google.generativeai.protos.TunedModel.State.__lshift__": true,
- "google.generativeai.protos.TunedModel.State.__lt__": true,
- "google.generativeai.protos.TunedModel.State.__mod__": true,
- "google.generativeai.protos.TunedModel.State.__mul__": true,
- "google.generativeai.protos.TunedModel.State.__ne__": true,
- "google.generativeai.protos.TunedModel.State.__neg__": true,
- "google.generativeai.protos.TunedModel.State.__new__": true,
- "google.generativeai.protos.TunedModel.State.__or__": true,
- "google.generativeai.protos.TunedModel.State.__pos__": true,
- "google.generativeai.protos.TunedModel.State.__pow__": true,
- "google.generativeai.protos.TunedModel.State.__radd__": true,
- "google.generativeai.protos.TunedModel.State.__rand__": true,
- "google.generativeai.protos.TunedModel.State.__rfloordiv__": true,
- "google.generativeai.protos.TunedModel.State.__rlshift__": true,
- "google.generativeai.protos.TunedModel.State.__rmod__": true,
- "google.generativeai.protos.TunedModel.State.__rmul__": true,
- "google.generativeai.protos.TunedModel.State.__ror__": true,
- "google.generativeai.protos.TunedModel.State.__rpow__": true,
- "google.generativeai.protos.TunedModel.State.__rrshift__": true,
- "google.generativeai.protos.TunedModel.State.__rshift__": true,
- "google.generativeai.protos.TunedModel.State.__rsub__": true,
- "google.generativeai.protos.TunedModel.State.__rtruediv__": true,
- "google.generativeai.protos.TunedModel.State.__rxor__": true,
- "google.generativeai.protos.TunedModel.State.__sub__": true,
- "google.generativeai.protos.TunedModel.State.__truediv__": true,
- "google.generativeai.protos.TunedModel.State.__xor__": true,
- "google.generativeai.protos.TunedModel.State.as_integer_ratio": true,
- "google.generativeai.protos.TunedModel.State.bit_count": true,
- "google.generativeai.protos.TunedModel.State.bit_length": true,
- "google.generativeai.protos.TunedModel.State.conjugate": true,
- "google.generativeai.protos.TunedModel.State.denominator": true,
- "google.generativeai.protos.TunedModel.State.from_bytes": true,
- "google.generativeai.protos.TunedModel.State.imag": true,
- "google.generativeai.protos.TunedModel.State.is_integer": true,
- "google.generativeai.protos.TunedModel.State.numerator": true,
- "google.generativeai.protos.TunedModel.State.real": true,
- "google.generativeai.protos.TunedModel.State.to_bytes": true,
- "google.generativeai.protos.TunedModel.__call__": true,
- "google.generativeai.protos.TunedModel.__eq__": true,
- "google.generativeai.protos.TunedModel.__ge__": true,
- "google.generativeai.protos.TunedModel.__gt__": true,
- "google.generativeai.protos.TunedModel.__init__": true,
- "google.generativeai.protos.TunedModel.__le__": true,
- "google.generativeai.protos.TunedModel.__lt__": true,
- "google.generativeai.protos.TunedModel.__ne__": true,
- "google.generativeai.protos.TunedModel.__new__": true,
- "google.generativeai.protos.TunedModel.__or__": true,
- "google.generativeai.protos.TunedModel.__ror__": true,
- "google.generativeai.protos.TunedModel.base_model": true,
- "google.generativeai.protos.TunedModel.copy_from": true,
- "google.generativeai.protos.TunedModel.create_time": true,
- "google.generativeai.protos.TunedModel.description": true,
- "google.generativeai.protos.TunedModel.deserialize": true,
- "google.generativeai.protos.TunedModel.display_name": true,
- "google.generativeai.protos.TunedModel.from_json": true,
- "google.generativeai.protos.TunedModel.mro": true,
- "google.generativeai.protos.TunedModel.name": true,
- "google.generativeai.protos.TunedModel.pb": true,
- "google.generativeai.protos.TunedModel.reader_project_numbers": true,
- "google.generativeai.protos.TunedModel.serialize": true,
- "google.generativeai.protos.TunedModel.state": true,
- "google.generativeai.protos.TunedModel.temperature": true,
- "google.generativeai.protos.TunedModel.to_dict": true,
- "google.generativeai.protos.TunedModel.to_json": true,
- "google.generativeai.protos.TunedModel.top_k": true,
- "google.generativeai.protos.TunedModel.top_p": true,
- "google.generativeai.protos.TunedModel.tuned_model_source": true,
- "google.generativeai.protos.TunedModel.tuning_task": true,
- "google.generativeai.protos.TunedModel.update_time": true,
- "google.generativeai.protos.TunedModel.wrap": true,
- "google.generativeai.protos.TunedModelSource": false,
- "google.generativeai.protos.TunedModelSource.__call__": true,
- "google.generativeai.protos.TunedModelSource.__eq__": true,
- "google.generativeai.protos.TunedModelSource.__ge__": true,
- "google.generativeai.protos.TunedModelSource.__gt__": true,
- "google.generativeai.protos.TunedModelSource.__init__": true,
- "google.generativeai.protos.TunedModelSource.__le__": true,
- "google.generativeai.protos.TunedModelSource.__lt__": true,
- "google.generativeai.protos.TunedModelSource.__ne__": true,
- "google.generativeai.protos.TunedModelSource.__new__": true,
- "google.generativeai.protos.TunedModelSource.__or__": true,
- "google.generativeai.protos.TunedModelSource.__ror__": true,
- "google.generativeai.protos.TunedModelSource.base_model": true,
- "google.generativeai.protos.TunedModelSource.copy_from": true,
- "google.generativeai.protos.TunedModelSource.deserialize": true,
- "google.generativeai.protos.TunedModelSource.from_json": true,
- "google.generativeai.protos.TunedModelSource.mro": true,
- "google.generativeai.protos.TunedModelSource.pb": true,
- "google.generativeai.protos.TunedModelSource.serialize": true,
- "google.generativeai.protos.TunedModelSource.to_dict": true,
- "google.generativeai.protos.TunedModelSource.to_json": true,
- "google.generativeai.protos.TunedModelSource.tuned_model": true,
- "google.generativeai.protos.TunedModelSource.wrap": true,
- "google.generativeai.protos.TuningExample": false,
- "google.generativeai.protos.TuningExample.__call__": true,
- "google.generativeai.protos.TuningExample.__eq__": true,
- "google.generativeai.protos.TuningExample.__ge__": true,
- "google.generativeai.protos.TuningExample.__gt__": true,
- "google.generativeai.protos.TuningExample.__init__": true,
- "google.generativeai.protos.TuningExample.__le__": true,
- "google.generativeai.protos.TuningExample.__lt__": true,
- "google.generativeai.protos.TuningExample.__ne__": true,
- "google.generativeai.protos.TuningExample.__new__": true,
- "google.generativeai.protos.TuningExample.__or__": true,
- "google.generativeai.protos.TuningExample.__ror__": true,
- "google.generativeai.protos.TuningExample.copy_from": true,
- "google.generativeai.protos.TuningExample.deserialize": true,
- "google.generativeai.protos.TuningExample.from_json": true,
- "google.generativeai.protos.TuningExample.mro": true,
- "google.generativeai.protos.TuningExample.output": true,
- "google.generativeai.protos.TuningExample.pb": true,
- "google.generativeai.protos.TuningExample.serialize": true,
- "google.generativeai.protos.TuningExample.text_input": true,
- "google.generativeai.protos.TuningExample.to_dict": true,
- "google.generativeai.protos.TuningExample.to_json": true,
- "google.generativeai.protos.TuningExample.wrap": true,
- "google.generativeai.protos.TuningExamples": false,
- "google.generativeai.protos.TuningExamples.__call__": true,
- "google.generativeai.protos.TuningExamples.__eq__": true,
- "google.generativeai.protos.TuningExamples.__ge__": true,
- "google.generativeai.protos.TuningExamples.__gt__": true,
- "google.generativeai.protos.TuningExamples.__init__": true,
- "google.generativeai.protos.TuningExamples.__le__": true,
- "google.generativeai.protos.TuningExamples.__lt__": true,
- "google.generativeai.protos.TuningExamples.__ne__": true,
- "google.generativeai.protos.TuningExamples.__new__": true,
- "google.generativeai.protos.TuningExamples.__or__": true,
- "google.generativeai.protos.TuningExamples.__ror__": true,
- "google.generativeai.protos.TuningExamples.copy_from": true,
- "google.generativeai.protos.TuningExamples.deserialize": true,
- "google.generativeai.protos.TuningExamples.examples": true,
- "google.generativeai.protos.TuningExamples.from_json": true,
- "google.generativeai.protos.TuningExamples.mro": true,
- "google.generativeai.protos.TuningExamples.pb": true,
- "google.generativeai.protos.TuningExamples.serialize": true,
- "google.generativeai.protos.TuningExamples.to_dict": true,
- "google.generativeai.protos.TuningExamples.to_json": true,
- "google.generativeai.protos.TuningExamples.wrap": true,
- "google.generativeai.protos.TuningSnapshot": false,
- "google.generativeai.protos.TuningSnapshot.__call__": true,
- "google.generativeai.protos.TuningSnapshot.__eq__": true,
- "google.generativeai.protos.TuningSnapshot.__ge__": true,
- "google.generativeai.protos.TuningSnapshot.__gt__": true,
- "google.generativeai.protos.TuningSnapshot.__init__": true,
- "google.generativeai.protos.TuningSnapshot.__le__": true,
- "google.generativeai.protos.TuningSnapshot.__lt__": true,
- "google.generativeai.protos.TuningSnapshot.__ne__": true,
- "google.generativeai.protos.TuningSnapshot.__new__": true,
- "google.generativeai.protos.TuningSnapshot.__or__": true,
- "google.generativeai.protos.TuningSnapshot.__ror__": true,
- "google.generativeai.protos.TuningSnapshot.compute_time": true,
- "google.generativeai.protos.TuningSnapshot.copy_from": true,
- "google.generativeai.protos.TuningSnapshot.deserialize": true,
- "google.generativeai.protos.TuningSnapshot.epoch": true,
- "google.generativeai.protos.TuningSnapshot.from_json": true,
- "google.generativeai.protos.TuningSnapshot.mean_loss": true,
- "google.generativeai.protos.TuningSnapshot.mro": true,
- "google.generativeai.protos.TuningSnapshot.pb": true,
- "google.generativeai.protos.TuningSnapshot.serialize": true,
- "google.generativeai.protos.TuningSnapshot.step": true,
- "google.generativeai.protos.TuningSnapshot.to_dict": true,
- "google.generativeai.protos.TuningSnapshot.to_json": true,
- "google.generativeai.protos.TuningSnapshot.wrap": true,
- "google.generativeai.protos.TuningTask": false,
- "google.generativeai.protos.TuningTask.__call__": true,
- "google.generativeai.protos.TuningTask.__eq__": true,
- "google.generativeai.protos.TuningTask.__ge__": true,
- "google.generativeai.protos.TuningTask.__gt__": true,
- "google.generativeai.protos.TuningTask.__init__": true,
- "google.generativeai.protos.TuningTask.__le__": true,
- "google.generativeai.protos.TuningTask.__lt__": true,
- "google.generativeai.protos.TuningTask.__ne__": true,
- "google.generativeai.protos.TuningTask.__new__": true,
- "google.generativeai.protos.TuningTask.__or__": true,
- "google.generativeai.protos.TuningTask.__ror__": true,
- "google.generativeai.protos.TuningTask.complete_time": true,
- "google.generativeai.protos.TuningTask.copy_from": true,
- "google.generativeai.protos.TuningTask.deserialize": true,
- "google.generativeai.protos.TuningTask.from_json": true,
- "google.generativeai.protos.TuningTask.hyperparameters": true,
- "google.generativeai.protos.TuningTask.mro": true,
- "google.generativeai.protos.TuningTask.pb": true,
- "google.generativeai.protos.TuningTask.serialize": true,
- "google.generativeai.protos.TuningTask.snapshots": true,
- "google.generativeai.protos.TuningTask.start_time": true,
- "google.generativeai.protos.TuningTask.to_dict": true,
- "google.generativeai.protos.TuningTask.to_json": true,
- "google.generativeai.protos.TuningTask.training_data": true,
- "google.generativeai.protos.TuningTask.wrap": true,
- "google.generativeai.protos.Type": false,
- "google.generativeai.protos.Type.ARRAY": true,
- "google.generativeai.protos.Type.BOOLEAN": true,
- "google.generativeai.protos.Type.INTEGER": true,
- "google.generativeai.protos.Type.NUMBER": true,
- "google.generativeai.protos.Type.OBJECT": true,
- "google.generativeai.protos.Type.STRING": true,
- "google.generativeai.protos.Type.TYPE_UNSPECIFIED": true,
- "google.generativeai.protos.Type.__abs__": true,
- "google.generativeai.protos.Type.__add__": true,
- "google.generativeai.protos.Type.__and__": true,
- "google.generativeai.protos.Type.__bool__": true,
- "google.generativeai.protos.Type.__contains__": true,
- "google.generativeai.protos.Type.__eq__": true,
- "google.generativeai.protos.Type.__floordiv__": true,
- "google.generativeai.protos.Type.__ge__": true,
- "google.generativeai.protos.Type.__getitem__": true,
- "google.generativeai.protos.Type.__gt__": true,
- "google.generativeai.protos.Type.__init__": true,
- "google.generativeai.protos.Type.__invert__": true,
- "google.generativeai.protos.Type.__iter__": true,
- "google.generativeai.protos.Type.__le__": true,
- "google.generativeai.protos.Type.__len__": true,
- "google.generativeai.protos.Type.__lshift__": true,
- "google.generativeai.protos.Type.__lt__": true,
- "google.generativeai.protos.Type.__mod__": true,
- "google.generativeai.protos.Type.__mul__": true,
- "google.generativeai.protos.Type.__ne__": true,
- "google.generativeai.protos.Type.__neg__": true,
- "google.generativeai.protos.Type.__new__": true,
- "google.generativeai.protos.Type.__or__": true,
- "google.generativeai.protos.Type.__pos__": true,
- "google.generativeai.protos.Type.__pow__": true,
- "google.generativeai.protos.Type.__radd__": true,
- "google.generativeai.protos.Type.__rand__": true,
- "google.generativeai.protos.Type.__rfloordiv__": true,
- "google.generativeai.protos.Type.__rlshift__": true,
- "google.generativeai.protos.Type.__rmod__": true,
- "google.generativeai.protos.Type.__rmul__": true,
- "google.generativeai.protos.Type.__ror__": true,
- "google.generativeai.protos.Type.__rpow__": true,
- "google.generativeai.protos.Type.__rrshift__": true,
- "google.generativeai.protos.Type.__rshift__": true,
- "google.generativeai.protos.Type.__rsub__": true,
- "google.generativeai.protos.Type.__rtruediv__": true,
- "google.generativeai.protos.Type.__rxor__": true,
- "google.generativeai.protos.Type.__sub__": true,
- "google.generativeai.protos.Type.__truediv__": true,
- "google.generativeai.protos.Type.__xor__": true,
- "google.generativeai.protos.Type.as_integer_ratio": true,
- "google.generativeai.protos.Type.bit_count": true,
- "google.generativeai.protos.Type.bit_length": true,
- "google.generativeai.protos.Type.conjugate": true,
- "google.generativeai.protos.Type.denominator": true,
- "google.generativeai.protos.Type.from_bytes": true,
- "google.generativeai.protos.Type.imag": true,
- "google.generativeai.protos.Type.is_integer": true,
- "google.generativeai.protos.Type.numerator": true,
- "google.generativeai.protos.Type.real": true,
- "google.generativeai.protos.Type.to_bytes": true,
- "google.generativeai.protos.UpdateCachedContentRequest": false,
- "google.generativeai.protos.UpdateCachedContentRequest.__call__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__eq__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__ge__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__gt__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__init__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__le__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__lt__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__ne__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__new__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__or__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.__ror__": true,
- "google.generativeai.protos.UpdateCachedContentRequest.cached_content": true,
- "google.generativeai.protos.UpdateCachedContentRequest.copy_from": true,
- "google.generativeai.protos.UpdateCachedContentRequest.deserialize": true,
- "google.generativeai.protos.UpdateCachedContentRequest.from_json": true,
- "google.generativeai.protos.UpdateCachedContentRequest.mro": true,
- "google.generativeai.protos.UpdateCachedContentRequest.pb": true,
- "google.generativeai.protos.UpdateCachedContentRequest.serialize": true,
- "google.generativeai.protos.UpdateCachedContentRequest.to_dict": true,
- "google.generativeai.protos.UpdateCachedContentRequest.to_json": true,
- "google.generativeai.protos.UpdateCachedContentRequest.update_mask": true,
- "google.generativeai.protos.UpdateCachedContentRequest.wrap": true,
- "google.generativeai.protos.UpdateChunkRequest": false,
- "google.generativeai.protos.UpdateChunkRequest.__call__": true,
- "google.generativeai.protos.UpdateChunkRequest.__eq__": true,
- "google.generativeai.protos.UpdateChunkRequest.__ge__": true,
- "google.generativeai.protos.UpdateChunkRequest.__gt__": true,
- "google.generativeai.protos.UpdateChunkRequest.__init__": true,
- "google.generativeai.protos.UpdateChunkRequest.__le__": true,
- "google.generativeai.protos.UpdateChunkRequest.__lt__": true,
- "google.generativeai.protos.UpdateChunkRequest.__ne__": true,
- "google.generativeai.protos.UpdateChunkRequest.__new__": true,
- "google.generativeai.protos.UpdateChunkRequest.__or__": true,
- "google.generativeai.protos.UpdateChunkRequest.__ror__": true,
- "google.generativeai.protos.UpdateChunkRequest.chunk": true,
- "google.generativeai.protos.UpdateChunkRequest.copy_from": true,
- "google.generativeai.protos.UpdateChunkRequest.deserialize": true,
- "google.generativeai.protos.UpdateChunkRequest.from_json": true,
- "google.generativeai.protos.UpdateChunkRequest.mro": true,
- "google.generativeai.protos.UpdateChunkRequest.pb": true,
- "google.generativeai.protos.UpdateChunkRequest.serialize": true,
- "google.generativeai.protos.UpdateChunkRequest.to_dict": true,
- "google.generativeai.protos.UpdateChunkRequest.to_json": true,
- "google.generativeai.protos.UpdateChunkRequest.update_mask": true,
- "google.generativeai.protos.UpdateChunkRequest.wrap": true,
- "google.generativeai.protos.UpdateCorpusRequest": false,
- "google.generativeai.protos.UpdateCorpusRequest.__call__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__eq__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__ge__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__gt__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__init__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__le__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__lt__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__ne__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__new__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__or__": true,
- "google.generativeai.protos.UpdateCorpusRequest.__ror__": true,
- "google.generativeai.protos.UpdateCorpusRequest.copy_from": true,
- "google.generativeai.protos.UpdateCorpusRequest.corpus": true,
- "google.generativeai.protos.UpdateCorpusRequest.deserialize": true,
- "google.generativeai.protos.UpdateCorpusRequest.from_json": true,
- "google.generativeai.protos.UpdateCorpusRequest.mro": true,
- "google.generativeai.protos.UpdateCorpusRequest.pb": true,
- "google.generativeai.protos.UpdateCorpusRequest.serialize": true,
- "google.generativeai.protos.UpdateCorpusRequest.to_dict": true,
- "google.generativeai.protos.UpdateCorpusRequest.to_json": true,
- "google.generativeai.protos.UpdateCorpusRequest.update_mask": true,
- "google.generativeai.protos.UpdateCorpusRequest.wrap": true,
- "google.generativeai.protos.UpdateDocumentRequest": false,
- "google.generativeai.protos.UpdateDocumentRequest.__call__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__eq__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__ge__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__gt__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__init__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__le__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__lt__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__ne__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__new__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__or__": true,
- "google.generativeai.protos.UpdateDocumentRequest.__ror__": true,
- "google.generativeai.protos.UpdateDocumentRequest.copy_from": true,
- "google.generativeai.protos.UpdateDocumentRequest.deserialize": true,
- "google.generativeai.protos.UpdateDocumentRequest.document": true,
- "google.generativeai.protos.UpdateDocumentRequest.from_json": true,
- "google.generativeai.protos.UpdateDocumentRequest.mro": true,
- "google.generativeai.protos.UpdateDocumentRequest.pb": true,
- "google.generativeai.protos.UpdateDocumentRequest.serialize": true,
- "google.generativeai.protos.UpdateDocumentRequest.to_dict": true,
- "google.generativeai.protos.UpdateDocumentRequest.to_json": true,
- "google.generativeai.protos.UpdateDocumentRequest.update_mask": true,
- "google.generativeai.protos.UpdateDocumentRequest.wrap": true,
- "google.generativeai.protos.UpdatePermissionRequest": false,
- "google.generativeai.protos.UpdatePermissionRequest.__call__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__eq__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__ge__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__gt__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__init__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__le__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__lt__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__ne__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__new__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__or__": true,
- "google.generativeai.protos.UpdatePermissionRequest.__ror__": true,
- "google.generativeai.protos.UpdatePermissionRequest.copy_from": true,
- "google.generativeai.protos.UpdatePermissionRequest.deserialize": true,
- "google.generativeai.protos.UpdatePermissionRequest.from_json": true,
- "google.generativeai.protos.UpdatePermissionRequest.mro": true,
- "google.generativeai.protos.UpdatePermissionRequest.pb": true,
- "google.generativeai.protos.UpdatePermissionRequest.permission": true,
- "google.generativeai.protos.UpdatePermissionRequest.serialize": true,
- "google.generativeai.protos.UpdatePermissionRequest.to_dict": true,
- "google.generativeai.protos.UpdatePermissionRequest.to_json": true,
- "google.generativeai.protos.UpdatePermissionRequest.update_mask": true,
- "google.generativeai.protos.UpdatePermissionRequest.wrap": true,
- "google.generativeai.protos.UpdateTunedModelRequest": false,
- "google.generativeai.protos.UpdateTunedModelRequest.__call__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__eq__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__ge__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__gt__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__init__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__le__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__lt__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__ne__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__new__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__or__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.__ror__": true,
- "google.generativeai.protos.UpdateTunedModelRequest.copy_from": true,
- "google.generativeai.protos.UpdateTunedModelRequest.deserialize": true,
- "google.generativeai.protos.UpdateTunedModelRequest.from_json": true,
- "google.generativeai.protos.UpdateTunedModelRequest.mro": true,
- "google.generativeai.protos.UpdateTunedModelRequest.pb": true,
- "google.generativeai.protos.UpdateTunedModelRequest.serialize": true,
- "google.generativeai.protos.UpdateTunedModelRequest.to_dict": true,
- "google.generativeai.protos.UpdateTunedModelRequest.to_json": true,
- "google.generativeai.protos.UpdateTunedModelRequest.tuned_model": true,
- "google.generativeai.protos.UpdateTunedModelRequest.update_mask": true,
- "google.generativeai.protos.UpdateTunedModelRequest.wrap": true,
- "google.generativeai.protos.VideoMetadata": false,
- "google.generativeai.protos.VideoMetadata.__call__": true,
- "google.generativeai.protos.VideoMetadata.__eq__": true,
- "google.generativeai.protos.VideoMetadata.__ge__": true,
- "google.generativeai.protos.VideoMetadata.__gt__": true,
- "google.generativeai.protos.VideoMetadata.__init__": true,
- "google.generativeai.protos.VideoMetadata.__le__": true,
- "google.generativeai.protos.VideoMetadata.__lt__": true,
- "google.generativeai.protos.VideoMetadata.__ne__": true,
- "google.generativeai.protos.VideoMetadata.__new__": true,
- "google.generativeai.protos.VideoMetadata.__or__": true,
- "google.generativeai.protos.VideoMetadata.__ror__": true,
- "google.generativeai.protos.VideoMetadata.copy_from": true,
- "google.generativeai.protos.VideoMetadata.deserialize": true,
- "google.generativeai.protos.VideoMetadata.from_json": true,
- "google.generativeai.protos.VideoMetadata.mro": true,
- "google.generativeai.protos.VideoMetadata.pb": true,
- "google.generativeai.protos.VideoMetadata.serialize": true,
- "google.generativeai.protos.VideoMetadata.to_dict": true,
- "google.generativeai.protos.VideoMetadata.to_json": true,
- "google.generativeai.protos.VideoMetadata.video_duration": true,
- "google.generativeai.protos.VideoMetadata.wrap": true,
- "google.generativeai.types": false,
- "google.generativeai.types.AnyModelNameOptions": false,
- "google.generativeai.types.AsyncGenerateContentResponse": false,
- "google.generativeai.types.AsyncGenerateContentResponse.__eq__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__ge__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__gt__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__init__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__le__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__lt__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__ne__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.__new__": true,
- "google.generativeai.types.AsyncGenerateContentResponse.candidates": true,
- "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": true,
- "google.generativeai.types.AsyncGenerateContentResponse.from_response": true,
- "google.generativeai.types.AsyncGenerateContentResponse.parts": true,
- "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback": true,
- "google.generativeai.types.AsyncGenerateContentResponse.resolve": true,
- "google.generativeai.types.AsyncGenerateContentResponse.text": true,
- "google.generativeai.types.AsyncGenerateContentResponse.to_dict": true,
- "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata": true,
- "google.generativeai.types.BaseModelNameOptions": false,
- "google.generativeai.types.BlobDict": false,
- "google.generativeai.types.BlobDict.__contains__": true,
- "google.generativeai.types.BlobDict.__eq__": true,
- "google.generativeai.types.BlobDict.__ge__": true,
- "google.generativeai.types.BlobDict.__getitem__": true,
- "google.generativeai.types.BlobDict.__gt__": true,
- "google.generativeai.types.BlobDict.__init__": true,
- "google.generativeai.types.BlobDict.__iter__": true,
- "google.generativeai.types.BlobDict.__le__": true,
- "google.generativeai.types.BlobDict.__len__": true,
- "google.generativeai.types.BlobDict.__lt__": true,
- "google.generativeai.types.BlobDict.__ne__": true,
- "google.generativeai.types.BlobDict.__new__": true,
- "google.generativeai.types.BlobDict.__or__": true,
- "google.generativeai.types.BlobDict.__ror__": true,
- "google.generativeai.types.BlobDict.clear": true,
- "google.generativeai.types.BlobDict.copy": true,
- "google.generativeai.types.BlobDict.fromkeys": true,
- "google.generativeai.types.BlobDict.get": true,
- "google.generativeai.types.BlobDict.items": true,
- "google.generativeai.types.BlobDict.keys": true,
- "google.generativeai.types.BlobDict.pop": true,
- "google.generativeai.types.BlobDict.popitem": true,
- "google.generativeai.types.BlobDict.setdefault": true,
- "google.generativeai.types.BlobDict.update": true,
- "google.generativeai.types.BlobDict.values": true,
- "google.generativeai.types.BlobType": false,
- "google.generativeai.types.BlockedPromptException": false,
- "google.generativeai.types.BlockedPromptException.__eq__": true,
- "google.generativeai.types.BlockedPromptException.__ge__": true,
- "google.generativeai.types.BlockedPromptException.__gt__": true,
- "google.generativeai.types.BlockedPromptException.__init__": true,
- "google.generativeai.types.BlockedPromptException.__le__": true,
- "google.generativeai.types.BlockedPromptException.__lt__": true,
- "google.generativeai.types.BlockedPromptException.__ne__": true,
- "google.generativeai.types.BlockedPromptException.__new__": true,
- "google.generativeai.types.BlockedPromptException.add_note": true,
- "google.generativeai.types.BlockedPromptException.args": true,
- "google.generativeai.types.BlockedPromptException.with_traceback": true,
- "google.generativeai.types.BlockedReason": false,
- "google.generativeai.types.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
- "google.generativeai.types.BlockedReason.OTHER": true,
- "google.generativeai.types.BlockedReason.SAFETY": true,
- "google.generativeai.types.BlockedReason.__abs__": true,
- "google.generativeai.types.BlockedReason.__add__": true,
- "google.generativeai.types.BlockedReason.__and__": true,
- "google.generativeai.types.BlockedReason.__bool__": true,
- "google.generativeai.types.BlockedReason.__contains__": true,
- "google.generativeai.types.BlockedReason.__eq__": true,
- "google.generativeai.types.BlockedReason.__floordiv__": true,
- "google.generativeai.types.BlockedReason.__ge__": true,
- "google.generativeai.types.BlockedReason.__getitem__": true,
- "google.generativeai.types.BlockedReason.__gt__": true,
- "google.generativeai.types.BlockedReason.__init__": true,
- "google.generativeai.types.BlockedReason.__invert__": true,
- "google.generativeai.types.BlockedReason.__iter__": true,
- "google.generativeai.types.BlockedReason.__le__": true,
- "google.generativeai.types.BlockedReason.__len__": true,
- "google.generativeai.types.BlockedReason.__lshift__": true,
- "google.generativeai.types.BlockedReason.__lt__": true,
- "google.generativeai.types.BlockedReason.__mod__": true,
- "google.generativeai.types.BlockedReason.__mul__": true,
- "google.generativeai.types.BlockedReason.__ne__": true,
- "google.generativeai.types.BlockedReason.__neg__": true,
- "google.generativeai.types.BlockedReason.__new__": true,
- "google.generativeai.types.BlockedReason.__or__": true,
- "google.generativeai.types.BlockedReason.__pos__": true,
- "google.generativeai.types.BlockedReason.__pow__": true,
- "google.generativeai.types.BlockedReason.__radd__": true,
- "google.generativeai.types.BlockedReason.__rand__": true,
- "google.generativeai.types.BlockedReason.__rfloordiv__": true,
- "google.generativeai.types.BlockedReason.__rlshift__": true,
- "google.generativeai.types.BlockedReason.__rmod__": true,
- "google.generativeai.types.BlockedReason.__rmul__": true,
- "google.generativeai.types.BlockedReason.__ror__": true,
- "google.generativeai.types.BlockedReason.__rpow__": true,
- "google.generativeai.types.BlockedReason.__rrshift__": true,
- "google.generativeai.types.BlockedReason.__rshift__": true,
- "google.generativeai.types.BlockedReason.__rsub__": true,
- "google.generativeai.types.BlockedReason.__rtruediv__": true,
- "google.generativeai.types.BlockedReason.__rxor__": true,
- "google.generativeai.types.BlockedReason.__sub__": true,
- "google.generativeai.types.BlockedReason.__truediv__": true,
- "google.generativeai.types.BlockedReason.__xor__": true,
- "google.generativeai.types.BlockedReason.as_integer_ratio": true,
- "google.generativeai.types.BlockedReason.bit_count": true,
- "google.generativeai.types.BlockedReason.bit_length": true,
- "google.generativeai.types.BlockedReason.conjugate": true,
- "google.generativeai.types.BlockedReason.denominator": true,
- "google.generativeai.types.BlockedReason.from_bytes": true,
- "google.generativeai.types.BlockedReason.imag": true,
- "google.generativeai.types.BlockedReason.is_integer": true,
- "google.generativeai.types.BlockedReason.numerator": true,
- "google.generativeai.types.BlockedReason.real": true,
- "google.generativeai.types.BlockedReason.to_bytes": true,
- "google.generativeai.types.BrokenResponseError": false,
- "google.generativeai.types.BrokenResponseError.__eq__": true,
- "google.generativeai.types.BrokenResponseError.__ge__": true,
- "google.generativeai.types.BrokenResponseError.__gt__": true,
- "google.generativeai.types.BrokenResponseError.__init__": true,
- "google.generativeai.types.BrokenResponseError.__le__": true,
- "google.generativeai.types.BrokenResponseError.__lt__": true,
- "google.generativeai.types.BrokenResponseError.__ne__": true,
- "google.generativeai.types.BrokenResponseError.__new__": true,
- "google.generativeai.types.BrokenResponseError.add_note": true,
- "google.generativeai.types.BrokenResponseError.args": true,
- "google.generativeai.types.BrokenResponseError.with_traceback": true,
- "google.generativeai.types.CallableFunctionDeclaration": false,
- "google.generativeai.types.CallableFunctionDeclaration.__call__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__eq__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__ge__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__gt__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__init__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__le__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__lt__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__ne__": true,
- "google.generativeai.types.CallableFunctionDeclaration.__new__": true,
- "google.generativeai.types.CallableFunctionDeclaration.description": true,
- "google.generativeai.types.CallableFunctionDeclaration.from_function": true,
- "google.generativeai.types.CallableFunctionDeclaration.from_proto": true,
- "google.generativeai.types.CallableFunctionDeclaration.name": true,
- "google.generativeai.types.CallableFunctionDeclaration.parameters": true,
- "google.generativeai.types.CallableFunctionDeclaration.to_proto": true,
- "google.generativeai.types.CitationMetadataDict": false,
- "google.generativeai.types.CitationMetadataDict.__contains__": true,
- "google.generativeai.types.CitationMetadataDict.__eq__": true,
- "google.generativeai.types.CitationMetadataDict.__ge__": true,
- "google.generativeai.types.CitationMetadataDict.__getitem__": true,
- "google.generativeai.types.CitationMetadataDict.__gt__": true,
- "google.generativeai.types.CitationMetadataDict.__init__": true,
- "google.generativeai.types.CitationMetadataDict.__iter__": true,
- "google.generativeai.types.CitationMetadataDict.__le__": true,
- "google.generativeai.types.CitationMetadataDict.__len__": true,
- "google.generativeai.types.CitationMetadataDict.__lt__": true,
- "google.generativeai.types.CitationMetadataDict.__ne__": true,
- "google.generativeai.types.CitationMetadataDict.__new__": true,
- "google.generativeai.types.CitationMetadataDict.__or__": true,
- "google.generativeai.types.CitationMetadataDict.__ror__": true,
- "google.generativeai.types.CitationMetadataDict.clear": true,
- "google.generativeai.types.CitationMetadataDict.copy": true,
- "google.generativeai.types.CitationMetadataDict.fromkeys": true,
- "google.generativeai.types.CitationMetadataDict.get": true,
- "google.generativeai.types.CitationMetadataDict.items": true,
- "google.generativeai.types.CitationMetadataDict.keys": true,
- "google.generativeai.types.CitationMetadataDict.pop": true,
- "google.generativeai.types.CitationMetadataDict.popitem": true,
- "google.generativeai.types.CitationMetadataDict.setdefault": true,
- "google.generativeai.types.CitationMetadataDict.update": true,
- "google.generativeai.types.CitationMetadataDict.values": true,
- "google.generativeai.types.CitationSourceDict": false,
- "google.generativeai.types.CitationSourceDict.__contains__": true,
- "google.generativeai.types.CitationSourceDict.__eq__": true,
- "google.generativeai.types.CitationSourceDict.__ge__": true,
- "google.generativeai.types.CitationSourceDict.__getitem__": true,
- "google.generativeai.types.CitationSourceDict.__gt__": true,
- "google.generativeai.types.CitationSourceDict.__init__": true,
- "google.generativeai.types.CitationSourceDict.__iter__": true,
- "google.generativeai.types.CitationSourceDict.__le__": true,
- "google.generativeai.types.CitationSourceDict.__len__": true,
- "google.generativeai.types.CitationSourceDict.__lt__": true,
- "google.generativeai.types.CitationSourceDict.__ne__": true,
- "google.generativeai.types.CitationSourceDict.__new__": true,
- "google.generativeai.types.CitationSourceDict.__or__": true,
- "google.generativeai.types.CitationSourceDict.__ror__": true,
- "google.generativeai.types.CitationSourceDict.clear": true,
- "google.generativeai.types.CitationSourceDict.copy": true,
- "google.generativeai.types.CitationSourceDict.fromkeys": true,
- "google.generativeai.types.CitationSourceDict.get": true,
- "google.generativeai.types.CitationSourceDict.items": true,
- "google.generativeai.types.CitationSourceDict.keys": true,
- "google.generativeai.types.CitationSourceDict.pop": true,
- "google.generativeai.types.CitationSourceDict.popitem": true,
- "google.generativeai.types.CitationSourceDict.setdefault": true,
- "google.generativeai.types.CitationSourceDict.update": true,
- "google.generativeai.types.CitationSourceDict.values": true,
- "google.generativeai.types.ContentDict": false,
- "google.generativeai.types.ContentDict.__contains__": true,
- "google.generativeai.types.ContentDict.__eq__": true,
- "google.generativeai.types.ContentDict.__ge__": true,
- "google.generativeai.types.ContentDict.__getitem__": true,
- "google.generativeai.types.ContentDict.__gt__": true,
- "google.generativeai.types.ContentDict.__init__": true,
- "google.generativeai.types.ContentDict.__iter__": true,
- "google.generativeai.types.ContentDict.__le__": true,
- "google.generativeai.types.ContentDict.__len__": true,
- "google.generativeai.types.ContentDict.__lt__": true,
- "google.generativeai.types.ContentDict.__ne__": true,
- "google.generativeai.types.ContentDict.__new__": true,
- "google.generativeai.types.ContentDict.__or__": true,
- "google.generativeai.types.ContentDict.__ror__": true,
- "google.generativeai.types.ContentDict.clear": true,
- "google.generativeai.types.ContentDict.copy": true,
- "google.generativeai.types.ContentDict.fromkeys": true,
- "google.generativeai.types.ContentDict.get": true,
- "google.generativeai.types.ContentDict.items": true,
- "google.generativeai.types.ContentDict.keys": true,
- "google.generativeai.types.ContentDict.pop": true,
- "google.generativeai.types.ContentDict.popitem": true,
- "google.generativeai.types.ContentDict.setdefault": true,
- "google.generativeai.types.ContentDict.update": true,
- "google.generativeai.types.ContentDict.values": true,
- "google.generativeai.types.ContentFilterDict": false,
- "google.generativeai.types.ContentFilterDict.__contains__": true,
- "google.generativeai.types.ContentFilterDict.__eq__": true,
- "google.generativeai.types.ContentFilterDict.__ge__": true,
- "google.generativeai.types.ContentFilterDict.__getitem__": true,
- "google.generativeai.types.ContentFilterDict.__gt__": true,
- "google.generativeai.types.ContentFilterDict.__init__": true,
- "google.generativeai.types.ContentFilterDict.__iter__": true,
- "google.generativeai.types.ContentFilterDict.__le__": true,
- "google.generativeai.types.ContentFilterDict.__len__": true,
- "google.generativeai.types.ContentFilterDict.__lt__": true,
- "google.generativeai.types.ContentFilterDict.__ne__": true,
- "google.generativeai.types.ContentFilterDict.__new__": true,
- "google.generativeai.types.ContentFilterDict.__or__": true,
- "google.generativeai.types.ContentFilterDict.__ror__": true,
- "google.generativeai.types.ContentFilterDict.clear": true,
- "google.generativeai.types.ContentFilterDict.copy": true,
- "google.generativeai.types.ContentFilterDict.fromkeys": true,
- "google.generativeai.types.ContentFilterDict.get": true,
- "google.generativeai.types.ContentFilterDict.items": true,
- "google.generativeai.types.ContentFilterDict.keys": true,
- "google.generativeai.types.ContentFilterDict.pop": true,
- "google.generativeai.types.ContentFilterDict.popitem": true,
- "google.generativeai.types.ContentFilterDict.setdefault": true,
- "google.generativeai.types.ContentFilterDict.update": true,
- "google.generativeai.types.ContentFilterDict.values": true,
- "google.generativeai.types.ContentType": false,
- "google.generativeai.types.ContentsType": false,
- "google.generativeai.types.File": false,
- "google.generativeai.types.File.__eq__": true,
- "google.generativeai.types.File.__ge__": true,
- "google.generativeai.types.File.__gt__": true,
- "google.generativeai.types.File.__init__": true,
- "google.generativeai.types.File.__le__": true,
- "google.generativeai.types.File.__lt__": true,
- "google.generativeai.types.File.__ne__": true,
- "google.generativeai.types.File.__new__": true,
- "google.generativeai.types.File.create_time": true,
- "google.generativeai.types.File.delete": true,
- "google.generativeai.types.File.display_name": true,
- "google.generativeai.types.File.error": true,
- "google.generativeai.types.File.expiration_time": true,
- "google.generativeai.types.File.mime_type": true,
- "google.generativeai.types.File.name": true,
- "google.generativeai.types.File.sha256_hash": true,
- "google.generativeai.types.File.size_bytes": true,
- "google.generativeai.types.File.state": true,
- "google.generativeai.types.File.to_dict": true,
- "google.generativeai.types.File.to_proto": true,
- "google.generativeai.types.File.update_time": true,
- "google.generativeai.types.File.uri": true,
- "google.generativeai.types.File.video_metadata": true,
- "google.generativeai.types.FileDataDict": false,
- "google.generativeai.types.FileDataDict.__contains__": true,
- "google.generativeai.types.FileDataDict.__eq__": true,
- "google.generativeai.types.FileDataDict.__ge__": true,
- "google.generativeai.types.FileDataDict.__getitem__": true,
- "google.generativeai.types.FileDataDict.__gt__": true,
- "google.generativeai.types.FileDataDict.__init__": true,
- "google.generativeai.types.FileDataDict.__iter__": true,
- "google.generativeai.types.FileDataDict.__le__": true,
- "google.generativeai.types.FileDataDict.__len__": true,
- "google.generativeai.types.FileDataDict.__lt__": true,
- "google.generativeai.types.FileDataDict.__ne__": true,
- "google.generativeai.types.FileDataDict.__new__": true,
- "google.generativeai.types.FileDataDict.__or__": true,
- "google.generativeai.types.FileDataDict.__ror__": true,
- "google.generativeai.types.FileDataDict.clear": true,
- "google.generativeai.types.FileDataDict.copy": true,
- "google.generativeai.types.FileDataDict.fromkeys": true,
- "google.generativeai.types.FileDataDict.get": true,
- "google.generativeai.types.FileDataDict.items": true,
- "google.generativeai.types.FileDataDict.keys": true,
- "google.generativeai.types.FileDataDict.pop": true,
- "google.generativeai.types.FileDataDict.popitem": true,
- "google.generativeai.types.FileDataDict.setdefault": true,
- "google.generativeai.types.FileDataDict.update": true,
- "google.generativeai.types.FileDataDict.values": true,
- "google.generativeai.types.FileDataType": false,
- "google.generativeai.types.FunctionDeclaration": false,
- "google.generativeai.types.FunctionDeclaration.__eq__": true,
- "google.generativeai.types.FunctionDeclaration.__ge__": true,
- "google.generativeai.types.FunctionDeclaration.__gt__": true,
- "google.generativeai.types.FunctionDeclaration.__init__": true,
- "google.generativeai.types.FunctionDeclaration.__le__": true,
- "google.generativeai.types.FunctionDeclaration.__lt__": true,
- "google.generativeai.types.FunctionDeclaration.__ne__": true,
- "google.generativeai.types.FunctionDeclaration.__new__": true,
- "google.generativeai.types.FunctionDeclaration.description": true,
- "google.generativeai.types.FunctionDeclaration.from_function": true,
- "google.generativeai.types.FunctionDeclaration.from_proto": true,
- "google.generativeai.types.FunctionDeclaration.name": true,
- "google.generativeai.types.FunctionDeclaration.parameters": true,
- "google.generativeai.types.FunctionDeclaration.to_proto": true,
- "google.generativeai.types.FunctionDeclarationType": false,
- "google.generativeai.types.FunctionLibrary": false,
- "google.generativeai.types.FunctionLibrary.__call__": true,
- "google.generativeai.types.FunctionLibrary.__eq__": true,
- "google.generativeai.types.FunctionLibrary.__ge__": true,
- "google.generativeai.types.FunctionLibrary.__getitem__": true,
- "google.generativeai.types.FunctionLibrary.__gt__": true,
- "google.generativeai.types.FunctionLibrary.__init__": true,
- "google.generativeai.types.FunctionLibrary.__le__": true,
- "google.generativeai.types.FunctionLibrary.__lt__": true,
- "google.generativeai.types.FunctionLibrary.__ne__": true,
- "google.generativeai.types.FunctionLibrary.__new__": true,
- "google.generativeai.types.FunctionLibrary.to_proto": true,
- "google.generativeai.types.FunctionLibraryType": false,
- "google.generativeai.types.GenerateContentResponse": false,
- "google.generativeai.types.GenerateContentResponse.__eq__": true,
- "google.generativeai.types.GenerateContentResponse.__ge__": true,
- "google.generativeai.types.GenerateContentResponse.__gt__": true,
- "google.generativeai.types.GenerateContentResponse.__init__": true,
- "google.generativeai.types.GenerateContentResponse.__iter__": true,
- "google.generativeai.types.GenerateContentResponse.__le__": true,
- "google.generativeai.types.GenerateContentResponse.__lt__": true,
- "google.generativeai.types.GenerateContentResponse.__ne__": true,
- "google.generativeai.types.GenerateContentResponse.__new__": true,
- "google.generativeai.types.GenerateContentResponse.candidates": true,
- "google.generativeai.types.GenerateContentResponse.from_iterator": true,
- "google.generativeai.types.GenerateContentResponse.from_response": true,
- "google.generativeai.types.GenerateContentResponse.parts": true,
- "google.generativeai.types.GenerateContentResponse.prompt_feedback": true,
- "google.generativeai.types.GenerateContentResponse.resolve": true,
- "google.generativeai.types.GenerateContentResponse.text": true,
- "google.generativeai.types.GenerateContentResponse.to_dict": true,
- "google.generativeai.types.GenerateContentResponse.usage_metadata": true,
- "google.generativeai.types.GenerationConfig": false,
- "google.generativeai.types.GenerationConfig.__eq__": true,
- "google.generativeai.types.GenerationConfig.__ge__": true,
- "google.generativeai.types.GenerationConfig.__gt__": true,
- "google.generativeai.types.GenerationConfig.__init__": true,
- "google.generativeai.types.GenerationConfig.__le__": true,
- "google.generativeai.types.GenerationConfig.__lt__": true,
- "google.generativeai.types.GenerationConfig.__ne__": true,
- "google.generativeai.types.GenerationConfig.__new__": true,
- "google.generativeai.types.GenerationConfig.candidate_count": true,
- "google.generativeai.types.GenerationConfig.frequency_penalty": true,
- "google.generativeai.types.GenerationConfig.logprobs": true,
- "google.generativeai.types.GenerationConfig.max_output_tokens": true,
- "google.generativeai.types.GenerationConfig.presence_penalty": true,
- "google.generativeai.types.GenerationConfig.response_logprobs": true,
- "google.generativeai.types.GenerationConfig.response_mime_type": true,
- "google.generativeai.types.GenerationConfig.response_schema": true,
- "google.generativeai.types.GenerationConfig.seed": true,
- "google.generativeai.types.GenerationConfig.stop_sequences": true,
- "google.generativeai.types.GenerationConfig.temperature": true,
- "google.generativeai.types.GenerationConfig.top_k": true,
- "google.generativeai.types.GenerationConfig.top_p": true,
- "google.generativeai.types.GenerationConfigDict": false,
- "google.generativeai.types.GenerationConfigDict.__contains__": true,
- "google.generativeai.types.GenerationConfigDict.__eq__": true,
- "google.generativeai.types.GenerationConfigDict.__ge__": true,
- "google.generativeai.types.GenerationConfigDict.__getitem__": true,
- "google.generativeai.types.GenerationConfigDict.__gt__": true,
- "google.generativeai.types.GenerationConfigDict.__init__": true,
- "google.generativeai.types.GenerationConfigDict.__iter__": true,
- "google.generativeai.types.GenerationConfigDict.__le__": true,
- "google.generativeai.types.GenerationConfigDict.__len__": true,
- "google.generativeai.types.GenerationConfigDict.__lt__": true,
- "google.generativeai.types.GenerationConfigDict.__ne__": true,
- "google.generativeai.types.GenerationConfigDict.__new__": true,
- "google.generativeai.types.GenerationConfigDict.__or__": true,
- "google.generativeai.types.GenerationConfigDict.__ror__": true,
- "google.generativeai.types.GenerationConfigDict.clear": true,
- "google.generativeai.types.GenerationConfigDict.copy": true,
- "google.generativeai.types.GenerationConfigDict.fromkeys": true,
- "google.generativeai.types.GenerationConfigDict.get": true,
- "google.generativeai.types.GenerationConfigDict.items": true,
- "google.generativeai.types.GenerationConfigDict.keys": true,
- "google.generativeai.types.GenerationConfigDict.pop": true,
- "google.generativeai.types.GenerationConfigDict.popitem": true,
- "google.generativeai.types.GenerationConfigDict.setdefault": true,
- "google.generativeai.types.GenerationConfigDict.update": true,
- "google.generativeai.types.GenerationConfigDict.values": true,
- "google.generativeai.types.GenerationConfigType": false,
- "google.generativeai.types.HarmBlockThreshold": false,
- "google.generativeai.types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
- "google.generativeai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
- "google.generativeai.types.HarmBlockThreshold.BLOCK_NONE": true,
- "google.generativeai.types.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
- "google.generativeai.types.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
- "google.generativeai.types.HarmBlockThreshold.OFF": true,
- "google.generativeai.types.HarmBlockThreshold.__abs__": true,
- "google.generativeai.types.HarmBlockThreshold.__add__": true,
- "google.generativeai.types.HarmBlockThreshold.__and__": true,
- "google.generativeai.types.HarmBlockThreshold.__bool__": true,
- "google.generativeai.types.HarmBlockThreshold.__contains__": true,
- "google.generativeai.types.HarmBlockThreshold.__eq__": true,
- "google.generativeai.types.HarmBlockThreshold.__floordiv__": true,
- "google.generativeai.types.HarmBlockThreshold.__ge__": true,
- "google.generativeai.types.HarmBlockThreshold.__getitem__": true,
- "google.generativeai.types.HarmBlockThreshold.__gt__": true,
- "google.generativeai.types.HarmBlockThreshold.__init__": true,
- "google.generativeai.types.HarmBlockThreshold.__invert__": true,
- "google.generativeai.types.HarmBlockThreshold.__iter__": true,
- "google.generativeai.types.HarmBlockThreshold.__le__": true,
- "google.generativeai.types.HarmBlockThreshold.__len__": true,
- "google.generativeai.types.HarmBlockThreshold.__lshift__": true,
- "google.generativeai.types.HarmBlockThreshold.__lt__": true,
- "google.generativeai.types.HarmBlockThreshold.__mod__": true,
- "google.generativeai.types.HarmBlockThreshold.__mul__": true,
- "google.generativeai.types.HarmBlockThreshold.__ne__": true,
- "google.generativeai.types.HarmBlockThreshold.__neg__": true,
- "google.generativeai.types.HarmBlockThreshold.__new__": true,
- "google.generativeai.types.HarmBlockThreshold.__or__": true,
- "google.generativeai.types.HarmBlockThreshold.__pos__": true,
- "google.generativeai.types.HarmBlockThreshold.__pow__": true,
- "google.generativeai.types.HarmBlockThreshold.__radd__": true,
- "google.generativeai.types.HarmBlockThreshold.__rand__": true,
- "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": true,
- "google.generativeai.types.HarmBlockThreshold.__rlshift__": true,
- "google.generativeai.types.HarmBlockThreshold.__rmod__": true,
- "google.generativeai.types.HarmBlockThreshold.__rmul__": true,
- "google.generativeai.types.HarmBlockThreshold.__ror__": true,
- "google.generativeai.types.HarmBlockThreshold.__rpow__": true,
- "google.generativeai.types.HarmBlockThreshold.__rrshift__": true,
- "google.generativeai.types.HarmBlockThreshold.__rshift__": true,
- "google.generativeai.types.HarmBlockThreshold.__rsub__": true,
- "google.generativeai.types.HarmBlockThreshold.__rtruediv__": true,
- "google.generativeai.types.HarmBlockThreshold.__rxor__": true,
- "google.generativeai.types.HarmBlockThreshold.__sub__": true,
- "google.generativeai.types.HarmBlockThreshold.__truediv__": true,
- "google.generativeai.types.HarmBlockThreshold.__xor__": true,
- "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": true,
- "google.generativeai.types.HarmBlockThreshold.bit_count": true,
- "google.generativeai.types.HarmBlockThreshold.bit_length": true,
- "google.generativeai.types.HarmBlockThreshold.conjugate": true,
- "google.generativeai.types.HarmBlockThreshold.denominator": true,
- "google.generativeai.types.HarmBlockThreshold.from_bytes": true,
- "google.generativeai.types.HarmBlockThreshold.imag": true,
- "google.generativeai.types.HarmBlockThreshold.is_integer": true,
- "google.generativeai.types.HarmBlockThreshold.numerator": true,
- "google.generativeai.types.HarmBlockThreshold.real": true,
- "google.generativeai.types.HarmBlockThreshold.to_bytes": true,
- "google.generativeai.types.HarmCategory": false,
- "google.generativeai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
- "google.generativeai.types.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
- "google.generativeai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
- "google.generativeai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
- "google.generativeai.types.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
- "google.generativeai.types.HarmCategory.__abs__": true,
- "google.generativeai.types.HarmCategory.__add__": true,
- "google.generativeai.types.HarmCategory.__and__": true,
- "google.generativeai.types.HarmCategory.__bool__": true,
- "google.generativeai.types.HarmCategory.__contains__": true,
- "google.generativeai.types.HarmCategory.__eq__": true,
- "google.generativeai.types.HarmCategory.__floordiv__": true,
- "google.generativeai.types.HarmCategory.__ge__": true,
- "google.generativeai.types.HarmCategory.__getitem__": true,
- "google.generativeai.types.HarmCategory.__gt__": true,
- "google.generativeai.types.HarmCategory.__init__": true,
- "google.generativeai.types.HarmCategory.__invert__": true,
- "google.generativeai.types.HarmCategory.__iter__": true,
- "google.generativeai.types.HarmCategory.__le__": true,
- "google.generativeai.types.HarmCategory.__len__": true,
- "google.generativeai.types.HarmCategory.__lshift__": true,
- "google.generativeai.types.HarmCategory.__lt__": true,
- "google.generativeai.types.HarmCategory.__mod__": true,
- "google.generativeai.types.HarmCategory.__mul__": true,
- "google.generativeai.types.HarmCategory.__ne__": true,
- "google.generativeai.types.HarmCategory.__neg__": true,
- "google.generativeai.types.HarmCategory.__new__": true,
- "google.generativeai.types.HarmCategory.__or__": true,
- "google.generativeai.types.HarmCategory.__pos__": true,
- "google.generativeai.types.HarmCategory.__pow__": true,
- "google.generativeai.types.HarmCategory.__radd__": true,
- "google.generativeai.types.HarmCategory.__rand__": true,
- "google.generativeai.types.HarmCategory.__rfloordiv__": true,
- "google.generativeai.types.HarmCategory.__rlshift__": true,
- "google.generativeai.types.HarmCategory.__rmod__": true,
- "google.generativeai.types.HarmCategory.__rmul__": true,
- "google.generativeai.types.HarmCategory.__ror__": true,
- "google.generativeai.types.HarmCategory.__rpow__": true,
- "google.generativeai.types.HarmCategory.__rrshift__": true,
- "google.generativeai.types.HarmCategory.__rshift__": true,
- "google.generativeai.types.HarmCategory.__rsub__": true,
- "google.generativeai.types.HarmCategory.__rtruediv__": true,
- "google.generativeai.types.HarmCategory.__rxor__": true,
- "google.generativeai.types.HarmCategory.__sub__": true,
- "google.generativeai.types.HarmCategory.__truediv__": true,
- "google.generativeai.types.HarmCategory.__xor__": true,
- "google.generativeai.types.HarmCategory.as_integer_ratio": true,
- "google.generativeai.types.HarmCategory.bit_count": true,
- "google.generativeai.types.HarmCategory.bit_length": true,
- "google.generativeai.types.HarmCategory.conjugate": true,
- "google.generativeai.types.HarmCategory.denominator": true,
- "google.generativeai.types.HarmCategory.from_bytes": true,
- "google.generativeai.types.HarmCategory.imag": true,
- "google.generativeai.types.HarmCategory.is_integer": true,
- "google.generativeai.types.HarmCategory.numerator": true,
- "google.generativeai.types.HarmCategory.real": true,
- "google.generativeai.types.HarmCategory.to_bytes": true,
- "google.generativeai.types.HarmProbability": false,
- "google.generativeai.types.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
- "google.generativeai.types.HarmProbability.HIGH": true,
- "google.generativeai.types.HarmProbability.LOW": true,
- "google.generativeai.types.HarmProbability.MEDIUM": true,
- "google.generativeai.types.HarmProbability.NEGLIGIBLE": true,
- "google.generativeai.types.HarmProbability.__abs__": true,
- "google.generativeai.types.HarmProbability.__add__": true,
- "google.generativeai.types.HarmProbability.__and__": true,
- "google.generativeai.types.HarmProbability.__bool__": true,
- "google.generativeai.types.HarmProbability.__contains__": true,
- "google.generativeai.types.HarmProbability.__eq__": true,
- "google.generativeai.types.HarmProbability.__floordiv__": true,
- "google.generativeai.types.HarmProbability.__ge__": true,
- "google.generativeai.types.HarmProbability.__getitem__": true,
- "google.generativeai.types.HarmProbability.__gt__": true,
- "google.generativeai.types.HarmProbability.__init__": true,
- "google.generativeai.types.HarmProbability.__invert__": true,
- "google.generativeai.types.HarmProbability.__iter__": true,
- "google.generativeai.types.HarmProbability.__le__": true,
- "google.generativeai.types.HarmProbability.__len__": true,
- "google.generativeai.types.HarmProbability.__lshift__": true,
- "google.generativeai.types.HarmProbability.__lt__": true,
- "google.generativeai.types.HarmProbability.__mod__": true,
- "google.generativeai.types.HarmProbability.__mul__": true,
- "google.generativeai.types.HarmProbability.__ne__": true,
- "google.generativeai.types.HarmProbability.__neg__": true,
- "google.generativeai.types.HarmProbability.__new__": true,
- "google.generativeai.types.HarmProbability.__or__": true,
- "google.generativeai.types.HarmProbability.__pos__": true,
- "google.generativeai.types.HarmProbability.__pow__": true,
- "google.generativeai.types.HarmProbability.__radd__": true,
- "google.generativeai.types.HarmProbability.__rand__": true,
- "google.generativeai.types.HarmProbability.__rfloordiv__": true,
- "google.generativeai.types.HarmProbability.__rlshift__": true,
- "google.generativeai.types.HarmProbability.__rmod__": true,
- "google.generativeai.types.HarmProbability.__rmul__": true,
- "google.generativeai.types.HarmProbability.__ror__": true,
- "google.generativeai.types.HarmProbability.__rpow__": true,
- "google.generativeai.types.HarmProbability.__rrshift__": true,
- "google.generativeai.types.HarmProbability.__rshift__": true,
- "google.generativeai.types.HarmProbability.__rsub__": true,
- "google.generativeai.types.HarmProbability.__rtruediv__": true,
- "google.generativeai.types.HarmProbability.__rxor__": true,
- "google.generativeai.types.HarmProbability.__sub__": true,
- "google.generativeai.types.HarmProbability.__truediv__": true,
- "google.generativeai.types.HarmProbability.__xor__": true,
- "google.generativeai.types.HarmProbability.as_integer_ratio": true,
- "google.generativeai.types.HarmProbability.bit_count": true,
- "google.generativeai.types.HarmProbability.bit_length": true,
- "google.generativeai.types.HarmProbability.conjugate": true,
- "google.generativeai.types.HarmProbability.denominator": true,
- "google.generativeai.types.HarmProbability.from_bytes": true,
- "google.generativeai.types.HarmProbability.imag": true,
- "google.generativeai.types.HarmProbability.is_integer": true,
- "google.generativeai.types.HarmProbability.numerator": true,
- "google.generativeai.types.HarmProbability.real": true,
- "google.generativeai.types.HarmProbability.to_bytes": true,
- "google.generativeai.types.IncompleteIterationError": false,
- "google.generativeai.types.IncompleteIterationError.__eq__": true,
- "google.generativeai.types.IncompleteIterationError.__ge__": true,
- "google.generativeai.types.IncompleteIterationError.__gt__": true,
- "google.generativeai.types.IncompleteIterationError.__init__": true,
- "google.generativeai.types.IncompleteIterationError.__le__": true,
- "google.generativeai.types.IncompleteIterationError.__lt__": true,
- "google.generativeai.types.IncompleteIterationError.__ne__": true,
- "google.generativeai.types.IncompleteIterationError.__new__": true,
- "google.generativeai.types.IncompleteIterationError.add_note": true,
- "google.generativeai.types.IncompleteIterationError.args": true,
- "google.generativeai.types.IncompleteIterationError.with_traceback": true,
- "google.generativeai.types.Model": false,
- "google.generativeai.types.Model.__eq__": true,
- "google.generativeai.types.Model.__ge__": true,
- "google.generativeai.types.Model.__gt__": true,
- "google.generativeai.types.Model.__init__": true,
- "google.generativeai.types.Model.__le__": true,
- "google.generativeai.types.Model.__lt__": true,
- "google.generativeai.types.Model.__ne__": true,
- "google.generativeai.types.Model.__new__": true,
- "google.generativeai.types.Model.max_temperature": true,
- "google.generativeai.types.Model.temperature": true,
- "google.generativeai.types.Model.top_k": true,
- "google.generativeai.types.Model.top_p": true,
- "google.generativeai.types.ModelNameOptions": false,
- "google.generativeai.types.ModelsIterable": false,
- "google.generativeai.types.PartDict": false,
- "google.generativeai.types.PartDict.__contains__": true,
- "google.generativeai.types.PartDict.__eq__": true,
- "google.generativeai.types.PartDict.__ge__": true,
- "google.generativeai.types.PartDict.__getitem__": true,
- "google.generativeai.types.PartDict.__gt__": true,
- "google.generativeai.types.PartDict.__init__": true,
- "google.generativeai.types.PartDict.__iter__": true,
- "google.generativeai.types.PartDict.__le__": true,
- "google.generativeai.types.PartDict.__len__": true,
- "google.generativeai.types.PartDict.__lt__": true,
- "google.generativeai.types.PartDict.__ne__": true,
- "google.generativeai.types.PartDict.__new__": true,
- "google.generativeai.types.PartDict.__or__": true,
- "google.generativeai.types.PartDict.__ror__": true,
- "google.generativeai.types.PartDict.clear": true,
- "google.generativeai.types.PartDict.copy": true,
- "google.generativeai.types.PartDict.fromkeys": true,
- "google.generativeai.types.PartDict.get": true,
- "google.generativeai.types.PartDict.items": true,
- "google.generativeai.types.PartDict.keys": true,
- "google.generativeai.types.PartDict.pop": true,
- "google.generativeai.types.PartDict.popitem": true,
- "google.generativeai.types.PartDict.setdefault": true,
- "google.generativeai.types.PartDict.update": true,
- "google.generativeai.types.PartDict.values": true,
- "google.generativeai.types.PartType": false,
- "google.generativeai.types.Permission": false,
- "google.generativeai.types.Permission.__eq__": true,
- "google.generativeai.types.Permission.__ge__": true,
- "google.generativeai.types.Permission.__gt__": true,
- "google.generativeai.types.Permission.__init__": true,
- "google.generativeai.types.Permission.__le__": true,
- "google.generativeai.types.Permission.__lt__": true,
- "google.generativeai.types.Permission.__ne__": true,
- "google.generativeai.types.Permission.__new__": true,
- "google.generativeai.types.Permission.delete": true,
- "google.generativeai.types.Permission.delete_async": true,
- "google.generativeai.types.Permission.email_address": true,
- "google.generativeai.types.Permission.get": true,
- "google.generativeai.types.Permission.get_async": true,
- "google.generativeai.types.Permission.to_dict": true,
- "google.generativeai.types.Permission.update": true,
- "google.generativeai.types.Permission.update_async": true,
- "google.generativeai.types.Permissions": false,
- "google.generativeai.types.Permissions.__eq__": true,
- "google.generativeai.types.Permissions.__ge__": true,
- "google.generativeai.types.Permissions.__gt__": true,
- "google.generativeai.types.Permissions.__init__": true,
- "google.generativeai.types.Permissions.__iter__": true,
- "google.generativeai.types.Permissions.__le__": true,
- "google.generativeai.types.Permissions.__lt__": true,
- "google.generativeai.types.Permissions.__ne__": true,
- "google.generativeai.types.Permissions.__new__": true,
- "google.generativeai.types.Permissions.create": true,
- "google.generativeai.types.Permissions.create_async": true,
- "google.generativeai.types.Permissions.get": true,
- "google.generativeai.types.Permissions.get_async": true,
- "google.generativeai.types.Permissions.list": true,
- "google.generativeai.types.Permissions.list_async": true,
- "google.generativeai.types.Permissions.parent": true,
- "google.generativeai.types.Permissions.transfer_ownership": true,
- "google.generativeai.types.Permissions.transfer_ownership_async": true,
- "google.generativeai.types.RequestOptions": false,
- "google.generativeai.types.RequestOptions.__contains__": true,
- "google.generativeai.types.RequestOptions.__eq__": true,
- "google.generativeai.types.RequestOptions.__ge__": true,
- "google.generativeai.types.RequestOptions.__getitem__": true,
- "google.generativeai.types.RequestOptions.__gt__": true,
- "google.generativeai.types.RequestOptions.__init__": true,
- "google.generativeai.types.RequestOptions.__iter__": true,
- "google.generativeai.types.RequestOptions.__le__": true,
- "google.generativeai.types.RequestOptions.__len__": true,
- "google.generativeai.types.RequestOptions.__lt__": true,
- "google.generativeai.types.RequestOptions.__ne__": true,
- "google.generativeai.types.RequestOptions.__new__": true,
- "google.generativeai.types.RequestOptions.get": true,
- "google.generativeai.types.RequestOptions.items": true,
- "google.generativeai.types.RequestOptions.keys": true,
- "google.generativeai.types.RequestOptions.values": true,
- "google.generativeai.types.RequestOptionsType": false,
- "google.generativeai.types.SafetyFeedbackDict": false,
- "google.generativeai.types.SafetyFeedbackDict.__contains__": true,
- "google.generativeai.types.SafetyFeedbackDict.__eq__": true,
- "google.generativeai.types.SafetyFeedbackDict.__ge__": true,
- "google.generativeai.types.SafetyFeedbackDict.__getitem__": true,
- "google.generativeai.types.SafetyFeedbackDict.__gt__": true,
- "google.generativeai.types.SafetyFeedbackDict.__init__": true,
- "google.generativeai.types.SafetyFeedbackDict.__iter__": true,
- "google.generativeai.types.SafetyFeedbackDict.__le__": true,
- "google.generativeai.types.SafetyFeedbackDict.__len__": true,
- "google.generativeai.types.SafetyFeedbackDict.__lt__": true,
- "google.generativeai.types.SafetyFeedbackDict.__ne__": true,
- "google.generativeai.types.SafetyFeedbackDict.__new__": true,
- "google.generativeai.types.SafetyFeedbackDict.__or__": true,
- "google.generativeai.types.SafetyFeedbackDict.__ror__": true,
- "google.generativeai.types.SafetyFeedbackDict.clear": true,
- "google.generativeai.types.SafetyFeedbackDict.copy": true,
- "google.generativeai.types.SafetyFeedbackDict.fromkeys": true,
- "google.generativeai.types.SafetyFeedbackDict.get": true,
- "google.generativeai.types.SafetyFeedbackDict.items": true,
- "google.generativeai.types.SafetyFeedbackDict.keys": true,
- "google.generativeai.types.SafetyFeedbackDict.pop": true,
- "google.generativeai.types.SafetyFeedbackDict.popitem": true,
- "google.generativeai.types.SafetyFeedbackDict.setdefault": true,
- "google.generativeai.types.SafetyFeedbackDict.update": true,
- "google.generativeai.types.SafetyFeedbackDict.values": true,
- "google.generativeai.types.SafetyRatingDict": false,
- "google.generativeai.types.SafetyRatingDict.__contains__": true,
- "google.generativeai.types.SafetyRatingDict.__eq__": true,
- "google.generativeai.types.SafetyRatingDict.__ge__": true,
- "google.generativeai.types.SafetyRatingDict.__getitem__": true,
- "google.generativeai.types.SafetyRatingDict.__gt__": true,
- "google.generativeai.types.SafetyRatingDict.__init__": true,
- "google.generativeai.types.SafetyRatingDict.__iter__": true,
- "google.generativeai.types.SafetyRatingDict.__le__": true,
- "google.generativeai.types.SafetyRatingDict.__len__": true,
- "google.generativeai.types.SafetyRatingDict.__lt__": true,
- "google.generativeai.types.SafetyRatingDict.__ne__": true,
- "google.generativeai.types.SafetyRatingDict.__new__": true,
- "google.generativeai.types.SafetyRatingDict.__or__": true,
- "google.generativeai.types.SafetyRatingDict.__ror__": true,
- "google.generativeai.types.SafetyRatingDict.clear": true,
- "google.generativeai.types.SafetyRatingDict.copy": true,
- "google.generativeai.types.SafetyRatingDict.fromkeys": true,
- "google.generativeai.types.SafetyRatingDict.get": true,
- "google.generativeai.types.SafetyRatingDict.items": true,
- "google.generativeai.types.SafetyRatingDict.keys": true,
- "google.generativeai.types.SafetyRatingDict.pop": true,
- "google.generativeai.types.SafetyRatingDict.popitem": true,
- "google.generativeai.types.SafetyRatingDict.setdefault": true,
- "google.generativeai.types.SafetyRatingDict.update": true,
- "google.generativeai.types.SafetyRatingDict.values": true,
- "google.generativeai.types.SafetySettingDict": false,
- "google.generativeai.types.SafetySettingDict.__contains__": true,
- "google.generativeai.types.SafetySettingDict.__eq__": true,
- "google.generativeai.types.SafetySettingDict.__ge__": true,
- "google.generativeai.types.SafetySettingDict.__getitem__": true,
- "google.generativeai.types.SafetySettingDict.__gt__": true,
- "google.generativeai.types.SafetySettingDict.__init__": true,
- "google.generativeai.types.SafetySettingDict.__iter__": true,
- "google.generativeai.types.SafetySettingDict.__le__": true,
- "google.generativeai.types.SafetySettingDict.__len__": true,
- "google.generativeai.types.SafetySettingDict.__lt__": true,
- "google.generativeai.types.SafetySettingDict.__ne__": true,
- "google.generativeai.types.SafetySettingDict.__new__": true,
- "google.generativeai.types.SafetySettingDict.__or__": true,
- "google.generativeai.types.SafetySettingDict.__ror__": true,
- "google.generativeai.types.SafetySettingDict.clear": true,
- "google.generativeai.types.SafetySettingDict.copy": true,
- "google.generativeai.types.SafetySettingDict.fromkeys": true,
- "google.generativeai.types.SafetySettingDict.get": true,
- "google.generativeai.types.SafetySettingDict.items": true,
- "google.generativeai.types.SafetySettingDict.keys": true,
- "google.generativeai.types.SafetySettingDict.pop": true,
- "google.generativeai.types.SafetySettingDict.popitem": true,
- "google.generativeai.types.SafetySettingDict.setdefault": true,
- "google.generativeai.types.SafetySettingDict.update": true,
- "google.generativeai.types.SafetySettingDict.values": true,
- "google.generativeai.types.Status": false,
- "google.generativeai.types.Status.ByteSize": true,
- "google.generativeai.types.Status.Clear": true,
- "google.generativeai.types.Status.ClearExtension": true,
- "google.generativeai.types.Status.ClearField": true,
- "google.generativeai.types.Status.CopyFrom": true,
- "google.generativeai.types.Status.DESCRIPTOR": true,
- "google.generativeai.types.Status.DiscardUnknownFields": true,
- "google.generativeai.types.Status.Extensions": true,
- "google.generativeai.types.Status.FindInitializationErrors": true,
- "google.generativeai.types.Status.FromString": true,
- "google.generativeai.types.Status.HasExtension": true,
- "google.generativeai.types.Status.HasField": true,
- "google.generativeai.types.Status.IsInitialized": true,
- "google.generativeai.types.Status.ListFields": true,
- "google.generativeai.types.Status.MergeFrom": true,
- "google.generativeai.types.Status.MergeFromString": true,
- "google.generativeai.types.Status.ParseFromString": true,
- "google.generativeai.types.Status.RegisterExtension": true,
- "google.generativeai.types.Status.SerializePartialToString": true,
- "google.generativeai.types.Status.SerializeToString": true,
- "google.generativeai.types.Status.SetInParent": true,
- "google.generativeai.types.Status.UnknownFields": true,
- "google.generativeai.types.Status.WhichOneof": true,
- "google.generativeai.types.Status.__eq__": true,
- "google.generativeai.types.Status.__ge__": true,
- "google.generativeai.types.Status.__gt__": true,
- "google.generativeai.types.Status.__init__": true,
- "google.generativeai.types.Status.__le__": true,
- "google.generativeai.types.Status.__lt__": true,
- "google.generativeai.types.Status.__ne__": true,
- "google.generativeai.types.Status.__new__": true,
- "google.generativeai.types.Status.code": true,
- "google.generativeai.types.Status.details": true,
- "google.generativeai.types.Status.message": true,
- "google.generativeai.types.StopCandidateException": false,
- "google.generativeai.types.StopCandidateException.__eq__": true,
- "google.generativeai.types.StopCandidateException.__ge__": true,
- "google.generativeai.types.StopCandidateException.__gt__": true,
- "google.generativeai.types.StopCandidateException.__init__": true,
- "google.generativeai.types.StopCandidateException.__le__": true,
- "google.generativeai.types.StopCandidateException.__lt__": true,
- "google.generativeai.types.StopCandidateException.__ne__": true,
- "google.generativeai.types.StopCandidateException.__new__": true,
- "google.generativeai.types.StopCandidateException.add_note": true,
- "google.generativeai.types.StopCandidateException.args": true,
- "google.generativeai.types.StopCandidateException.with_traceback": true,
- "google.generativeai.types.StrictContentType": false,
- "google.generativeai.types.Tool": false,
- "google.generativeai.types.Tool.__call__": true,
- "google.generativeai.types.Tool.__eq__": true,
- "google.generativeai.types.Tool.__ge__": true,
- "google.generativeai.types.Tool.__getitem__": true,
- "google.generativeai.types.Tool.__gt__": true,
- "google.generativeai.types.Tool.__init__": true,
- "google.generativeai.types.Tool.__le__": true,
- "google.generativeai.types.Tool.__lt__": true,
- "google.generativeai.types.Tool.__ne__": true,
- "google.generativeai.types.Tool.__new__": true,
- "google.generativeai.types.Tool.code_execution": true,
- "google.generativeai.types.Tool.function_declarations": true,
- "google.generativeai.types.Tool.google_search_retrieval": true,
- "google.generativeai.types.Tool.to_proto": true,
- "google.generativeai.types.ToolDict": false,
- "google.generativeai.types.ToolDict.__contains__": true,
- "google.generativeai.types.ToolDict.__eq__": true,
- "google.generativeai.types.ToolDict.__ge__": true,
- "google.generativeai.types.ToolDict.__getitem__": true,
- "google.generativeai.types.ToolDict.__gt__": true,
- "google.generativeai.types.ToolDict.__init__": true,
- "google.generativeai.types.ToolDict.__iter__": true,
- "google.generativeai.types.ToolDict.__le__": true,
- "google.generativeai.types.ToolDict.__len__": true,
- "google.generativeai.types.ToolDict.__lt__": true,
- "google.generativeai.types.ToolDict.__ne__": true,
- "google.generativeai.types.ToolDict.__new__": true,
- "google.generativeai.types.ToolDict.__or__": true,
- "google.generativeai.types.ToolDict.__ror__": true,
- "google.generativeai.types.ToolDict.clear": true,
- "google.generativeai.types.ToolDict.copy": true,
- "google.generativeai.types.ToolDict.fromkeys": true,
- "google.generativeai.types.ToolDict.get": true,
- "google.generativeai.types.ToolDict.items": true,
- "google.generativeai.types.ToolDict.keys": true,
- "google.generativeai.types.ToolDict.pop": true,
- "google.generativeai.types.ToolDict.popitem": true,
- "google.generativeai.types.ToolDict.setdefault": true,
- "google.generativeai.types.ToolDict.update": true,
- "google.generativeai.types.ToolDict.values": true,
- "google.generativeai.types.ToolsType": false,
- "google.generativeai.types.TunedModel": false,
- "google.generativeai.types.TunedModel.__eq__": true,
- "google.generativeai.types.TunedModel.__ge__": true,
- "google.generativeai.types.TunedModel.__gt__": true,
- "google.generativeai.types.TunedModel.__init__": true,
- "google.generativeai.types.TunedModel.__le__": true,
- "google.generativeai.types.TunedModel.__lt__": true,
- "google.generativeai.types.TunedModel.__ne__": true,
- "google.generativeai.types.TunedModel.__new__": true,
- "google.generativeai.types.TunedModel.base_model": true,
- "google.generativeai.types.TunedModel.create_time": true,
- "google.generativeai.types.TunedModel.description": true,
- "google.generativeai.types.TunedModel.display_name": true,
- "google.generativeai.types.TunedModel.name": true,
- "google.generativeai.types.TunedModel.permissions": true,
- "google.generativeai.types.TunedModel.reader_project_numbers": true,
- "google.generativeai.types.TunedModel.source_model": true,
- "google.generativeai.types.TunedModel.state": true,
- "google.generativeai.types.TunedModel.temperature": true,
- "google.generativeai.types.TunedModel.top_k": true,
- "google.generativeai.types.TunedModel.top_p": true,
- "google.generativeai.types.TunedModel.tuning_task": true,
- "google.generativeai.types.TunedModel.update_time": true,
- "google.generativeai.types.TunedModelNameOptions": false,
- "google.generativeai.types.TunedModelState": false,
- "google.generativeai.types.TunedModelState.ACTIVE": true,
- "google.generativeai.types.TunedModelState.CREATING": true,
- "google.generativeai.types.TunedModelState.FAILED": true,
- "google.generativeai.types.TunedModelState.STATE_UNSPECIFIED": true,
- "google.generativeai.types.TunedModelState.__abs__": true,
- "google.generativeai.types.TunedModelState.__add__": true,
- "google.generativeai.types.TunedModelState.__and__": true,
- "google.generativeai.types.TunedModelState.__bool__": true,
- "google.generativeai.types.TunedModelState.__contains__": true,
- "google.generativeai.types.TunedModelState.__eq__": true,
- "google.generativeai.types.TunedModelState.__floordiv__": true,
- "google.generativeai.types.TunedModelState.__ge__": true,
- "google.generativeai.types.TunedModelState.__getitem__": true,
- "google.generativeai.types.TunedModelState.__gt__": true,
- "google.generativeai.types.TunedModelState.__init__": true,
- "google.generativeai.types.TunedModelState.__invert__": true,
- "google.generativeai.types.TunedModelState.__iter__": true,
- "google.generativeai.types.TunedModelState.__le__": true,
- "google.generativeai.types.TunedModelState.__len__": true,
- "google.generativeai.types.TunedModelState.__lshift__": true,
- "google.generativeai.types.TunedModelState.__lt__": true,
- "google.generativeai.types.TunedModelState.__mod__": true,
- "google.generativeai.types.TunedModelState.__mul__": true,
- "google.generativeai.types.TunedModelState.__ne__": true,
- "google.generativeai.types.TunedModelState.__neg__": true,
- "google.generativeai.types.TunedModelState.__new__": true,
- "google.generativeai.types.TunedModelState.__or__": true,
- "google.generativeai.types.TunedModelState.__pos__": true,
- "google.generativeai.types.TunedModelState.__pow__": true,
- "google.generativeai.types.TunedModelState.__radd__": true,
- "google.generativeai.types.TunedModelState.__rand__": true,
- "google.generativeai.types.TunedModelState.__rfloordiv__": true,
- "google.generativeai.types.TunedModelState.__rlshift__": true,
- "google.generativeai.types.TunedModelState.__rmod__": true,
- "google.generativeai.types.TunedModelState.__rmul__": true,
- "google.generativeai.types.TunedModelState.__ror__": true,
- "google.generativeai.types.TunedModelState.__rpow__": true,
- "google.generativeai.types.TunedModelState.__rrshift__": true,
- "google.generativeai.types.TunedModelState.__rshift__": true,
- "google.generativeai.types.TunedModelState.__rsub__": true,
- "google.generativeai.types.TunedModelState.__rtruediv__": true,
- "google.generativeai.types.TunedModelState.__rxor__": true,
- "google.generativeai.types.TunedModelState.__sub__": true,
- "google.generativeai.types.TunedModelState.__truediv__": true,
- "google.generativeai.types.TunedModelState.__xor__": true,
- "google.generativeai.types.TunedModelState.as_integer_ratio": true,
- "google.generativeai.types.TunedModelState.bit_count": true,
- "google.generativeai.types.TunedModelState.bit_length": true,
- "google.generativeai.types.TunedModelState.conjugate": true,
- "google.generativeai.types.TunedModelState.denominator": true,
- "google.generativeai.types.TunedModelState.from_bytes": true,
- "google.generativeai.types.TunedModelState.imag": true,
- "google.generativeai.types.TunedModelState.is_integer": true,
- "google.generativeai.types.TunedModelState.numerator": true,
- "google.generativeai.types.TunedModelState.real": true,
- "google.generativeai.types.TunedModelState.to_bytes": true,
- "google.generativeai.types.TypedDict": false,
- "google.generativeai.types.annotations": true,
- "google.generativeai.types.get_default_file_client": false,
- "google.generativeai.types.to_file_data": false,
- "google.generativeai.update_tuned_model": false,
- "google.generativeai.upload_file": false
- },
- "link_prefix": null,
- "physical_path": {
- "google.generativeai": "google.generativeai",
- "google.generativeai.ChatSession": "google.generativeai.generative_models.ChatSession",
- "google.generativeai.ChatSession.__init__": "google.generativeai.generative_models.ChatSession.__init__",
- "google.generativeai.ChatSession.rewind": "google.generativeai.generative_models.ChatSession.rewind",
- "google.generativeai.ChatSession.send_message": "google.generativeai.generative_models.ChatSession.send_message",
- "google.generativeai.ChatSession.send_message_async": "google.generativeai.generative_models.ChatSession.send_message_async",
- "google.generativeai.GenerativeModel": "google.generativeai.generative_models.GenerativeModel",
- "google.generativeai.GenerativeModel.__init__": "google.generativeai.generative_models.GenerativeModel.__init__",
- "google.generativeai.GenerativeModel.count_tokens": "google.generativeai.generative_models.GenerativeModel.count_tokens",
- "google.generativeai.GenerativeModel.count_tokens_async": "google.generativeai.generative_models.GenerativeModel.count_tokens_async",
- "google.generativeai.GenerativeModel.from_cached_content": "google.generativeai.generative_models.GenerativeModel.from_cached_content",
- "google.generativeai.GenerativeModel.generate_content": "google.generativeai.generative_models.GenerativeModel.generate_content",
- "google.generativeai.GenerativeModel.generate_content_async": "google.generativeai.generative_models.GenerativeModel.generate_content_async",
- "google.generativeai.GenerativeModel.start_chat": "google.generativeai.generative_models.GenerativeModel.start_chat",
- "google.generativeai.caching": "google.generativeai.caching",
- "google.generativeai.caching.CachedContent": "google.generativeai.caching.CachedContent",
- "google.generativeai.caching.CachedContent.__init__": "google.generativeai.caching.CachedContent.__init__",
- "google.generativeai.caching.CachedContent.create": "google.generativeai.caching.CachedContent.create",
- "google.generativeai.caching.CachedContent.delete": "google.generativeai.caching.CachedContent.delete",
- "google.generativeai.caching.CachedContent.get": "google.generativeai.caching.CachedContent.get",
- "google.generativeai.caching.CachedContent.list": "google.generativeai.caching.CachedContent.list",
- "google.generativeai.caching.CachedContent.update": "google.generativeai.caching.CachedContent.update",
- "google.generativeai.caching.get_default_cache_client": "google.generativeai.client.get_default_cache_client",
- "google.generativeai.configure": "google.generativeai.client.configure",
- "google.generativeai.create_tuned_model": "google.generativeai.models.create_tuned_model",
- "google.generativeai.delete_file": "google.generativeai.files.delete_file",
- "google.generativeai.delete_tuned_model": "google.generativeai.models.delete_tuned_model",
- "google.generativeai.embed_content": "google.generativeai.embedding.embed_content",
- "google.generativeai.embed_content_async": "google.generativeai.embedding.embed_content_async",
- "google.generativeai.get_base_model": "google.generativeai.models.get_base_model",
- "google.generativeai.get_file": "google.generativeai.files.get_file",
- "google.generativeai.get_model": "google.generativeai.models.get_model",
- "google.generativeai.get_operation": "google.generativeai.operations.get_operation",
- "google.generativeai.get_tuned_model": "google.generativeai.models.get_tuned_model",
- "google.generativeai.list_files": "google.generativeai.files.list_files",
- "google.generativeai.list_models": "google.generativeai.models.list_models",
- "google.generativeai.list_operations": "google.generativeai.operations.list_operations",
- "google.generativeai.list_tuned_models": "google.generativeai.models.list_tuned_models",
- "google.generativeai.protos": "google.generativeai.protos",
- "google.generativeai.protos.AttributionSourceId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.GroundingPassageId",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": "proto.message.Message.__eq__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": "proto.message.Message.__init__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": "proto.message.Message.__ne__",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.SemanticRetrieverChunk",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.AttributionSourceId.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.AttributionSourceId.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.AttributionSourceId.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.AttributionSourceId.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.AttributionSourceId.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.AttributionSourceId.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.AttributionSourceId.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.AttributionSourceId.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchCreateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksRequest",
- "google.generativeai.protos.BatchCreateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchCreateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchCreateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchCreateChunksRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchCreateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchCreateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchCreateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchCreateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchCreateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksResponse",
- "google.generativeai.protos.BatchCreateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchCreateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchCreateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchCreateChunksResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchCreateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchCreateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchCreateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchCreateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchDeleteChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchDeleteChunksRequest",
- "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchDeleteChunksRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchDeleteChunksRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchDeleteChunksRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchDeleteChunksRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchDeleteChunksRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchEmbedContentsRequest": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsRequest",
- "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchEmbedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchEmbedContentsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchEmbedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchEmbedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchEmbedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchEmbedContentsResponse": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsResponse",
- "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchEmbedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchEmbedContentsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchEmbedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchEmbedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchEmbedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchEmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextRequest",
- "google.generativeai.protos.BatchEmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchEmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchEmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchEmbedTextRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchEmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchEmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchEmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchEmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchEmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextResponse",
- "google.generativeai.protos.BatchEmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchEmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchEmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchEmbedTextResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchEmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchEmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchEmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchEmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchUpdateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksRequest",
- "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchUpdateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchUpdateChunksRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchUpdateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchUpdateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchUpdateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.BatchUpdateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksResponse",
- "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.BatchUpdateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.BatchUpdateChunksResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.BatchUpdateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.BatchUpdateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.BatchUpdateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Blob": "google.ai.generativelanguage_v1beta.types.content.Blob",
- "google.generativeai.protos.Blob.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Blob.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Blob.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Blob.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Blob.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Blob.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Blob.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Blob.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CachedContent": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent",
- "google.generativeai.protos.CachedContent.UsageMetadata": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent.UsageMetadata",
- "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CachedContent.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CachedContent.UsageMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CachedContent.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CachedContent.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CachedContent.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CachedContent.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CachedContent.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CachedContent.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CachedContent.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CachedContent.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CachedContent.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CachedContent.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CachedContent.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate",
- "google.generativeai.protos.Candidate.FinishReason": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate.FinishReason",
- "google.generativeai.protos.Candidate.FinishReason.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Candidate.FinishReason.__eq__": "proto.enums.Enum.__eq__",
- "google.generativeai.protos.Candidate.FinishReason.__ge__": "proto.enums.Enum.__ge__",
- "google.generativeai.protos.Candidate.FinishReason.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Candidate.FinishReason.__gt__": "proto.enums.Enum.__gt__",
- "google.generativeai.protos.Candidate.FinishReason.__init__": "enum.Enum.__init__",
- "google.generativeai.protos.Candidate.FinishReason.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Candidate.FinishReason.__le__": "proto.enums.Enum.__le__",
- "google.generativeai.protos.Candidate.FinishReason.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Candidate.FinishReason.__lt__": "proto.enums.Enum.__lt__",
- "google.generativeai.protos.Candidate.FinishReason.__ne__": "proto.enums.Enum.__ne__",
- "google.generativeai.protos.Candidate.FinishReason.__new__": "enum.Enum.__new__",
- "google.generativeai.protos.Candidate.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Candidate.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Candidate.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Candidate.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Candidate.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Candidate.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Candidate.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Candidate.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Chunk": "google.ai.generativelanguage_v1beta.types.retriever.Chunk",
- "google.generativeai.protos.Chunk.State": "google.ai.generativelanguage_v1beta.types.retriever.Chunk.State",
- "google.generativeai.protos.Chunk.State.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Chunk.State.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Chunk.State.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Chunk.State.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Chunk.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Chunk.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Chunk.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Chunk.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Chunk.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Chunk.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Chunk.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Chunk.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ChunkData": "google.ai.generativelanguage_v1beta.types.retriever.ChunkData",
- "google.generativeai.protos.ChunkData.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ChunkData.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ChunkData.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ChunkData.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ChunkData.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ChunkData.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ChunkData.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ChunkData.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CitationMetadata": "google.ai.generativelanguage_v1beta.types.citation.CitationMetadata",
- "google.generativeai.protos.CitationMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CitationMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CitationMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CitationMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CitationMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CitationMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CitationMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CitationMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CitationSource": "google.ai.generativelanguage_v1beta.types.citation.CitationSource",
- "google.generativeai.protos.CitationSource.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CitationSource.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CitationSource.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CitationSource.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CitationSource.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CitationSource.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CitationSource.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CitationSource.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CodeExecution": "google.ai.generativelanguage_v1beta.types.content.CodeExecution",
- "google.generativeai.protos.CodeExecution.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CodeExecution.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CodeExecution.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CodeExecution.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CodeExecution.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CodeExecution.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CodeExecution.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CodeExecution.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CodeExecutionResult": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult",
- "google.generativeai.protos.CodeExecutionResult.Outcome": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult.Outcome",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.CodeExecutionResult.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CodeExecutionResult.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CodeExecutionResult.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CodeExecutionResult.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CodeExecutionResult.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CodeExecutionResult.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CodeExecutionResult.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CodeExecutionResult.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Condition": "google.ai.generativelanguage_v1beta.types.retriever.Condition",
- "google.generativeai.protos.Condition.Operator": "google.ai.generativelanguage_v1beta.types.retriever.Condition.Operator",
- "google.generativeai.protos.Condition.Operator.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Condition.Operator.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Condition.Operator.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Condition.Operator.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Condition.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Condition.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Condition.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Condition.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Condition.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Condition.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Condition.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Condition.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Content": "google.ai.generativelanguage_v1beta.types.content.Content",
- "google.generativeai.protos.Content.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Content.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Content.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Content.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Content.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Content.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Content.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Content.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ContentEmbedding": "google.ai.generativelanguage_v1beta.types.generative_service.ContentEmbedding",
- "google.generativeai.protos.ContentEmbedding.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ContentEmbedding.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ContentEmbedding.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ContentEmbedding.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ContentEmbedding.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ContentEmbedding.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ContentEmbedding.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ContentEmbedding.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ContentFilter": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter",
- "google.generativeai.protos.ContentFilter.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ContentFilter.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ContentFilter.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ContentFilter.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ContentFilter.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ContentFilter.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ContentFilter.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ContentFilter.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Corpus": "google.ai.generativelanguage_v1beta.types.retriever.Corpus",
- "google.generativeai.protos.Corpus.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Corpus.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Corpus.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Corpus.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Corpus.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Corpus.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Corpus.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Corpus.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountMessageTokensRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensRequest",
- "google.generativeai.protos.CountMessageTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountMessageTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountMessageTokensRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountMessageTokensRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountMessageTokensRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountMessageTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountMessageTokensRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountMessageTokensRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountMessageTokensResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensResponse",
- "google.generativeai.protos.CountMessageTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountMessageTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountMessageTokensResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountMessageTokensResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountMessageTokensResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountMessageTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountMessageTokensResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountMessageTokensResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountTextTokensRequest": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensRequest",
- "google.generativeai.protos.CountTextTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountTextTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountTextTokensRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountTextTokensRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountTextTokensRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountTextTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountTextTokensRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountTextTokensRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountTextTokensResponse": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensResponse",
- "google.generativeai.protos.CountTextTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountTextTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountTextTokensResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountTextTokensResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountTextTokensResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountTextTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountTextTokensResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountTextTokensResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountTokensRequest": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensRequest",
- "google.generativeai.protos.CountTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountTokensRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountTokensRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountTokensRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountTokensRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountTokensRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CountTokensResponse": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensResponse",
- "google.generativeai.protos.CountTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CountTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CountTokensResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CountTokensResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CountTokensResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CountTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CountTokensResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CountTokensResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.CreateCachedContentRequest",
- "google.generativeai.protos.CreateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateChunkRequest",
- "google.generativeai.protos.CreateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateChunkRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateCorpusRequest",
- "google.generativeai.protos.CreateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateCorpusRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateDocumentRequest",
- "google.generativeai.protos.CreateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateDocumentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileRequest",
- "google.generativeai.protos.CreateFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateFileRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateFileRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateFileRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateFileRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateFileRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateFileResponse": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileResponse",
- "google.generativeai.protos.CreateFileResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateFileResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateFileResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateFileResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateFileResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateFileResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateFileResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateFileResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.CreatePermissionRequest",
- "google.generativeai.protos.CreatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreatePermissionRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateTunedModelMetadata": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelMetadata",
- "google.generativeai.protos.CreateTunedModelMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateTunedModelMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateTunedModelMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateTunedModelMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateTunedModelMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateTunedModelMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateTunedModelMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateTunedModelMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CreateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelRequest",
- "google.generativeai.protos.CreateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CreateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CreateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CreateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CreateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CreateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CreateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CreateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.CustomMetadata": "google.ai.generativelanguage_v1beta.types.retriever.CustomMetadata",
- "google.generativeai.protos.CustomMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.CustomMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.CustomMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.CustomMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.CustomMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.CustomMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.CustomMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.CustomMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Dataset": "google.ai.generativelanguage_v1beta.types.tuned_model.Dataset",
- "google.generativeai.protos.Dataset.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Dataset.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Dataset.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Dataset.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Dataset.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Dataset.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Dataset.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Dataset.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.DeleteCachedContentRequest",
- "google.generativeai.protos.DeleteCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteCachedContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteChunkRequest",
- "google.generativeai.protos.DeleteChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteChunkRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteChunkRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteChunkRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteChunkRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteChunkRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteCorpusRequest",
- "google.generativeai.protos.DeleteCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteCorpusRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteDocumentRequest",
- "google.generativeai.protos.DeleteDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteDocumentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.DeleteFileRequest",
- "google.generativeai.protos.DeleteFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteFileRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteFileRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteFileRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteFileRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteFileRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeletePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.DeletePermissionRequest",
- "google.generativeai.protos.DeletePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeletePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeletePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeletePermissionRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeletePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeletePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeletePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeletePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DeleteTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.DeleteTunedModelRequest",
- "google.generativeai.protos.DeleteTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DeleteTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DeleteTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DeleteTunedModelRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DeleteTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DeleteTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DeleteTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DeleteTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Document": "google.ai.generativelanguage_v1beta.types.retriever.Document",
- "google.generativeai.protos.Document.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Document.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Document.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Document.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Document.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Document.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Document.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Document.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.DynamicRetrievalConfig": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig.Mode",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.DynamicRetrievalConfig.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.DynamicRetrievalConfig.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.DynamicRetrievalConfig.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.DynamicRetrievalConfig.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.DynamicRetrievalConfig.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.DynamicRetrievalConfig.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.DynamicRetrievalConfig.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.DynamicRetrievalConfig.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.EmbedContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentRequest",
- "google.generativeai.protos.EmbedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.EmbedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.EmbedContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.EmbedContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.EmbedContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.EmbedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.EmbedContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.EmbedContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.EmbedContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentResponse",
- "google.generativeai.protos.EmbedContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.EmbedContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.EmbedContentResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.EmbedContentResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.EmbedContentResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.EmbedContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.EmbedContentResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.EmbedContentResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.EmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextRequest",
- "google.generativeai.protos.EmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.EmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.EmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.EmbedTextRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.EmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.EmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.EmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.EmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.EmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextResponse",
- "google.generativeai.protos.EmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.EmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.EmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.EmbedTextResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.EmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.EmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.EmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.EmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Embedding": "google.ai.generativelanguage_v1beta.types.text_service.Embedding",
- "google.generativeai.protos.Embedding.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Embedding.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Embedding.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Embedding.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Embedding.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Embedding.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Embedding.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Embedding.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Example": "google.ai.generativelanguage_v1beta.types.discuss_service.Example",
- "google.generativeai.protos.Example.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Example.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Example.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Example.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Example.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Example.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Example.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Example.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ExecutableCode": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode",
- "google.generativeai.protos.ExecutableCode.Language": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode.Language",
- "google.generativeai.protos.ExecutableCode.Language.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.ExecutableCode.Language.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.ExecutableCode.Language.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.ExecutableCode.Language.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.ExecutableCode.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ExecutableCode.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ExecutableCode.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ExecutableCode.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ExecutableCode.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ExecutableCode.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ExecutableCode.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ExecutableCode.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.File": "google.ai.generativelanguage_v1beta.types.file.File",
- "google.generativeai.protos.File.State": "google.ai.generativelanguage_v1beta.types.file.File.State",
- "google.generativeai.protos.File.State.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.File.State.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.File.State.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.File.State.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.File.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.File.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.File.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.File.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.File.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.File.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.File.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.File.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.FileData": "google.ai.generativelanguage_v1beta.types.content.FileData",
- "google.generativeai.protos.FileData.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.FileData.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.FileData.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.FileData.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.FileData.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.FileData.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.FileData.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.FileData.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.FunctionCall": "google.ai.generativelanguage_v1beta.types.content.FunctionCall",
- "google.generativeai.protos.FunctionCall.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.FunctionCall.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.FunctionCall.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.FunctionCall.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.FunctionCall.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.FunctionCall.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.FunctionCall.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.FunctionCall.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.FunctionCallingConfig": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig",
- "google.generativeai.protos.FunctionCallingConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig.Mode",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.FunctionCallingConfig.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.FunctionCallingConfig.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.FunctionCallingConfig.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.FunctionCallingConfig.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.FunctionCallingConfig.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.FunctionCallingConfig.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.FunctionCallingConfig.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.FunctionCallingConfig.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.FunctionDeclaration": "google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration",
- "google.generativeai.protos.FunctionDeclaration.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.FunctionDeclaration.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.FunctionDeclaration.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.FunctionDeclaration.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.FunctionDeclaration.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.FunctionDeclaration.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.FunctionDeclaration.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.FunctionDeclaration.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.FunctionResponse": "google.ai.generativelanguage_v1beta.types.content.FunctionResponse",
- "google.generativeai.protos.FunctionResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.FunctionResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.FunctionResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.FunctionResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.FunctionResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.FunctionResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.FunctionResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.FunctionResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateAnswerRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest.AnswerStyle",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.GenerateAnswerRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateAnswerRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateAnswerRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateAnswerRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateAnswerRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateAnswerRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateAnswerRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateAnswerRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateAnswerResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback.BlockReason",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateAnswerResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateAnswerResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateAnswerResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateAnswerResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateAnswerResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateAnswerResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateAnswerResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateAnswerResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentRequest",
- "google.generativeai.protos.GenerateContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback.BlockReason",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.UsageMetadata",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateContentResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateContentResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateContentResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateContentResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateContentResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateMessageRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageRequest",
- "google.generativeai.protos.GenerateMessageRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateMessageRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateMessageRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateMessageRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateMessageRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateMessageRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateMessageRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateMessageRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateMessageResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageResponse",
- "google.generativeai.protos.GenerateMessageResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateMessageResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateMessageResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateMessageResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateMessageResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateMessageResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateMessageResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateMessageResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextRequest",
- "google.generativeai.protos.GenerateTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateTextRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateTextRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateTextRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateTextRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateTextRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerateTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextResponse",
- "google.generativeai.protos.GenerateTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerateTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerateTextResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerateTextResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerateTextResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerateTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerateTextResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerateTextResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GenerationConfig": "google.ai.generativelanguage_v1beta.types.generative_service.GenerationConfig",
- "google.generativeai.protos.GenerationConfig.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GenerationConfig.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GenerationConfig.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GenerationConfig.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GenerationConfig.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GenerationConfig.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GenerationConfig.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GenerationConfig.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.GetCachedContentRequest",
- "google.generativeai.protos.GetCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetCachedContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetChunkRequest",
- "google.generativeai.protos.GetChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetChunkRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetChunkRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetChunkRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetChunkRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetChunkRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetCorpusRequest",
- "google.generativeai.protos.GetCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetCorpusRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetDocumentRequest",
- "google.generativeai.protos.GetDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetDocumentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.GetFileRequest",
- "google.generativeai.protos.GetFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetFileRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetFileRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetFileRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetFileRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetFileRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetModelRequest",
- "google.generativeai.protos.GetModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetModelRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetModelRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetModelRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetModelRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetModelRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetPermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.GetPermissionRequest",
- "google.generativeai.protos.GetPermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetPermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetPermissionRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetPermissionRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetPermissionRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetPermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetPermissionRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetPermissionRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GetTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetTunedModelRequest",
- "google.generativeai.protos.GetTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GetTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GetTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GetTunedModelRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GetTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GetTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GetTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GetTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GoogleSearchRetrieval": "google.ai.generativelanguage_v1beta.types.content.GoogleSearchRetrieval",
- "google.generativeai.protos.GoogleSearchRetrieval.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GoogleSearchRetrieval.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GoogleSearchRetrieval.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GoogleSearchRetrieval.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GoogleSearchRetrieval.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GoogleSearchRetrieval.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GoogleSearchRetrieval.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GoogleSearchRetrieval.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingAttribution": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingAttribution",
- "google.generativeai.protos.GroundingAttribution.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingAttribution.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingAttribution.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingAttribution.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingAttribution.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingAttribution.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingAttribution.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingAttribution.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingChunk": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk",
- "google.generativeai.protos.GroundingChunk.Web": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk.Web",
- "google.generativeai.protos.GroundingChunk.Web.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingChunk.Web.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingChunk.Web.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingChunk.Web.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingChunk.Web.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingChunk.Web.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingChunk.Web.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingChunk.Web.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingChunk.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingChunk.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingChunk.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingChunk.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingChunk.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingChunk.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingChunk.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingChunk.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingMetadata",
- "google.generativeai.protos.GroundingMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingPassage": "google.ai.generativelanguage_v1beta.types.content.GroundingPassage",
- "google.generativeai.protos.GroundingPassage.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingPassage.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingPassage.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingPassage.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingPassage.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingPassage.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingPassage.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingPassage.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingPassages": "google.ai.generativelanguage_v1beta.types.content.GroundingPassages",
- "google.generativeai.protos.GroundingPassages.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingPassages.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingPassages.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingPassages.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingPassages.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingPassages.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingPassages.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingPassages.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.GroundingSupport": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingSupport",
- "google.generativeai.protos.GroundingSupport.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.GroundingSupport.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.GroundingSupport.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.GroundingSupport.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.GroundingSupport.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.GroundingSupport.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.GroundingSupport.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.GroundingSupport.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.HarmCategory": "google.ai.generativelanguage_v1beta.types.safety.HarmCategory",
- "google.generativeai.protos.HarmCategory.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.HarmCategory.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.HarmCategory.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Hyperparameters": "google.ai.generativelanguage_v1beta.types.tuned_model.Hyperparameters",
- "google.generativeai.protos.Hyperparameters.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Hyperparameters.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Hyperparameters.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Hyperparameters.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Hyperparameters.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Hyperparameters.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Hyperparameters.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Hyperparameters.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListCachedContentsRequest": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsRequest",
- "google.generativeai.protos.ListCachedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListCachedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListCachedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListCachedContentsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListCachedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListCachedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListCachedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListCachedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListCachedContentsResponse": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsResponse",
- "google.generativeai.protos.ListCachedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListCachedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListCachedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListCachedContentsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListCachedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListCachedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListCachedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListCachedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksRequest",
- "google.generativeai.protos.ListChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListChunksRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListChunksRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListChunksRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListChunksRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListChunksRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksResponse",
- "google.generativeai.protos.ListChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListChunksResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListChunksResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListChunksResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListChunksResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListChunksResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListCorporaRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaRequest",
- "google.generativeai.protos.ListCorporaRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListCorporaRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListCorporaRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListCorporaRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListCorporaRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListCorporaRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListCorporaRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListCorporaRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListCorporaResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaResponse",
- "google.generativeai.protos.ListCorporaResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListCorporaResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListCorporaResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListCorporaResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListCorporaResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListCorporaResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListCorporaResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListCorporaResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListDocumentsRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsRequest",
- "google.generativeai.protos.ListDocumentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListDocumentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListDocumentsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListDocumentsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListDocumentsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListDocumentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListDocumentsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListDocumentsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListDocumentsResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsResponse",
- "google.generativeai.protos.ListDocumentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListDocumentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListDocumentsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListDocumentsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListDocumentsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListDocumentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListDocumentsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListDocumentsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListFilesRequest": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesRequest",
- "google.generativeai.protos.ListFilesRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListFilesRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListFilesRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListFilesRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListFilesRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListFilesRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListFilesRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListFilesRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListFilesResponse": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesResponse",
- "google.generativeai.protos.ListFilesResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListFilesResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListFilesResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListFilesResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListFilesResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListFilesResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListFilesResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListFilesResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsRequest",
- "google.generativeai.protos.ListModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListModelsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListModelsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListModelsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListModelsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListModelsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsResponse",
- "google.generativeai.protos.ListModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListModelsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListModelsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListModelsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListModelsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListModelsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListPermissionsRequest": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsRequest",
- "google.generativeai.protos.ListPermissionsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListPermissionsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListPermissionsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListPermissionsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListPermissionsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListPermissionsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListPermissionsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListPermissionsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListPermissionsResponse": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsResponse",
- "google.generativeai.protos.ListPermissionsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListPermissionsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListPermissionsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListPermissionsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListPermissionsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListPermissionsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListPermissionsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListPermissionsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListTunedModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsRequest",
- "google.generativeai.protos.ListTunedModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListTunedModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListTunedModelsRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListTunedModelsRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListTunedModelsRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListTunedModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListTunedModelsRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListTunedModelsRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ListTunedModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsResponse",
- "google.generativeai.protos.ListTunedModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ListTunedModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ListTunedModelsResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ListTunedModelsResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ListTunedModelsResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ListTunedModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ListTunedModelsResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ListTunedModelsResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.LogprobsResult": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult",
- "google.generativeai.protos.LogprobsResult.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.Candidate",
- "google.generativeai.protos.LogprobsResult.Candidate.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.LogprobsResult.Candidate.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.LogprobsResult.Candidate.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.LogprobsResult.Candidate.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.LogprobsResult.Candidate.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.LogprobsResult.Candidate.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.LogprobsResult.Candidate.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.LogprobsResult.Candidate.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.LogprobsResult.TopCandidates": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.TopCandidates",
- "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.LogprobsResult.TopCandidates.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.LogprobsResult.TopCandidates.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.LogprobsResult.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.LogprobsResult.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.LogprobsResult.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.LogprobsResult.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.LogprobsResult.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.LogprobsResult.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.LogprobsResult.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.LogprobsResult.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Message": "google.ai.generativelanguage_v1beta.types.discuss_service.Message",
- "google.generativeai.protos.Message.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Message.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Message.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Message.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Message.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Message.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Message.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Message.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.MessagePrompt": "google.ai.generativelanguage_v1beta.types.discuss_service.MessagePrompt",
- "google.generativeai.protos.MessagePrompt.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.MessagePrompt.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.MessagePrompt.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.MessagePrompt.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.MessagePrompt.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.MessagePrompt.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.MessagePrompt.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.MessagePrompt.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.MetadataFilter": "google.ai.generativelanguage_v1beta.types.retriever.MetadataFilter",
- "google.generativeai.protos.MetadataFilter.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.MetadataFilter.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.MetadataFilter.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.MetadataFilter.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.MetadataFilter.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.MetadataFilter.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.MetadataFilter.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.MetadataFilter.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Model": "google.ai.generativelanguage_v1beta.types.model.Model",
- "google.generativeai.protos.Model.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Model.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Model.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Model.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Model.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Model.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Model.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Model.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Part": "google.ai.generativelanguage_v1beta.types.content.Part",
- "google.generativeai.protos.Part.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Part.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Part.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Part.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Part.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Part.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Part.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Part.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Permission": "google.ai.generativelanguage_v1beta.types.permission.Permission",
- "google.generativeai.protos.Permission.GranteeType": "google.ai.generativelanguage_v1beta.types.permission.Permission.GranteeType",
- "google.generativeai.protos.Permission.GranteeType.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Permission.GranteeType.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Permission.GranteeType.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Permission.GranteeType.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Permission.Role": "google.ai.generativelanguage_v1beta.types.permission.Permission.Role",
- "google.generativeai.protos.Permission.Role.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Permission.Role.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Permission.Role.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Permission.Role.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.Permission.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Permission.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Permission.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Permission.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Permission.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Permission.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Permission.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Permission.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.PredictRequest": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictRequest",
- "google.generativeai.protos.PredictRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.PredictRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.PredictRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.PredictRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.PredictRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.PredictRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.PredictRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.PredictRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.PredictResponse": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictResponse",
- "google.generativeai.protos.PredictResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.PredictResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.PredictResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.PredictResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.PredictResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.PredictResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.PredictResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.PredictResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.QueryCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusRequest",
- "google.generativeai.protos.QueryCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.QueryCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.QueryCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.QueryCorpusRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.QueryCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.QueryCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.QueryCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.QueryCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.QueryCorpusResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusResponse",
- "google.generativeai.protos.QueryCorpusResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.QueryCorpusResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.QueryCorpusResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.QueryCorpusResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.QueryCorpusResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.QueryCorpusResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.QueryCorpusResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.QueryCorpusResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.QueryDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentRequest",
- "google.generativeai.protos.QueryDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.QueryDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.QueryDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.QueryDocumentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.QueryDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.QueryDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.QueryDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.QueryDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.QueryDocumentResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentResponse",
- "google.generativeai.protos.QueryDocumentResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.QueryDocumentResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.QueryDocumentResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.QueryDocumentResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.QueryDocumentResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.QueryDocumentResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.QueryDocumentResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.QueryDocumentResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.RelevantChunk": "google.ai.generativelanguage_v1beta.types.retriever_service.RelevantChunk",
- "google.generativeai.protos.RelevantChunk.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.RelevantChunk.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.RelevantChunk.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.RelevantChunk.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.RelevantChunk.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.RelevantChunk.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.RelevantChunk.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.RelevantChunk.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.RetrievalMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.RetrievalMetadata",
- "google.generativeai.protos.RetrievalMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.RetrievalMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.RetrievalMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.RetrievalMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.RetrievalMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.RetrievalMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.RetrievalMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.RetrievalMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.SafetyFeedback": "google.ai.generativelanguage_v1beta.types.safety.SafetyFeedback",
- "google.generativeai.protos.SafetyFeedback.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.SafetyFeedback.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.SafetyFeedback.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.SafetyFeedback.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.SafetyFeedback.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.SafetyFeedback.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.SafetyFeedback.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.SafetyFeedback.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.SafetyRating": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating",
- "google.generativeai.protos.SafetyRating.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.SafetyRating.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.SafetyRating.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.SafetyRating.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.SafetyRating.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.SafetyRating.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.SafetyRating.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.SafetyRating.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.SafetySetting": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting",
- "google.generativeai.protos.SafetySetting.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.SafetySetting.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.SafetySetting.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.SafetySetting.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.SafetySetting.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.SafetySetting.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.SafetySetting.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.SafetySetting.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Schema": "google.ai.generativelanguage_v1beta.types.content.Schema",
- "google.generativeai.protos.Schema.PropertiesEntry": "google.ai.generativelanguage_v1beta.types.content.Schema.PropertiesEntry",
- "google.generativeai.protos.Schema.PropertiesEntry.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Schema.PropertiesEntry.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Schema.PropertiesEntry.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Schema.PropertiesEntry.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Schema.PropertiesEntry.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Schema.PropertiesEntry.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Schema.PropertiesEntry.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Schema.PropertiesEntry.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Schema.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Schema.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Schema.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Schema.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Schema.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Schema.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Schema.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Schema.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.SearchEntryPoint": "google.ai.generativelanguage_v1beta.types.generative_service.SearchEntryPoint",
- "google.generativeai.protos.SearchEntryPoint.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.SearchEntryPoint.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.SearchEntryPoint.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.SearchEntryPoint.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.SearchEntryPoint.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.SearchEntryPoint.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.SearchEntryPoint.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.SearchEntryPoint.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Segment": "google.ai.generativelanguage_v1beta.types.generative_service.Segment",
- "google.generativeai.protos.Segment.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Segment.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Segment.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Segment.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Segment.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Segment.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Segment.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Segment.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.SemanticRetrieverConfig": "google.ai.generativelanguage_v1beta.types.generative_service.SemanticRetrieverConfig",
- "google.generativeai.protos.SemanticRetrieverConfig.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.SemanticRetrieverConfig.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.SemanticRetrieverConfig.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.SemanticRetrieverConfig.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.SemanticRetrieverConfig.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.SemanticRetrieverConfig.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.SemanticRetrieverConfig.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.SemanticRetrieverConfig.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.StringList": "google.ai.generativelanguage_v1beta.types.retriever.StringList",
- "google.generativeai.protos.StringList.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.StringList.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.StringList.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.StringList.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.StringList.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.StringList.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.StringList.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.StringList.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TaskType": "google.ai.generativelanguage_v1beta.types.generative_service.TaskType",
- "google.generativeai.protos.TaskType.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.TaskType.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.TaskType.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.TaskType.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.TextCompletion": "google.ai.generativelanguage_v1beta.types.text_service.TextCompletion",
- "google.generativeai.protos.TextCompletion.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TextCompletion.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TextCompletion.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TextCompletion.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TextCompletion.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TextCompletion.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TextCompletion.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TextCompletion.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TextPrompt": "google.ai.generativelanguage_v1beta.types.text_service.TextPrompt",
- "google.generativeai.protos.TextPrompt.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TextPrompt.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TextPrompt.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TextPrompt.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TextPrompt.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TextPrompt.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TextPrompt.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TextPrompt.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Tool": "google.ai.generativelanguage_v1beta.types.content.Tool",
- "google.generativeai.protos.Tool.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.Tool.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.Tool.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.Tool.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.Tool.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.Tool.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.Tool.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.Tool.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.ToolConfig": "google.ai.generativelanguage_v1beta.types.content.ToolConfig",
- "google.generativeai.protos.ToolConfig.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.ToolConfig.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.ToolConfig.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.ToolConfig.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.ToolConfig.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.ToolConfig.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.ToolConfig.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.ToolConfig.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TransferOwnershipRequest": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipRequest",
- "google.generativeai.protos.TransferOwnershipRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TransferOwnershipRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TransferOwnershipRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TransferOwnershipRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TransferOwnershipRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TransferOwnershipRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TransferOwnershipRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TransferOwnershipRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TransferOwnershipResponse": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipResponse",
- "google.generativeai.protos.TransferOwnershipResponse.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TransferOwnershipResponse.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TransferOwnershipResponse.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TransferOwnershipResponse.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TransferOwnershipResponse.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TransferOwnershipResponse.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TransferOwnershipResponse.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TransferOwnershipResponse.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TunedModel": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel",
- "google.generativeai.protos.TunedModel.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TunedModel.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TunedModel.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TunedModel.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TunedModel.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TunedModel.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TunedModel.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TunedModel.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TunedModelSource": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModelSource",
- "google.generativeai.protos.TunedModelSource.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TunedModelSource.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TunedModelSource.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TunedModelSource.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TunedModelSource.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TunedModelSource.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TunedModelSource.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TunedModelSource.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TuningExample": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExample",
- "google.generativeai.protos.TuningExample.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TuningExample.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TuningExample.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TuningExample.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TuningExample.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TuningExample.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TuningExample.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TuningExample.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TuningExamples": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExamples",
- "google.generativeai.protos.TuningExamples.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TuningExamples.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TuningExamples.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TuningExamples.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TuningExamples.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TuningExamples.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TuningExamples.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TuningExamples.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TuningSnapshot": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningSnapshot",
- "google.generativeai.protos.TuningSnapshot.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TuningSnapshot.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TuningSnapshot.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TuningSnapshot.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TuningSnapshot.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TuningSnapshot.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TuningSnapshot.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TuningSnapshot.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.TuningTask": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningTask",
- "google.generativeai.protos.TuningTask.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.TuningTask.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.TuningTask.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.TuningTask.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.TuningTask.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.TuningTask.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.TuningTask.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.TuningTask.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.Type": "google.ai.generativelanguage_v1beta.types.content.Type",
- "google.generativeai.protos.Type.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.protos.Type.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.protos.Type.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.protos.Type.__len__": "enum.EnumType.__len__",
- "google.generativeai.protos.UpdateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.UpdateCachedContentRequest",
- "google.generativeai.protos.UpdateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.UpdateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateChunkRequest",
- "google.generativeai.protos.UpdateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdateChunkRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.UpdateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateCorpusRequest",
- "google.generativeai.protos.UpdateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdateCorpusRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.UpdateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateDocumentRequest",
- "google.generativeai.protos.UpdateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdateDocumentRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.UpdatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.UpdatePermissionRequest",
- "google.generativeai.protos.UpdatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdatePermissionRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.UpdateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.UpdateTunedModelRequest",
- "google.generativeai.protos.UpdateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.UpdateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.UpdateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.UpdateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.UpdateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.UpdateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.UpdateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.UpdateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.protos.VideoMetadata": "google.ai.generativelanguage_v1beta.types.file.VideoMetadata",
- "google.generativeai.protos.VideoMetadata.copy_from": "proto.message.MessageMeta.copy_from",
- "google.generativeai.protos.VideoMetadata.deserialize": "proto.message.MessageMeta.deserialize",
- "google.generativeai.protos.VideoMetadata.from_json": "proto.message.MessageMeta.from_json",
- "google.generativeai.protos.VideoMetadata.pb": "proto.message.MessageMeta.pb",
- "google.generativeai.protos.VideoMetadata.serialize": "proto.message.MessageMeta.serialize",
- "google.generativeai.protos.VideoMetadata.to_dict": "proto.message.MessageMeta.to_dict",
- "google.generativeai.protos.VideoMetadata.to_json": "proto.message.MessageMeta.to_json",
- "google.generativeai.protos.VideoMetadata.wrap": "proto.message.MessageMeta.wrap",
- "google.generativeai.types": "google.generativeai.types",
- "google.generativeai.types.AsyncGenerateContentResponse": "google.generativeai.types.generation_types.AsyncGenerateContentResponse",
- "google.generativeai.types.AsyncGenerateContentResponse.__init__": "google.generativeai.types.generation_types.BaseGenerateContentResponse.__init__",
- "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_aiterator",
- "google.generativeai.types.AsyncGenerateContentResponse.from_response": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_response",
- "google.generativeai.types.AsyncGenerateContentResponse.resolve": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.resolve",
- "google.generativeai.types.AsyncGenerateContentResponse.to_dict": "google.generativeai.types.generation_types.BaseGenerateContentResponse.to_dict",
- "google.generativeai.types.BlobDict": "google.generativeai.types.content_types.BlobDict",
- "google.generativeai.types.BlockedPromptException": "google.generativeai.types.generation_types.BlockedPromptException",
- "google.generativeai.types.BlockedReason": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter.BlockedReason",
- "google.generativeai.types.BlockedReason.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.types.BlockedReason.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.types.BlockedReason.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.types.BlockedReason.__len__": "enum.EnumType.__len__",
- "google.generativeai.types.BrokenResponseError": "google.generativeai.types.generation_types.BrokenResponseError",
- "google.generativeai.types.CallableFunctionDeclaration": "google.generativeai.types.content_types.CallableFunctionDeclaration",
- "google.generativeai.types.CallableFunctionDeclaration.__call__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__call__",
- "google.generativeai.types.CallableFunctionDeclaration.__init__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__init__",
- "google.generativeai.types.CallableFunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
- "google.generativeai.types.CitationMetadataDict": "google.generativeai.types.citation_types.CitationMetadataDict",
- "google.generativeai.types.CitationSourceDict": "google.generativeai.types.citation_types.CitationSourceDict",
- "google.generativeai.types.ContentDict": "google.generativeai.types.content_types.ContentDict",
- "google.generativeai.types.ContentFilterDict": "google.generativeai.types.safety_types.ContentFilterDict",
- "google.generativeai.types.File": "google.generativeai.types.file_types.File",
- "google.generativeai.types.File.__init__": "google.generativeai.types.file_types.File.__init__",
- "google.generativeai.types.File.delete": "google.generativeai.types.file_types.File.delete",
- "google.generativeai.types.File.to_dict": "google.generativeai.types.file_types.File.to_dict",
- "google.generativeai.types.File.to_proto": "google.generativeai.types.file_types.File.to_proto",
- "google.generativeai.types.FileDataDict": "google.generativeai.types.file_types.FileDataDict",
- "google.generativeai.types.FunctionDeclaration": "google.generativeai.types.content_types.FunctionDeclaration",
- "google.generativeai.types.FunctionDeclaration.__init__": "google.generativeai.types.content_types.FunctionDeclaration.__init__",
- "google.generativeai.types.FunctionDeclaration.from_function": "google.generativeai.types.content_types.FunctionDeclaration.from_function",
- "google.generativeai.types.FunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
- "google.generativeai.types.FunctionDeclaration.to_proto": "google.generativeai.types.content_types.FunctionDeclaration.to_proto",
- "google.generativeai.types.FunctionLibrary": "google.generativeai.types.content_types.FunctionLibrary",
- "google.generativeai.types.FunctionLibrary.__call__": "google.generativeai.types.content_types.FunctionLibrary.__call__",
- "google.generativeai.types.FunctionLibrary.__getitem__": "google.generativeai.types.content_types.FunctionLibrary.__getitem__",
- "google.generativeai.types.FunctionLibrary.__init__": "google.generativeai.types.content_types.FunctionLibrary.__init__",
- "google.generativeai.types.FunctionLibrary.to_proto": "google.generativeai.types.content_types.FunctionLibrary.to_proto",
- "google.generativeai.types.GenerateContentResponse": "google.generativeai.types.generation_types.GenerateContentResponse",
- "google.generativeai.types.GenerateContentResponse.__iter__": "google.generativeai.types.generation_types.GenerateContentResponse.__iter__",
- "google.generativeai.types.GenerateContentResponse.from_iterator": "google.generativeai.types.generation_types.GenerateContentResponse.from_iterator",
- "google.generativeai.types.GenerateContentResponse.from_response": "google.generativeai.types.generation_types.GenerateContentResponse.from_response",
- "google.generativeai.types.GenerateContentResponse.resolve": "google.generativeai.types.generation_types.GenerateContentResponse.resolve",
- "google.generativeai.types.GenerationConfig": "google.generativeai.types.generation_types.GenerationConfig",
- "google.generativeai.types.GenerationConfig.__eq__": "google.generativeai.types.generation_types.GenerationConfig.__eq__",
- "google.generativeai.types.GenerationConfig.__init__": "google.generativeai.types.generation_types.GenerationConfig.__init__",
- "google.generativeai.types.GenerationConfigDict": "google.generativeai.types.generation_types.GenerationConfigDict",
- "google.generativeai.types.HarmBlockThreshold": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting.HarmBlockThreshold",
- "google.generativeai.types.HarmBlockThreshold.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.types.HarmBlockThreshold.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.types.HarmBlockThreshold.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.types.HarmBlockThreshold.__len__": "enum.EnumType.__len__",
- "google.generativeai.types.HarmCategory": "google.generativeai.types.safety_types.HarmCategory",
- "google.generativeai.types.HarmCategory.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.types.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.types.HarmCategory.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.types.HarmCategory.__len__": "enum.EnumType.__len__",
- "google.generativeai.types.HarmProbability": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating.HarmProbability",
- "google.generativeai.types.HarmProbability.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.types.HarmProbability.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.types.HarmProbability.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.types.HarmProbability.__len__": "enum.EnumType.__len__",
- "google.generativeai.types.IncompleteIterationError": "google.generativeai.types.generation_types.IncompleteIterationError",
- "google.generativeai.types.Model": "google.generativeai.types.model_types.Model",
- "google.generativeai.types.Model.__eq__": "google.generativeai.types.model_types.Model.__eq__",
- "google.generativeai.types.Model.__init__": "google.generativeai.types.model_types.Model.__init__",
- "google.generativeai.types.PartDict": "google.generativeai.types.content_types.PartDict",
- "google.generativeai.types.Permission": "google.generativeai.types.permission_types.Permission",
- "google.generativeai.types.Permission.__eq__": "google.generativeai.types.permission_types.Permission.__eq__",
- "google.generativeai.types.Permission.__init__": "google.generativeai.types.permission_types.Permission.__init__",
- "google.generativeai.types.Permission.delete": "google.generativeai.types.permission_types.Permission.delete",
- "google.generativeai.types.Permission.delete_async": "google.generativeai.types.permission_types.Permission.delete_async",
- "google.generativeai.types.Permission.get": "google.generativeai.types.permission_types.Permission.get",
- "google.generativeai.types.Permission.get_async": "google.generativeai.types.permission_types.Permission.get_async",
- "google.generativeai.types.Permission.to_dict": "google.generativeai.types.permission_types.Permission.to_dict",
- "google.generativeai.types.Permission.update": "google.generativeai.types.permission_types.Permission.update",
- "google.generativeai.types.Permission.update_async": "google.generativeai.types.permission_types.Permission.update_async",
- "google.generativeai.types.Permissions": "google.generativeai.types.permission_types.Permissions",
- "google.generativeai.types.Permissions.__init__": "google.generativeai.types.permission_types.Permissions.__init__",
- "google.generativeai.types.Permissions.__iter__": "google.generativeai.types.permission_types.Permissions.__iter__",
- "google.generativeai.types.Permissions.create": "google.generativeai.types.permission_types.Permissions.create",
- "google.generativeai.types.Permissions.create_async": "google.generativeai.types.permission_types.Permissions.create_async",
- "google.generativeai.types.Permissions.get": "google.generativeai.types.permission_types.Permissions.get",
- "google.generativeai.types.Permissions.get_async": "google.generativeai.types.permission_types.Permissions.get_async",
- "google.generativeai.types.Permissions.list": "google.generativeai.types.permission_types.Permissions.list",
- "google.generativeai.types.Permissions.list_async": "google.generativeai.types.permission_types.Permissions.list_async",
- "google.generativeai.types.Permissions.transfer_ownership": "google.generativeai.types.permission_types.Permissions.transfer_ownership",
- "google.generativeai.types.Permissions.transfer_ownership_async": "google.generativeai.types.permission_types.Permissions.transfer_ownership_async",
- "google.generativeai.types.RequestOptions": "google.generativeai.types.helper_types.RequestOptions",
- "google.generativeai.types.RequestOptions.__contains__": "collections.abc.Mapping.__contains__",
- "google.generativeai.types.RequestOptions.__eq__": "google.generativeai.types.helper_types.RequestOptions.__eq__",
- "google.generativeai.types.RequestOptions.__getitem__": "google.generativeai.types.helper_types.RequestOptions.__getitem__",
- "google.generativeai.types.RequestOptions.__init__": "google.generativeai.types.helper_types.RequestOptions.__init__",
- "google.generativeai.types.RequestOptions.__iter__": "google.generativeai.types.helper_types.RequestOptions.__iter__",
- "google.generativeai.types.RequestOptions.__len__": "google.generativeai.types.helper_types.RequestOptions.__len__",
- "google.generativeai.types.RequestOptions.get": "collections.abc.Mapping.get",
- "google.generativeai.types.RequestOptions.items": "collections.abc.Mapping.items",
- "google.generativeai.types.RequestOptions.keys": "collections.abc.Mapping.keys",
- "google.generativeai.types.RequestOptions.values": "collections.abc.Mapping.values",
- "google.generativeai.types.SafetyFeedbackDict": "google.generativeai.types.safety_types.SafetyFeedbackDict",
- "google.generativeai.types.SafetyRatingDict": "google.generativeai.types.safety_types.SafetyRatingDict",
- "google.generativeai.types.SafetySettingDict": "google.generativeai.types.safety_types.SafetySettingDict",
- "google.generativeai.types.Status": "google.rpc.status_pb2.Status",
- "google.generativeai.types.Status.RegisterExtension": "google.protobuf.message.Message.RegisterExtension",
- "google.generativeai.types.StopCandidateException": "google.generativeai.types.generation_types.StopCandidateException",
- "google.generativeai.types.Tool": "google.generativeai.types.content_types.Tool",
- "google.generativeai.types.Tool.__call__": "google.generativeai.types.content_types.Tool.__call__",
- "google.generativeai.types.Tool.__getitem__": "google.generativeai.types.content_types.Tool.__getitem__",
- "google.generativeai.types.Tool.__init__": "google.generativeai.types.content_types.Tool.__init__",
- "google.generativeai.types.Tool.to_proto": "google.generativeai.types.content_types.Tool.to_proto",
- "google.generativeai.types.ToolDict": "google.generativeai.types.content_types.ToolDict",
- "google.generativeai.types.TunedModel": "google.generativeai.types.model_types.TunedModel",
- "google.generativeai.types.TunedModel.__eq__": "google.generativeai.types.model_types.TunedModel.__eq__",
- "google.generativeai.types.TunedModel.__init__": "google.generativeai.types.model_types.TunedModel.__init__",
- "google.generativeai.types.TunedModelState": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel.State",
- "google.generativeai.types.TunedModelState.__contains__": "enum.EnumType.__contains__",
- "google.generativeai.types.TunedModelState.__getitem__": "enum.EnumType.__getitem__",
- "google.generativeai.types.TunedModelState.__iter__": "enum.EnumType.__iter__",
- "google.generativeai.types.TunedModelState.__len__": "enum.EnumType.__len__",
- "google.generativeai.types.TypedDict": "typing_extensions.TypedDict",
- "google.generativeai.types.get_default_file_client": "google.generativeai.client.get_default_file_client",
- "google.generativeai.types.to_file_data": "google.generativeai.types.file_types.to_file_data",
- "google.generativeai.update_tuned_model": "google.generativeai.models.update_tuned_model",
- "google.generativeai.upload_file": "google.generativeai.files.upload_file"
- },
- "py_module_names": {
- "google.generativeai": "google.generativeai"
- }
-}
diff --git a/docs/api/google/generativeai/_redirects.yaml b/docs/api/google/generativeai/_redirects.yaml
deleted file mode 100644
index cea696430..000000000
--- a/docs/api/google/generativeai/_redirects.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-redirects:
-- from: /api/python/google/generativeai/GenerationConfig
- to: /api/python/google/generativeai/types/GenerationConfig
-- from: /api/python/google/generativeai/protos/ContentFilter/BlockedReason
- to: /api/python/google/generativeai/types/BlockedReason
-- from: /api/python/google/generativeai/protos/SafetyRating/HarmProbability
- to: /api/python/google/generativeai/types/HarmProbability
-- from: /api/python/google/generativeai/protos/SafetySetting/HarmBlockThreshold
- to: /api/python/google/generativeai/types/HarmBlockThreshold
-- from: /api/python/google/generativeai/protos/TunedModel/State
- to: /api/python/google/generativeai/types/TunedModelState
-- from: /api/python/google/generativeai/types/ModelNameOptions
- to: /api/python/google/generativeai/types/AnyModelNameOptions
diff --git a/docs/api/google/generativeai/_toc.yaml b/docs/api/google/generativeai/_toc.yaml
deleted file mode 100644
index 7d18dbf66..000000000
--- a/docs/api/google/generativeai/_toc.yaml
+++ /dev/null
@@ -1,509 +0,0 @@
-toc:
-- title: google.generativeai
- section:
- - title: Overview
- path: /api/python/google/generativeai
- - title: ChatSession
- path: /api/python/google/generativeai/ChatSession
- - title: GenerativeModel
- path: /api/python/google/generativeai/GenerativeModel
- - title: configure
- path: /api/python/google/generativeai/configure
- - title: create_tuned_model
- path: /api/python/google/generativeai/create_tuned_model
- - title: delete_file
- path: /api/python/google/generativeai/delete_file
- - title: delete_tuned_model
- path: /api/python/google/generativeai/delete_tuned_model
- - title: embed_content
- path: /api/python/google/generativeai/embed_content
- - title: embed_content_async
- path: /api/python/google/generativeai/embed_content_async
- - title: get_base_model
- path: /api/python/google/generativeai/get_base_model
- - title: get_file
- path: /api/python/google/generativeai/get_file
- - title: get_model
- path: /api/python/google/generativeai/get_model
- - title: get_operation
- path: /api/python/google/generativeai/get_operation
- - title: get_tuned_model
- path: /api/python/google/generativeai/get_tuned_model
- - title: list_files
- path: /api/python/google/generativeai/list_files
- - title: list_models
- path: /api/python/google/generativeai/list_models
- - title: list_operations
- path: /api/python/google/generativeai/list_operations
- - title: list_tuned_models
- path: /api/python/google/generativeai/list_tuned_models
- - title: update_tuned_model
- path: /api/python/google/generativeai/update_tuned_model
- - title: upload_file
- path: /api/python/google/generativeai/upload_file
- - title: caching
- section:
- - title: Overview
- path: /api/python/google/generativeai/caching
- - title: CachedContent
- path: /api/python/google/generativeai/caching/CachedContent
- - title: get_default_cache_client
- path: /api/python/google/generativeai/caching/get_default_cache_client
- - title: protos
- section:
- - title: Overview
- path: /api/python/google/generativeai/protos
- - title: AttributionSourceId
- path: /api/python/google/generativeai/protos/AttributionSourceId
- - title: AttributionSourceId.GroundingPassageId
- path: /api/python/google/generativeai/protos/AttributionSourceId/GroundingPassageId
- - title: AttributionSourceId.SemanticRetrieverChunk
- path: /api/python/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk
- - title: BatchCreateChunksRequest
- path: /api/python/google/generativeai/protos/BatchCreateChunksRequest
- - title: BatchCreateChunksResponse
- path: /api/python/google/generativeai/protos/BatchCreateChunksResponse
- - title: BatchDeleteChunksRequest
- path: /api/python/google/generativeai/protos/BatchDeleteChunksRequest
- - title: BatchEmbedContentsRequest
- path: /api/python/google/generativeai/protos/BatchEmbedContentsRequest
- - title: BatchEmbedContentsResponse
- path: /api/python/google/generativeai/protos/BatchEmbedContentsResponse
- - title: BatchEmbedTextRequest
- path: /api/python/google/generativeai/protos/BatchEmbedTextRequest
- - title: BatchEmbedTextResponse
- path: /api/python/google/generativeai/protos/BatchEmbedTextResponse
- - title: BatchUpdateChunksRequest
- path: /api/python/google/generativeai/protos/BatchUpdateChunksRequest
- - title: BatchUpdateChunksResponse
- path: /api/python/google/generativeai/protos/BatchUpdateChunksResponse
- - title: Blob
- path: /api/python/google/generativeai/protos/Blob
- - title: CachedContent
- path: /api/python/google/generativeai/protos/CachedContent
- - title: CachedContent.UsageMetadata
- path: /api/python/google/generativeai/protos/CachedContent/UsageMetadata
- - title: Candidate
- path: /api/python/google/generativeai/protos/Candidate
- - title: Candidate.FinishReason
- path: /api/python/google/generativeai/protos/Candidate/FinishReason
- - title: Chunk
- path: /api/python/google/generativeai/protos/Chunk
- - title: Chunk.State
- path: /api/python/google/generativeai/protos/Chunk/State
- - title: ChunkData
- path: /api/python/google/generativeai/protos/ChunkData
- - title: CitationMetadata
- path: /api/python/google/generativeai/protos/CitationMetadata
- - title: CitationSource
- path: /api/python/google/generativeai/protos/CitationSource
- - title: CodeExecution
- path: /api/python/google/generativeai/protos/CodeExecution
- - title: CodeExecutionResult
- path: /api/python/google/generativeai/protos/CodeExecutionResult
- - title: CodeExecutionResult.Outcome
- path: /api/python/google/generativeai/protos/CodeExecutionResult/Outcome
- - title: Condition
- path: /api/python/google/generativeai/protos/Condition
- - title: Condition.Operator
- path: /api/python/google/generativeai/protos/Condition/Operator
- - title: Content
- path: /api/python/google/generativeai/protos/Content
- - title: ContentEmbedding
- path: /api/python/google/generativeai/protos/ContentEmbedding
- - title: ContentFilter
- path: /api/python/google/generativeai/protos/ContentFilter
- - title: Corpus
- path: /api/python/google/generativeai/protos/Corpus
- - title: CountMessageTokensRequest
- path: /api/python/google/generativeai/protos/CountMessageTokensRequest
- - title: CountMessageTokensResponse
- path: /api/python/google/generativeai/protos/CountMessageTokensResponse
- - title: CountTextTokensRequest
- path: /api/python/google/generativeai/protos/CountTextTokensRequest
- - title: CountTextTokensResponse
- path: /api/python/google/generativeai/protos/CountTextTokensResponse
- - title: CountTokensRequest
- path: /api/python/google/generativeai/protos/CountTokensRequest
- - title: CountTokensResponse
- path: /api/python/google/generativeai/protos/CountTokensResponse
- - title: CreateCachedContentRequest
- path: /api/python/google/generativeai/protos/CreateCachedContentRequest
- - title: CreateChunkRequest
- path: /api/python/google/generativeai/protos/CreateChunkRequest
- - title: CreateCorpusRequest
- path: /api/python/google/generativeai/protos/CreateCorpusRequest
- - title: CreateDocumentRequest
- path: /api/python/google/generativeai/protos/CreateDocumentRequest
- - title: CreateFileRequest
- path: /api/python/google/generativeai/protos/CreateFileRequest
- - title: CreateFileResponse
- path: /api/python/google/generativeai/protos/CreateFileResponse
- - title: CreatePermissionRequest
- path: /api/python/google/generativeai/protos/CreatePermissionRequest
- - title: CreateTunedModelMetadata
- path: /api/python/google/generativeai/protos/CreateTunedModelMetadata
- - title: CreateTunedModelRequest
- path: /api/python/google/generativeai/protos/CreateTunedModelRequest
- - title: CustomMetadata
- path: /api/python/google/generativeai/protos/CustomMetadata
- - title: Dataset
- path: /api/python/google/generativeai/protos/Dataset
- - title: DeleteCachedContentRequest
- path: /api/python/google/generativeai/protos/DeleteCachedContentRequest
- - title: DeleteChunkRequest
- path: /api/python/google/generativeai/protos/DeleteChunkRequest
- - title: DeleteCorpusRequest
- path: /api/python/google/generativeai/protos/DeleteCorpusRequest
- - title: DeleteDocumentRequest
- path: /api/python/google/generativeai/protos/DeleteDocumentRequest
- - title: DeleteFileRequest
- path: /api/python/google/generativeai/protos/DeleteFileRequest
- - title: DeletePermissionRequest
- path: /api/python/google/generativeai/protos/DeletePermissionRequest
- - title: DeleteTunedModelRequest
- path: /api/python/google/generativeai/protos/DeleteTunedModelRequest
- - title: Document
- path: /api/python/google/generativeai/protos/Document
- - title: DynamicRetrievalConfig
- path: /api/python/google/generativeai/protos/DynamicRetrievalConfig
- - title: DynamicRetrievalConfig.Mode
- path: /api/python/google/generativeai/protos/DynamicRetrievalConfig/Mode
- - title: EmbedContentRequest
- path: /api/python/google/generativeai/protos/EmbedContentRequest
- - title: EmbedContentResponse
- path: /api/python/google/generativeai/protos/EmbedContentResponse
- - title: EmbedTextRequest
- path: /api/python/google/generativeai/protos/EmbedTextRequest
- - title: EmbedTextResponse
- path: /api/python/google/generativeai/protos/EmbedTextResponse
- - title: Embedding
- path: /api/python/google/generativeai/protos/Embedding
- - title: Example
- path: /api/python/google/generativeai/protos/Example
- - title: ExecutableCode
- path: /api/python/google/generativeai/protos/ExecutableCode
- - title: ExecutableCode.Language
- path: /api/python/google/generativeai/protos/ExecutableCode/Language
- - title: File
- path: /api/python/google/generativeai/protos/File
- - title: File.State
- path: /api/python/google/generativeai/protos/File/State
- - title: FileData
- path: /api/python/google/generativeai/protos/FileData
- - title: FunctionCall
- path: /api/python/google/generativeai/protos/FunctionCall
- - title: FunctionCallingConfig
- path: /api/python/google/generativeai/protos/FunctionCallingConfig
- - title: FunctionCallingConfig.Mode
- path: /api/python/google/generativeai/protos/FunctionCallingConfig/Mode
- - title: FunctionDeclaration
- path: /api/python/google/generativeai/protos/FunctionDeclaration
- - title: FunctionResponse
- path: /api/python/google/generativeai/protos/FunctionResponse
- - title: GenerateAnswerRequest
- path: /api/python/google/generativeai/protos/GenerateAnswerRequest
- - title: GenerateAnswerRequest.AnswerStyle
- path: /api/python/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle
- - title: GenerateAnswerResponse
- path: /api/python/google/generativeai/protos/GenerateAnswerResponse
- - title: GenerateAnswerResponse.InputFeedback
- path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback
- - title: GenerateAnswerResponse.InputFeedback.BlockReason
- path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason
- - title: GenerateContentRequest
- path: /api/python/google/generativeai/protos/GenerateContentRequest
- - title: GenerateContentResponse
- path: /api/python/google/generativeai/protos/GenerateContentResponse
- - title: GenerateContentResponse.PromptFeedback
- path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback
- - title: GenerateContentResponse.PromptFeedback.BlockReason
- path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason
- - title: GenerateContentResponse.UsageMetadata
- path: /api/python/google/generativeai/protos/GenerateContentResponse/UsageMetadata
- - title: GenerateMessageRequest
- path: /api/python/google/generativeai/protos/GenerateMessageRequest
- - title: GenerateMessageResponse
- path: /api/python/google/generativeai/protos/GenerateMessageResponse
- - title: GenerateTextRequest
- path: /api/python/google/generativeai/protos/GenerateTextRequest
- - title: GenerateTextResponse
- path: /api/python/google/generativeai/protos/GenerateTextResponse
- - title: GenerationConfig
- path: /api/python/google/generativeai/protos/GenerationConfig
- - title: GetCachedContentRequest
- path: /api/python/google/generativeai/protos/GetCachedContentRequest
- - title: GetChunkRequest
- path: /api/python/google/generativeai/protos/GetChunkRequest
- - title: GetCorpusRequest
- path: /api/python/google/generativeai/protos/GetCorpusRequest
- - title: GetDocumentRequest
- path: /api/python/google/generativeai/protos/GetDocumentRequest
- - title: GetFileRequest
- path: /api/python/google/generativeai/protos/GetFileRequest
- - title: GetModelRequest
- path: /api/python/google/generativeai/protos/GetModelRequest
- - title: GetPermissionRequest
- path: /api/python/google/generativeai/protos/GetPermissionRequest
- - title: GetTunedModelRequest
- path: /api/python/google/generativeai/protos/GetTunedModelRequest
- - title: GoogleSearchRetrieval
- path: /api/python/google/generativeai/protos/GoogleSearchRetrieval
- - title: GroundingAttribution
- path: /api/python/google/generativeai/protos/GroundingAttribution
- - title: GroundingChunk
- path: /api/python/google/generativeai/protos/GroundingChunk
- - title: GroundingChunk.Web
- path: /api/python/google/generativeai/protos/GroundingChunk/Web
- - title: GroundingMetadata
- path: /api/python/google/generativeai/protos/GroundingMetadata
- - title: GroundingPassage
- path: /api/python/google/generativeai/protos/GroundingPassage
- - title: GroundingPassages
- path: /api/python/google/generativeai/protos/GroundingPassages
- - title: GroundingSupport
- path: /api/python/google/generativeai/protos/GroundingSupport
- - title: HarmCategory
- path: /api/python/google/generativeai/protos/HarmCategory
- - title: Hyperparameters
- path: /api/python/google/generativeai/protos/Hyperparameters
- - title: ListCachedContentsRequest
- path: /api/python/google/generativeai/protos/ListCachedContentsRequest
- - title: ListCachedContentsResponse
- path: /api/python/google/generativeai/protos/ListCachedContentsResponse
- - title: ListChunksRequest
- path: /api/python/google/generativeai/protos/ListChunksRequest
- - title: ListChunksResponse
- path: /api/python/google/generativeai/protos/ListChunksResponse
- - title: ListCorporaRequest
- path: /api/python/google/generativeai/protos/ListCorporaRequest
- - title: ListCorporaResponse
- path: /api/python/google/generativeai/protos/ListCorporaResponse
- - title: ListDocumentsRequest
- path: /api/python/google/generativeai/protos/ListDocumentsRequest
- - title: ListDocumentsResponse
- path: /api/python/google/generativeai/protos/ListDocumentsResponse
- - title: ListFilesRequest
- path: /api/python/google/generativeai/protos/ListFilesRequest
- - title: ListFilesResponse
- path: /api/python/google/generativeai/protos/ListFilesResponse
- - title: ListModelsRequest
- path: /api/python/google/generativeai/protos/ListModelsRequest
- - title: ListModelsResponse
- path: /api/python/google/generativeai/protos/ListModelsResponse
- - title: ListPermissionsRequest
- path: /api/python/google/generativeai/protos/ListPermissionsRequest
- - title: ListPermissionsResponse
- path: /api/python/google/generativeai/protos/ListPermissionsResponse
- - title: ListTunedModelsRequest
- path: /api/python/google/generativeai/protos/ListTunedModelsRequest
- - title: ListTunedModelsResponse
- path: /api/python/google/generativeai/protos/ListTunedModelsResponse
- - title: LogprobsResult
- path: /api/python/google/generativeai/protos/LogprobsResult
- - title: LogprobsResult.Candidate
- path: /api/python/google/generativeai/protos/LogprobsResult/Candidate
- - title: LogprobsResult.TopCandidates
- path: /api/python/google/generativeai/protos/LogprobsResult/TopCandidates
- - title: Message
- path: /api/python/google/generativeai/protos/Message
- - title: MessagePrompt
- path: /api/python/google/generativeai/protos/MessagePrompt
- - title: MetadataFilter
- path: /api/python/google/generativeai/protos/MetadataFilter
- - title: Model
- path: /api/python/google/generativeai/protos/Model
- - title: Part
- path: /api/python/google/generativeai/protos/Part
- - title: Permission
- path: /api/python/google/generativeai/protos/Permission
- - title: Permission.GranteeType
- path: /api/python/google/generativeai/protos/Permission/GranteeType
- - title: Permission.Role
- path: /api/python/google/generativeai/protos/Permission/Role
- - title: PredictRequest
- path: /api/python/google/generativeai/protos/PredictRequest
- - title: PredictResponse
- path: /api/python/google/generativeai/protos/PredictResponse
- - title: QueryCorpusRequest
- path: /api/python/google/generativeai/protos/QueryCorpusRequest
- - title: QueryCorpusResponse
- path: /api/python/google/generativeai/protos/QueryCorpusResponse
- - title: QueryDocumentRequest
- path: /api/python/google/generativeai/protos/QueryDocumentRequest
- - title: QueryDocumentResponse
- path: /api/python/google/generativeai/protos/QueryDocumentResponse
- - title: RelevantChunk
- path: /api/python/google/generativeai/protos/RelevantChunk
- - title: RetrievalMetadata
- path: /api/python/google/generativeai/protos/RetrievalMetadata
- - title: SafetyFeedback
- path: /api/python/google/generativeai/protos/SafetyFeedback
- - title: SafetyRating
- path: /api/python/google/generativeai/protos/SafetyRating
- - title: SafetySetting
- path: /api/python/google/generativeai/protos/SafetySetting
- - title: Schema
- path: /api/python/google/generativeai/protos/Schema
- - title: Schema.PropertiesEntry
- path: /api/python/google/generativeai/protos/Schema/PropertiesEntry
- - title: SearchEntryPoint
- path: /api/python/google/generativeai/protos/SearchEntryPoint
- - title: Segment
- path: /api/python/google/generativeai/protos/Segment
- - title: SemanticRetrieverConfig
- path: /api/python/google/generativeai/protos/SemanticRetrieverConfig
- - title: StringList
- path: /api/python/google/generativeai/protos/StringList
- - title: TaskType
- path: /api/python/google/generativeai/protos/TaskType
- - title: TextCompletion
- path: /api/python/google/generativeai/protos/TextCompletion
- - title: TextPrompt
- path: /api/python/google/generativeai/protos/TextPrompt
- - title: Tool
- path: /api/python/google/generativeai/protos/Tool
- - title: ToolConfig
- path: /api/python/google/generativeai/protos/ToolConfig
- - title: TransferOwnershipRequest
- path: /api/python/google/generativeai/protos/TransferOwnershipRequest
- - title: TransferOwnershipResponse
- path: /api/python/google/generativeai/protos/TransferOwnershipResponse
- - title: TunedModel
- path: /api/python/google/generativeai/protos/TunedModel
- - title: TunedModelSource
- path: /api/python/google/generativeai/protos/TunedModelSource
- - title: TuningExample
- path: /api/python/google/generativeai/protos/TuningExample
- - title: TuningExamples
- path: /api/python/google/generativeai/protos/TuningExamples
- - title: TuningSnapshot
- path: /api/python/google/generativeai/protos/TuningSnapshot
- - title: TuningTask
- path: /api/python/google/generativeai/protos/TuningTask
- - title: Type
- path: /api/python/google/generativeai/protos/Type
- - title: UpdateCachedContentRequest
- path: /api/python/google/generativeai/protos/UpdateCachedContentRequest
- - title: UpdateChunkRequest
- path: /api/python/google/generativeai/protos/UpdateChunkRequest
- - title: UpdateCorpusRequest
- path: /api/python/google/generativeai/protos/UpdateCorpusRequest
- - title: UpdateDocumentRequest
- path: /api/python/google/generativeai/protos/UpdateDocumentRequest
- - title: UpdatePermissionRequest
- path: /api/python/google/generativeai/protos/UpdatePermissionRequest
- - title: UpdateTunedModelRequest
- path: /api/python/google/generativeai/protos/UpdateTunedModelRequest
- - title: VideoMetadata
- path: /api/python/google/generativeai/protos/VideoMetadata
- - title: types
- section:
- - title: Overview
- path: /api/python/google/generativeai/types
- - title: AnyModelNameOptions
- path: /api/python/google/generativeai/types/AnyModelNameOptions
- - title: AsyncGenerateContentResponse
- path: /api/python/google/generativeai/types/AsyncGenerateContentResponse
- - title: BaseModelNameOptions
- path: /api/python/google/generativeai/types/BaseModelNameOptions
- - title: BlobDict
- path: /api/python/google/generativeai/types/BlobDict
- - title: BlobType
- path: /api/python/google/generativeai/types/BlobType
- - title: BlockedPromptException
- path: /api/python/google/generativeai/types/BlockedPromptException
- - title: BlockedReason
- path: /api/python/google/generativeai/types/BlockedReason
- - title: BrokenResponseError
- path: /api/python/google/generativeai/types/BrokenResponseError
- - title: CallableFunctionDeclaration
- path: /api/python/google/generativeai/types/CallableFunctionDeclaration
- - title: CitationMetadataDict
- path: /api/python/google/generativeai/types/CitationMetadataDict
- - title: CitationSourceDict
- path: /api/python/google/generativeai/types/CitationSourceDict
- - title: ContentDict
- path: /api/python/google/generativeai/types/ContentDict
- - title: ContentFilterDict
- path: /api/python/google/generativeai/types/ContentFilterDict
- - title: ContentType
- path: /api/python/google/generativeai/types/ContentType
- - title: ContentsType
- path: /api/python/google/generativeai/types/ContentsType
- - title: File
- path: /api/python/google/generativeai/types/File
- - title: FileDataDict
- path: /api/python/google/generativeai/types/FileDataDict
- - title: FileDataType
- path: /api/python/google/generativeai/types/FileDataType
- - title: FunctionDeclaration
- path: /api/python/google/generativeai/types/FunctionDeclaration
- - title: FunctionDeclarationType
- path: /api/python/google/generativeai/types/FunctionDeclarationType
- - title: FunctionLibrary
- path: /api/python/google/generativeai/types/FunctionLibrary
- - title: FunctionLibraryType
- path: /api/python/google/generativeai/types/FunctionLibraryType
- - title: GenerateContentResponse
- path: /api/python/google/generativeai/types/GenerateContentResponse
- - title: GenerationConfig
- path: /api/python/google/generativeai/types/GenerationConfig
- - title: GenerationConfigDict
- path: /api/python/google/generativeai/types/GenerationConfigDict
- - title: GenerationConfigType
- path: /api/python/google/generativeai/types/GenerationConfigType
- - title: HarmBlockThreshold
- path: /api/python/google/generativeai/types/HarmBlockThreshold
- - title: HarmCategory
- path: /api/python/google/generativeai/types/HarmCategory
- - title: HarmProbability
- path: /api/python/google/generativeai/types/HarmProbability
- - title: IncompleteIterationError
- path: /api/python/google/generativeai/types/IncompleteIterationError
- - title: Model
- path: /api/python/google/generativeai/types/Model
- - title: ModelsIterable
- path: /api/python/google/generativeai/types/ModelsIterable
- - title: PartDict
- path: /api/python/google/generativeai/types/PartDict
- - title: PartType
- path: /api/python/google/generativeai/types/PartType
- - title: Permission
- path: /api/python/google/generativeai/types/Permission
- - title: Permissions
- path: /api/python/google/generativeai/types/Permissions
- - title: RequestOptions
- path: /api/python/google/generativeai/types/RequestOptions
- - title: RequestOptionsType
- path: /api/python/google/generativeai/types/RequestOptionsType
- - title: SafetyFeedbackDict
- path: /api/python/google/generativeai/types/SafetyFeedbackDict
- - title: SafetyRatingDict
- path: /api/python/google/generativeai/types/SafetyRatingDict
- - title: SafetySettingDict
- path: /api/python/google/generativeai/types/SafetySettingDict
- - title: Status
- path: /api/python/google/generativeai/types/Status
- - title: StopCandidateException
- path: /api/python/google/generativeai/types/StopCandidateException
- - title: StrictContentType
- path: /api/python/google/generativeai/types/StrictContentType
- - title: Tool
- path: /api/python/google/generativeai/types/Tool
- - title: ToolDict
- path: /api/python/google/generativeai/types/ToolDict
- - title: ToolsType
- path: /api/python/google/generativeai/types/ToolsType
- - title: TunedModel
- path: /api/python/google/generativeai/types/TunedModel
- - title: TunedModelNameOptions
- path: /api/python/google/generativeai/types/TunedModelNameOptions
- - title: TunedModelState
- path: /api/python/google/generativeai/types/TunedModelState
- - title: TypedDict
- path: /api/python/google/generativeai/types/TypedDict
- - title: get_default_file_client
- path: /api/python/google/generativeai/types/get_default_file_client
- - title: to_file_data
- path: /api/python/google/generativeai/types/to_file_data
diff --git a/docs/api/google/generativeai/all_symbols.md b/docs/api/google/generativeai/all_symbols.md
deleted file mode 100644
index bc673a13e..000000000
--- a/docs/api/google/generativeai/all_symbols.md
+++ /dev/null
@@ -1,261 +0,0 @@
-# All symbols in Generative AI - Python
-
-
-
-## Primary symbols
-* google.generativeai
-* google.generativeai.ChatSession
-* google.generativeai.GenerationConfig
-* google.generativeai.GenerativeModel
-* google.generativeai.caching
-* google.generativeai.caching.CachedContent
-* google.generativeai.caching.get_default_cache_client
-* google.generativeai.configure
-* google.generativeai.create_tuned_model
-* google.generativeai.delete_file
-* google.generativeai.delete_tuned_model
-* google.generativeai.embed_content
-* google.generativeai.embed_content_async
-* google.generativeai.get_base_model
-* google.generativeai.get_file
-* google.generativeai.get_model
-* google.generativeai.get_operation
-* google.generativeai.get_tuned_model
-* google.generativeai.list_files
-* google.generativeai.list_models
-* google.generativeai.list_operations
-* google.generativeai.list_tuned_models
-* google.generativeai.protos
-* google.generativeai.protos.AttributionSourceId
-* google.generativeai.protos.AttributionSourceId.GroundingPassageId
-* google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
-* google.generativeai.protos.BatchCreateChunksRequest
-* google.generativeai.protos.BatchCreateChunksResponse
-* google.generativeai.protos.BatchDeleteChunksRequest
-* google.generativeai.protos.BatchEmbedContentsRequest
-* google.generativeai.protos.BatchEmbedContentsResponse
-* google.generativeai.protos.BatchEmbedTextRequest
-* google.generativeai.protos.BatchEmbedTextResponse
-* google.generativeai.protos.BatchUpdateChunksRequest
-* google.generativeai.protos.BatchUpdateChunksResponse
-* google.generativeai.protos.Blob
-* google.generativeai.protos.CachedContent
-* google.generativeai.protos.CachedContent.UsageMetadata
-* google.generativeai.protos.Candidate
-* google.generativeai.protos.Candidate.FinishReason
-* google.generativeai.protos.Chunk
-* google.generativeai.protos.Chunk.State
-* google.generativeai.protos.ChunkData
-* google.generativeai.protos.CitationMetadata
-* google.generativeai.protos.CitationSource
-* google.generativeai.protos.CodeExecution
-* google.generativeai.protos.CodeExecutionResult
-* google.generativeai.protos.CodeExecutionResult.Outcome
-* google.generativeai.protos.Condition
-* google.generativeai.protos.Condition.Operator
-* google.generativeai.protos.Content
-* google.generativeai.protos.ContentEmbedding
-* google.generativeai.protos.ContentFilter
-* google.generativeai.protos.ContentFilter.BlockedReason
-* google.generativeai.protos.Corpus
-* google.generativeai.protos.CountMessageTokensRequest
-* google.generativeai.protos.CountMessageTokensResponse
-* google.generativeai.protos.CountTextTokensRequest
-* google.generativeai.protos.CountTextTokensResponse
-* google.generativeai.protos.CountTokensRequest
-* google.generativeai.protos.CountTokensResponse
-* google.generativeai.protos.CreateCachedContentRequest
-* google.generativeai.protos.CreateChunkRequest
-* google.generativeai.protos.CreateCorpusRequest
-* google.generativeai.protos.CreateDocumentRequest
-* google.generativeai.protos.CreateFileRequest
-* google.generativeai.protos.CreateFileResponse
-* google.generativeai.protos.CreatePermissionRequest
-* google.generativeai.protos.CreateTunedModelMetadata
-* google.generativeai.protos.CreateTunedModelRequest
-* google.generativeai.protos.CustomMetadata
-* google.generativeai.protos.Dataset
-* google.generativeai.protos.DeleteCachedContentRequest
-* google.generativeai.protos.DeleteChunkRequest
-* google.generativeai.protos.DeleteCorpusRequest
-* google.generativeai.protos.DeleteDocumentRequest
-* google.generativeai.protos.DeleteFileRequest
-* google.generativeai.protos.DeletePermissionRequest
-* google.generativeai.protos.DeleteTunedModelRequest
-* google.generativeai.protos.Document
-* google.generativeai.protos.DynamicRetrievalConfig
-* google.generativeai.protos.DynamicRetrievalConfig.Mode
-* google.generativeai.protos.EmbedContentRequest
-* google.generativeai.protos.EmbedContentResponse
-* google.generativeai.protos.EmbedTextRequest
-* google.generativeai.protos.EmbedTextResponse
-* google.generativeai.protos.Embedding
-* google.generativeai.protos.Example
-* google.generativeai.protos.ExecutableCode
-* google.generativeai.protos.ExecutableCode.Language
-* google.generativeai.protos.File
-* google.generativeai.protos.File.State
-* google.generativeai.protos.FileData
-* google.generativeai.protos.FunctionCall
-* google.generativeai.protos.FunctionCallingConfig
-* google.generativeai.protos.FunctionCallingConfig.Mode
-* google.generativeai.protos.FunctionDeclaration
-* google.generativeai.protos.FunctionResponse
-* google.generativeai.protos.GenerateAnswerRequest
-* google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
-* google.generativeai.protos.GenerateAnswerResponse
-* google.generativeai.protos.GenerateAnswerResponse.InputFeedback
-* google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
-* google.generativeai.protos.GenerateContentRequest
-* google.generativeai.protos.GenerateContentResponse
-* google.generativeai.protos.GenerateContentResponse.PromptFeedback
-* google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
-* google.generativeai.protos.GenerateContentResponse.UsageMetadata
-* google.generativeai.protos.GenerateMessageRequest
-* google.generativeai.protos.GenerateMessageResponse
-* google.generativeai.protos.GenerateTextRequest
-* google.generativeai.protos.GenerateTextResponse
-* google.generativeai.protos.GenerationConfig
-* google.generativeai.protos.GetCachedContentRequest
-* google.generativeai.protos.GetChunkRequest
-* google.generativeai.protos.GetCorpusRequest
-* google.generativeai.protos.GetDocumentRequest
-* google.generativeai.protos.GetFileRequest
-* google.generativeai.protos.GetModelRequest
-* google.generativeai.protos.GetPermissionRequest
-* google.generativeai.protos.GetTunedModelRequest
-* google.generativeai.protos.GoogleSearchRetrieval
-* google.generativeai.protos.GroundingAttribution
-* google.generativeai.protos.GroundingChunk
-* google.generativeai.protos.GroundingChunk.Web
-* google.generativeai.protos.GroundingMetadata
-* google.generativeai.protos.GroundingPassage
-* google.generativeai.protos.GroundingPassages
-* google.generativeai.protos.GroundingSupport
-* google.generativeai.protos.HarmCategory
-* google.generativeai.protos.Hyperparameters
-* google.generativeai.protos.ListCachedContentsRequest
-* google.generativeai.protos.ListCachedContentsResponse
-* google.generativeai.protos.ListChunksRequest
-* google.generativeai.protos.ListChunksResponse
-* google.generativeai.protos.ListCorporaRequest
-* google.generativeai.protos.ListCorporaResponse
-* google.generativeai.protos.ListDocumentsRequest
-* google.generativeai.protos.ListDocumentsResponse
-* google.generativeai.protos.ListFilesRequest
-* google.generativeai.protos.ListFilesResponse
-* google.generativeai.protos.ListModelsRequest
-* google.generativeai.protos.ListModelsResponse
-* google.generativeai.protos.ListPermissionsRequest
-* google.generativeai.protos.ListPermissionsResponse
-* google.generativeai.protos.ListTunedModelsRequest
-* google.generativeai.protos.ListTunedModelsResponse
-* google.generativeai.protos.LogprobsResult
-* google.generativeai.protos.LogprobsResult.Candidate
-* google.generativeai.protos.LogprobsResult.TopCandidates
-* google.generativeai.protos.Message
-* google.generativeai.protos.MessagePrompt
-* google.generativeai.protos.MetadataFilter
-* google.generativeai.protos.Model
-* google.generativeai.protos.Part
-* google.generativeai.protos.Permission
-* google.generativeai.protos.Permission.GranteeType
-* google.generativeai.protos.Permission.Role
-* google.generativeai.protos.PredictRequest
-* google.generativeai.protos.PredictResponse
-* google.generativeai.protos.QueryCorpusRequest
-* google.generativeai.protos.QueryCorpusResponse
-* google.generativeai.protos.QueryDocumentRequest
-* google.generativeai.protos.QueryDocumentResponse
-* google.generativeai.protos.RelevantChunk
-* google.generativeai.protos.RetrievalMetadata
-* google.generativeai.protos.SafetyFeedback
-* google.generativeai.protos.SafetyRating
-* google.generativeai.protos.SafetyRating.HarmProbability
-* google.generativeai.protos.SafetySetting
-* google.generativeai.protos.SafetySetting.HarmBlockThreshold
-* google.generativeai.protos.Schema
-* google.generativeai.protos.Schema.PropertiesEntry
-* google.generativeai.protos.SearchEntryPoint
-* google.generativeai.protos.Segment
-* google.generativeai.protos.SemanticRetrieverConfig
-* google.generativeai.protos.StringList
-* google.generativeai.protos.TaskType
-* google.generativeai.protos.TextCompletion
-* google.generativeai.protos.TextPrompt
-* google.generativeai.protos.Tool
-* google.generativeai.protos.ToolConfig
-* google.generativeai.protos.TransferOwnershipRequest
-* google.generativeai.protos.TransferOwnershipResponse
-* google.generativeai.protos.TunedModel
-* google.generativeai.protos.TunedModel.State
-* google.generativeai.protos.TunedModelSource
-* google.generativeai.protos.TuningExample
-* google.generativeai.protos.TuningExamples
-* google.generativeai.protos.TuningSnapshot
-* google.generativeai.protos.TuningTask
-* google.generativeai.protos.Type
-* google.generativeai.protos.UpdateCachedContentRequest
-* google.generativeai.protos.UpdateChunkRequest
-* google.generativeai.protos.UpdateCorpusRequest
-* google.generativeai.protos.UpdateDocumentRequest
-* google.generativeai.protos.UpdatePermissionRequest
-* google.generativeai.protos.UpdateTunedModelRequest
-* google.generativeai.protos.VideoMetadata
-* google.generativeai.types
-* google.generativeai.types.AnyModelNameOptions
-* google.generativeai.types.AsyncGenerateContentResponse
-* google.generativeai.types.BaseModelNameOptions
-* google.generativeai.types.BlobDict
-* google.generativeai.types.BlobType
-* google.generativeai.types.BlockedPromptException
-* google.generativeai.types.BlockedReason
-* google.generativeai.types.BrokenResponseError
-* google.generativeai.types.CallableFunctionDeclaration
-* google.generativeai.types.CitationMetadataDict
-* google.generativeai.types.CitationSourceDict
-* google.generativeai.types.ContentDict
-* google.generativeai.types.ContentFilterDict
-* google.generativeai.types.ContentType
-* google.generativeai.types.ContentsType
-* google.generativeai.types.File
-* google.generativeai.types.FileDataDict
-* google.generativeai.types.FileDataType
-* google.generativeai.types.FunctionDeclaration
-* google.generativeai.types.FunctionDeclarationType
-* google.generativeai.types.FunctionLibrary
-* google.generativeai.types.FunctionLibraryType
-* google.generativeai.types.GenerateContentResponse
-* google.generativeai.types.GenerationConfig
-* google.generativeai.types.GenerationConfigDict
-* google.generativeai.types.GenerationConfigType
-* google.generativeai.types.HarmBlockThreshold
-* google.generativeai.types.HarmCategory
-* google.generativeai.types.HarmProbability
-* google.generativeai.types.IncompleteIterationError
-* google.generativeai.types.Model
-* google.generativeai.types.ModelNameOptions
-* google.generativeai.types.ModelsIterable
-* google.generativeai.types.PartDict
-* google.generativeai.types.PartType
-* google.generativeai.types.Permission
-* google.generativeai.types.Permissions
-* google.generativeai.types.RequestOptions
-* google.generativeai.types.RequestOptionsType
-* google.generativeai.types.SafetyFeedbackDict
-* google.generativeai.types.SafetyRatingDict
-* google.generativeai.types.SafetySettingDict
-* google.generativeai.types.Status
-* google.generativeai.types.StopCandidateException
-* google.generativeai.types.StrictContentType
-* google.generativeai.types.Tool
-* google.generativeai.types.ToolDict
-* google.generativeai.types.ToolsType
-* google.generativeai.types.TunedModel
-* google.generativeai.types.TunedModelNameOptions
-* google.generativeai.types.TunedModelState
-* google.generativeai.types.TypedDict
-* google.generativeai.types.get_default_file_client
-* google.generativeai.types.to_file_data
-* google.generativeai.update_tuned_model
-* google.generativeai.upload_file
\ No newline at end of file
diff --git a/docs/api/google/generativeai/api_report.pb b/docs/api/google/generativeai/api_report.pb
deleted file mode 100644
index bd0f3970f40d4a0bc343cb3857d7592d9ef6848a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 54075
zcmc&-&5tBUR?o5ovJzGvu^_R-el*?>%q~;gGdmdLUFhtonekdXmYeDJEU1d43dw1LP%WpxCah>T5&|;A0XmIWL0El#EXx5
z)nmz4t8LHky%+Jme(@qQ{@EA*>A%iC`RPj^|Ih#Zd+XDuPC>sPLBUb*_}-nGrw
zuU~)t>h<}J#HARk?dE`R1z
z5&P>Gq9{o|$A9`V`#ZV9|M7Cn{?$4E?U!zRmH#~7V*gnbz0QAz|0RlUd|HEWc@%#|
z^J^8jJ;@#oM*T~z%RdEwCSYCK1_=O5{`uElc+mmxS^R4@ylFP-caNFY>0!oDUn*5N
z=eo{?t~(t6$3oXA0l8}UiC*6)@iia(ZaO^~^`!3S0{cu@AJN@_{WSif4*QM8ioccj
zv!S}s0xBsNIuQcQx$-~L5eqGpw=eYLc+&yA-OFdAqFdzmv(Z$leI`(&D0sViP*n{O>+
z-@R=$_OW=NEQm>jKWN3jsuO|lgTd&q-GTkkk=sFu!SgNV4(Hr8MO+ec$jGe52K%O9
zYoYZ`tqkqnBuk4*V2rle&WyIf5E=ohhA<_9!bb)crZSL0NeLQcyXqgrSDdZSQe$QP
z=UY4<2)oa&sJH!aFg1#B9xB2!AIs0i{rc)FC4j~Z${HBb(tG-lR9b*p
zz1zbGPCz>A_N
zs#O4nis!Mu;t_7uEoLJY{*NIME68K+Vmrdce#C?SzbVM`t*VQeVogt9bnl(2|qHI&4rbtXsD7$W!e*9SzYICRT
zSamb(7K8V3-5NfiswK8dfLa6nmf?W;7#bhe>d43<>&{T!X8LlQLsi{i5nogdgBNZ;
z{=9DCc00Y*IgJ{)?xHT^!3uj7{_x08U$i>RlbknCq)#VJ^
z@=^R{qYag{qu261eL*^&1c
zUP%%z&G=up;%%MYX886S6V|ZxS*5y@Lc?s9NV6FBjrO;#_?D+cyV-F%Dh9o~88cS)
zFq?FaW~2KiJKbw~w_Pe$w0{GU>PA}hjyg~#1NQW8_F$H=%FVD=XU)pZE(0stwVj+?
zD~$}(aXy-64IyN*7x5l|DRaT99w=WB>tydscIlx;OMxy-&Af7ptD-L@lYLg34B
z8nP!0mAtwoK#0BUaben%vK|ZC4j;r
zgkiqlhy#%*cM`tw5+y;qB#wviE6BokY8*y2VI`ugfk1rIGhzKvE7o#*QqKx-`yEc`
zzLgaz>tNG{`Z5egA^Bhue+gBeB}nL+>+yd>s1BJiN&G@D?rWV?VzzG&MuX|m-7K8~
zhYz+Fv4s^2bFt>4n)ZzSj7q%WqIE@rb~^127`1trO}hM)A(tF)6hYMjvG+g4=5szm
z_#pnKza0Am#wKvCStCG|$Fm22#^HP`OoCxHIxLQ=B8bJf2pcex`Q6!}pmP)=EyxAJ
zYygz^{z;LQT)&!q{@XJcgo!X%Y)IWQc>2UMJ*mA2M>?l$7kO7>0$7Egc$#1oZ(tiy
zTL_`rDEZZdlRXWf-07i_-mM=
z0~RM3v0*FFHbM%tNBTpy(rG2M;8e;`6_t#NERqrq>|3P`@xqwV{I?!wJz#Vgu9Af3
z9Bs!l9OLX!M@74vP1%7;L)S&LE9_;Etr%~@x8I%>z5Lji9Uuq#W{1CM
z#cyEy%q+$_(gib05-1I2{`g~@+0$%|a&2f6$_nP7%rGmMgEZx=AO~pB2apv(*E1YsM)2)3-onF
zC`&cnumA)Es@D0v_%%NRke0XJs#C@wZ`Da7k2gALym^AtcFjFU7-h!#{*ss3STkX1;ji*NGl`Evi?*?%zPt^g_sr4w+F)_TST@6u2dQLsE<5V
z&h^La3nNd)vuRUNQGAN3&lAvc9M<7f-&RA>*fag30nb4)`LFo7?te6Y9nfGSLY>3f>
zzl!!x^j928?*r{w$j?mJQujoZxPGTnF3T>k4Lg4oa9i8~&3CfN@c?eLnDk%8_Lc)^
zvG{v@#uM&Z7|E_vkqYM_WS5f}mNK}Q!I$>3@NKhckslj%p9|g3`Tp~5=kVf=Q<_sY
z#9c4$RB!iH)e>&y^vm-Nx5@~PX|+`o(1u$Hj)vP+Tn-Pn0{!`gTYaBFdO0d9|Y
zt1P~g2=-{RiU<$%qoFe4m22PddKqJ(-PjG7D|JF8dZ(!!9NeTS0p=0lvfr~AW(ey9H_lRXIh*<6-j`J+JrRv_*-|M!WSlz00Qh27~q;
zZNVe9d_peU9!(!*6BA){E)Mp9Ht{{!W3duzC$$2!g*dy#$+EJpv)=QNtob*s_$?%G
zTZLIi{I(<$yG)c}+;S}7CJb3LU)eO2L49S@kjD0vr6CRR?4?BPwGaEYA&m6+6kS~u
zL&Qy#iALWV`7iOAe7ywiH%H@HaXZWU`)SYk=*ekF&Hi_-_&Xj!PgtZXPy5C&@7=#k
ze;56<6@Q;lwspj3X1;eQMWhVZf8UC~OD!8&^rki(WiY2U9BFK)7LGL3|F9MBQEs^l
zkNk|4;1>l)6%Md`BR@Mc&^{6^S(!5OFC=hCK80teVR;G6AVPbGJz+eyrk_oo
zetgN2e{ie0YeuO_VQX^7r<
zJ;LW#=#593is;27O-7EH(!+ZR)6rkyH^@z)*z($yGGpx<>)bjcdD+U8AI}snXifQX
zApxErPdNkRONZe+V7VsPJA0SJ+g}?Ytr$K%RYk{@4@!184JqOIv5|i4Ri0^|bG!S_
z_E<}!qVndOY4^!#;~?_yar{*@O%?DOj|F)xb}u^UkiAqo2pYfF^F~&bQ`YOy8z1Om
z;QVOR0-S{Kyw;_LOFS0jAbZ<9yl@`sUk|+VKz|Ut<*)%hRJ5C=lOB855FYtWo5#~~
zw+Hna7VDQ!9xKl=n@-2!tiw0KTK0Ph4{@deTzWa*@4kuE;^h_K_V+U58=??beZld;
z*u^{3wrgXs;e4?z8Ld=YzH!~Kxja~djOM%8j%rJ2&82zqI>2d2d~;NLcQzj9ljgO#
zlHCs3YWU#Ex6{dShdmp3m`_ehYYPRJ_f_aMlaulhjn4uVQt)`3KEQa9nk$4$l+6kxjff<>+NEww7hbYx5dBu71$U
z<24J%=iQ)Db-C_Ih|k--O+w>puL&D^QJdat^7IlLS3Ia0!r4Z?-pf|lsJ)DH^N>}y
zP)-dAo{J5Z{rK~;K}CW!*Z^;e@8pN5x>kS33rp`XI%6D^-T2m19lQo
z96OtEcX)wu$MXOj-)M)EVBU$AL|>=a5}64
zj;kKjlHhb0pqG4@i3^ma<*6w3{Mb004y(bhL4CYGy34-3`jC~h`lI4VEP9aHaV~6e
zEMGQCWi=f^@yx4f(S-(8)3}50rUzMZV!HwZy^?QJh#y)C;JfTAMw&<5Wn6)J;T6yJ
z1z%$UyPFkST)D=e?%1>U#vgIt?H#c%EjJcce$x_~!!u7ncK-|efUH2|Bi-X{QVg=`
ztx*Bz!=L#SZ^>$(!M?(NB{n!1-8ZV?JhPZS0sNhuebBOD1_?b6i77bCvYQ?9m!lhq
zbD?YyB5Mb*8wbr#YU?NaPNfLEMfT+(aAIJWjroiYIoI0MTSStLkiHy$abqu?-p8nb
zAV|CnEg{!Y^*=yAb+bUG?Dp|jR4R~Ub-tsTlm)({nxvJzBPMA{KSp1cUo7W^)$)A4
zy2EZ94KwV!P>}m*!x0BIkj`ph<2&7qbFq7Q-W+pGM93AP$g{4%LoSEw=;OT95puk5
zUbP&j>@fy*dXP=tel%ipBS(X=sc4r8HmG>D#Neg~i^5&`LSqt=+AStud>EjS<#rT`
z-}mhdyp&lU+!?-qDJ>n8?eNl-L9?uGc?0y6+u7HB@fM+iYdj&_45=twfOkjfczTp;
zN*UqI5U_mx%DS_drFn#I>?~Qd1%X-u7IKUd6g%)qH(si(qi#AzKOPIHlr_@`yO;J~-AuaiI9+K5?`cVqO2N5mc
z5bAvcA5K3?aP(r7OmIQyIR7WmZT#aZ_?FIH2(TpE*RcRjA}p(nb%=nM8Q&fBv)sl+
z2CY7O8t+xzWx#@^qG@}3dNS&jw=r2ME#8Y*)RO!;P2p|{BSd8yJ`eFf_EeDe-6Xph
zZHTq$fK9Qa1^c>f3%)^K_ApUn)Zy9-FGlu{zx|*U-wZR0kY`f9U`Hd)hlZB}9n8Zd
z$%hX!X_qK^IR>h+VHdki*a(RBTfYuMuzbOftg1r5Mv-44oo@m6$rh(&7vw^dq`!$lh%I%TD5xbR|vC97CH)TOLkJ=7(wU_Ee2D_Iu@7OvDLtY{7Nt=v=-{?XR*
zfzw-)34D-!)tF%_q6H>@{F;9*qCPS16f&t5@Xj|EoCnS
zwD3Jo4;;pyH&?CvYLDd3=3AVR+~)o+SlOWZ2(>9kYxmjoMnO?~j^eN2ig8crH%d&6
zT5gV#>%S@>)(})9_RM!}3i1yp5N%Kc*&&saQMj2T65iOUNkU^?dr{3&)i;r)Y@)9v
zIk1jjdyU>`10jVPi&t$ipJ9!+CRJ1C0t0JWwZNUh{v?G76<(r9l|^x2_+Z6+>&B-w
z>wEc$em+ABN3E*i_JqLpq&MJ9+QDF336Lrv4y+l0bR@O5w;h7XGwMW7@)p6YL3qS$0?hp*z4
zl!ve4q`ZeOI4SvIxHw^4au(Y5Pl{|RdtONdfVcfbNq3P0dtPA0toodb$;Tn0`W~ZM
ztzEvyv<~M+sU}rr(#uFFkJvyZb7*&EDP7rJSyCr_s+Fvw>HCUashl_~w$OJ!15_ChB0
zX7mJv2WrK!c=t<{iBa4)M?EomoV^LB=d9JBI*>4o)roDbJ^>-Smn9P!l={&VVg3RH
zQ43@E{JcuAC?25jUtJk!oZZvbDzmCwr)zS_c
z`qd5fea9U&gV{Tiu_4b_YxJtIO-$g4{)${fX#pQ(Zxgg(#JCP!4phT2$$X1*5Aw2S
z6figQ%x#gU?cO9~vyQ60=dihryvNes35?GKFt1C0F7Yb{mz;|xD7$U7r~1j`hQYaK
zRTSSM@M=c-eBC9aOe=o9h%9po8Q#;+LN^UCiD$p!1?8D*i0q`}xMZK?
zpu=W!)$LlM^EWlj``HNvB-*UkARnf~*@9)0dTMsJT~Cxxuf8E5%iroKhuld}H2J{uG@dQEqT1TqSiJ;3LdgP2m|GwBTseXTfNz$N?{+Rb1XBkgXyFSEw}a
ziGdtV%C|=Zl)hVH@~v7<0!54LG8a4bEM%vm^j!`3!s)v@{)N$ZI^c_;@9M}GM&H$e
zFOI&eBVQbSS4SqK@9xK6G&eAN?21kwqSXs<=jBwX)AEj5r&SBoDAD(cXkX5mkA8NL
z&W1(zfQ|Nbd&2>L!$m-^I+u!$_c40{BD)BfN<4ErURBkUbqsitaVYpD_U>`crWZ>*
tVOmXwtKuaF(>{p5pj*e;c$lYs(2fBu8qZT+=gZZU{Ag`8)z(iB`Tx|Sx~KpE
diff --git a/docs/api/google/generativeai/caching.md b/docs/api/google/generativeai/caching.md
deleted file mode 100644
index 784ab1b80..000000000
--- a/docs/api/google/generativeai/caching.md
+++ /dev/null
@@ -1,49 +0,0 @@
-
-# Module: google.generativeai.caching
-
-
-
-
-
-
-
-
-
-
-
-## Classes
-
-[`class CachedContent`](../../google/generativeai/caching/CachedContent.md): Cached content resource.
-
-## Functions
-
-[`get_default_cache_client(...)`](../../google/generativeai/caching/get_default_cache_client.md)
-
-
-
-
-
-
-Other Members |
-
-
-
-
-annotations
-
- |
-
-
-Instance of `__future__._Feature`
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/caching/CachedContent.md b/docs/api/google/generativeai/caching/CachedContent.md
deleted file mode 100644
index a0efa473d..000000000
--- a/docs/api/google/generativeai/caching/CachedContent.md
+++ /dev/null
@@ -1,448 +0,0 @@
-
-# google.generativeai.caching.CachedContent
-
-
-
-
-
-
-
-Cached content resource.
-
-
-google.generativeai.caching.CachedContent(
- name
-)
-
-
-
-
-
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The resource name referring to the cached content.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`create_time`
-
- |
-
-
-
-
- |
-
-
-
-`display_name`
-
- |
-
-
-
-
- |
-
-
-
-`expire_time`
-
- |
-
-
-
-
- |
-
-
-
-`model`
-
- |
-
-
-
-
- |
-
-
-
-`name`
-
- |
-
-
-
-
- |
-
-
-
-`update_time`
-
- |
-
-
-
-
- |
-
-
-
-`usage_metadata`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-create
-
-View source
-
-
-@classmethod
-create(
- model: str,
- *,
- display_name: (str | None) = None,
- system_instruction: Optional[content_types.ContentType] = None,
- contents: Optional[content_types.ContentsType] = None,
- tools: Optional[content_types.FunctionLibraryType] = None,
- tool_config: Optional[content_types.ToolConfigType] = None,
- ttl: Optional[caching_types.TTLTypes] = None,
- expire_time: Optional[caching_types.ExpireTimeTypes] = None
-) -> CachedContent
-
-
-Creates `CachedContent` resource.
-
-
-
-
-
-Args |
-
-
-
-
-`model`
-
- |
-
-
-The name of the `model` to use for cached content creation.
-Any `CachedContent` resource can only be used with the
-`model` it was created for.
-
- |
-
-
-
-`display_name`
-
- |
-
-
-The user-provided, meaningful display name of the cached
-content. `display_name` must be no more than 128
-Unicode characters.
-
- |
-
-
-
-`system_instruction`
-
- |
-
-
-Developer set system instruction.
-
- |
-
-
-
-`contents`
-
- |
-
-
-Contents to cache.
-
- |
-
-
-
-`tools`
-
- |
-
-
-A list of `Tools` the model may use to generate response.
-
- |
-
-
-
-`tool_config`
-
- |
-
-
-Config to apply to all tools.
-
- |
-
-
-
-`ttl`
-
- |
-
-
-TTL for cached resource (in seconds). Defaults to 1 hour.
-`ttl` and `expire_time` are exclusive arguments.
-
- |
-
-
-
-`expire_time`
-
- |
-
-
-Expiration time for cached resource.
-`ttl` and `expire_time` are exclusive arguments.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-`CachedContent` resource with specified name.
-
- |
-
-
-
-
-
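-A minimal usage sketch. The model name, display name, and cached text below are
-placeholders; caching is only available on model versions that support it.
-
-```
-from google.generativeai import caching
-import datetime
-
-cache = caching.CachedContent.create(
-    model="models/gemini-1.5-flash-001",   # assumed: a caching-capable model version
-    display_name="my cached context",
-    system_instruction="You answer questions about the attached reference text.",
-    contents=["<a large block of reference text to reuse across requests>"],
-    ttl=datetime.timedelta(hours=2),       # or pass expire_time=..., but not both
-)
-print(cache.name)
-```
-
-The resulting cache is typically attached to a generative model (for example via
-GenerativeModel.from_cached_content) before sending requests.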
-
-delete
-
-View source
-
-
-delete() -> None
-
-
-Deletes `CachedContent` resource.
-
-
-get
-
-View source
-
-
-@classmethod
-get(
- name: str
-) -> CachedContent
-
-
-Fetches the requested `CachedContent` resource.
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The resource name referring to the cached content.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-`CachedContent` resource with specified `name`.
-
- |
-
-
-
-
-
-
-list
-
-View source
-
-
-@classmethod
-list(
- page_size: Optional[int] = 1
-) -> Iterable[CachedContent]
-
-
-Lists `CachedContent` objects associated with the project.
-
-
-
-
-
-Args |
-
-
-
-
-`page_size`
-
- |
-
-
-The maximum number of `CachedContent` objects to return
-(per page). The service may return fewer.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-A paginated list of `CachedContent` objects.
-
- |
-
-
-
-
-
-
-update
-
-View source
-
-
-update(
- *,
- ttl: Optional[caching_types.TTLTypes] = None,
- expire_time: Optional[caching_types.ExpireTimeTypes] = None
-) -> None
-
-
-Updates the requested `CachedContent` resource.
-
-
-
-
-
-Args |
-
-
-
-
-`ttl`
-
- |
-
-
-TTL for cached resource (in seconds). Defaults to 1 hour.
-`ttl` and `expire_time` are exclusive arguments.
-
- |
-
-
-
-`expire_time`
-
- |
-
-
-Expiration time for cached resource.
-`ttl` and `expire_time` are exclusive arguments.
-
- |
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/caching/get_default_cache_client.md b/docs/api/google/generativeai/caching/get_default_cache_client.md
deleted file mode 100644
index 8457f5c14..000000000
--- a/docs/api/google/generativeai/caching/get_default_cache_client.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# google.generativeai.caching.get_default_cache_client
-
-
-
-
-
-
-
-
-
-
-
-google.generativeai.caching.get_default_cache_client() -> glm.CacheServiceClient
-
-
-
-
-
diff --git a/docs/api/google/generativeai/configure.md b/docs/api/google/generativeai/configure.md
deleted file mode 100644
index 81c9e19be..000000000
--- a/docs/api/google/generativeai/configure.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# google.generativeai.configure
-
-
-
-
-
-
-
-Captures default client configuration.
-
-
-
-google.generativeai.configure(
- *,
- api_key: (str | None) = None,
- credentials: (ga_credentials.Credentials | dict | None) = None,
- transport: (str | None) = None,
- client_options: (client_options_lib.ClientOptions | dict | None) = None,
- client_info: (gapic_v1.client_info.ClientInfo | None) = None,
- default_metadata: Sequence[tuple[str, str]] = ()
-)
-
-
-
-
-
-
-If no API key has been provided (either directly, or on `client_options`) and the
-`GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
-
-Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in
-`google.ai.generativelanguage` for details on the other arguments.
-
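-A minimal configuration sketch. Reading the key explicitly is optional; omitting
-`api_key` and relying on the `GOOGLE_API_KEY` environment variable behaves as
-described above.
-
-```
-import os
-import google.generativeai as genai
-
-genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
-
-# Equivalent, relying on the environment variable implicitly:
-# genai.configure()
-```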
-
-
-
-Args |
-
-
-
-
-`transport`
-
- |
-
-
-A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
-
- |
-
-
-
-`api_key`
-
- |
-
-
-The API-Key to use when creating the default clients (each service uses
-a separate client). This is a shortcut for `client_options={"api_key": api_key}`.
-If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be
-used.
-
- |
-
-
-
-`default_metadata`
-
- |
-
-
-Default (key, value) metadata pairs to send with every request.
-When using `transport="rest"`, these are sent as HTTP headers.
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/create_tuned_model.md b/docs/api/google/generativeai/create_tuned_model.md
deleted file mode 100644
index 04ce93d75..000000000
--- a/docs/api/google/generativeai/create_tuned_model.md
+++ /dev/null
@@ -1,246 +0,0 @@
-
-# google.generativeai.create_tuned_model
-
-
-
-
-
-
-
-Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
-
-
-
-google.generativeai.create_tuned_model(
- source_model: model_types.AnyModelNameOptions,
- training_data: model_types.TuningDataOptions,
- *,
- id: (str | None) = None,
- display_name: (str | None) = None,
- description: (str | None) = None,
- temperature: (float | None) = None,
- top_p: (float | None) = None,
- top_k: (int | None) = None,
- epoch_count: (int | None) = None,
- batch_size: (int | None) = None,
- learning_rate: (float | None) = None,
- input_key: str = 'text_input',
- output_key: str = 'output',
- client: (glm.ModelServiceClient | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> operations.CreateTunedModelOperation
-
-
-
-
-
-
-Since tuning a model can take significant time, this API doesn't wait for the tuning to complete.
-Instead, it returns a `google.api_core.operation.Operation` object that lets you check on the
-status of the tuning job, or wait for it to complete, and check the result.
-
-After the job completes, you can retrieve the resulting `TunedModel` object from
-`Operation.result()`, or look it up with `palm.list_tuned_models` or `palm.get_tuned_model(model_id)`.
-
-```
-my_id = "my-tuned-model-id"
-operation = palm.create_tuned_model(
- id = my_id,
- source_model="models/text-bison-001",
- training_data=[{'text_input': 'example input', 'output': 'example output'},...]
-)
-tuned_model=operation.result() # Wait for tuning to finish
-
-palm.generate_text(f"tunedModels/{my_id}", prompt="...")
-```
-
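-If you prefer not to block on `operation.result()`, a rough polling sketch looks
-like this (the sleep interval is arbitrary):
-
-```
-import time
-
-while not operation.done():      # done() refreshes the operation status
-    print(operation.metadata)    # CreateTunedModelMetadata describing progress
-    time.sleep(30)
-
-tuned_model = operation.result()
-```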
-
-
-
-Args |
-
-
-
-
-`source_model`
-
- |
-
-
-The name of the model to tune.
-
- |
-
-
-
-`training_data`
-
- |
-
-
-The dataset to tune the model on. This must be either:
-* A `protos.Dataset`, or
-* An `Iterable` of:
-  * `protos.TuningExample`,
-  * `{'text_input': text_input, 'output': output}` dicts, or
-  * `(text_input, output)` tuples.
-* A `Mapping` of `Iterable[str]` - use `input_key` and `output_key` to choose which
-  columns to use as the input/output.
-* A CSV file (read with `pd.read_csv` and handled as a `Mapping` as
-  above). This can be:
-  * A local path as a `str` or `pathlib.Path`.
-  * The URL of a CSV file.
-  * The URL of a Google Sheets file.
-* A JSON file - its contents will be handled either as an `Iterable` or a `Mapping`
-  as above. This can be:
-  * A local path as a `str` or `pathlib.Path`.
-
- |
-
-
-
-`id`
-
- |
-
-
-The model identifier, used to refer to the model in the API
-`tunedModels/{id}`. Must be unique.
-
- |
-
-
-
-`display_name`
-
- |
-
-
-A human-readable name for display.
-
- |
-
-
-
-`description`
-
- |
-
-
-A description of the tuned model.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-The default temperature for the tuned model, see types.Model for details.
-
- |
-
-
-
-`top_p`
-
- |
-
-
-The default `top_p` for the model, see types.Model for details.
-
- |
-
-
-
-`top_k`
-
- |
-
-
-The default `top_k` for the model, see types.Model for details.
-
- |
-
-
-
-`epoch_count`
-
- |
-
-
-The number of tuning epochs to run. An epoch is a pass over the whole dataset.
-
- |
-
-
-
-`batch_size`
-
- |
-
-
-The number of examples to use in each training batch.
-
- |
-
-
-
-`learning_rate`
-
- |
-
-
-The step size multiplier for the gradient updates.
-
- |
-
-
-
-`client`
-
- |
-
-
-Which client to use.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-A [`google.api_core.operation.Operation`](https://googleapis.dev/python/google-api-core/latest/operation.html)
-
- |
-
-
-
-
diff --git a/docs/api/google/generativeai/delete_file.md b/docs/api/google/generativeai/delete_file.md
deleted file mode 100644
index 5a3bfc295..000000000
--- a/docs/api/google/generativeai/delete_file.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.delete_file
-
-
-
-
-
-
-
-Calls the API to permanently delete a specified file using a supported file service.
-
-
-
-google.generativeai.delete_file(
- name: (str | file_types.File | protos.File)
-)
-
-
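-A minimal sketch, assuming a previously uploaded file (the path is illustrative):
-
-```
-import google.generativeai as genai
-
-sample = genai.upload_file(path="sample.txt")   # illustrative local file
-genai.delete_file(sample)   # also accepts the file's `name` string or a protos.File
-```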
-
-
-
diff --git a/docs/api/google/generativeai/delete_tuned_model.md b/docs/api/google/generativeai/delete_tuned_model.md
deleted file mode 100644
index 5b1396cea..000000000
--- a/docs/api/google/generativeai/delete_tuned_model.md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# google.generativeai.delete_tuned_model
-
-
-
-
-
-
-
-Calls the API to delete a specified tuned model.
-
-
-
-google.generativeai.delete_tuned_model(
- tuned_model: model_types.TunedModelNameOptions,
- client: (glm.ModelServiceClient | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> None
-
-
-
-
-
diff --git a/docs/api/google/generativeai/embed_content.md b/docs/api/google/generativeai/embed_content.md
deleted file mode 100644
index af0e3c99b..000000000
--- a/docs/api/google/generativeai/embed_content.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-# google.generativeai.embed_content
-
-
-
-
-
-
-
-Calls the API to create embeddings for content passed in.
-
-
-
-google.generativeai.embed_content(
- model: model_types.BaseModelNameOptions,
- content: (content_types.ContentType | Iterable[content_types.ContentType]),
- task_type: (EmbeddingTaskTypeOptions | None) = None,
- title: (str | None) = None,
- output_dimensionality: (int | None) = None,
- client: glm.GenerativeServiceClient = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> (text_types.EmbeddingDict | text_types.BatchEmbeddingDict)
-
-
-
-
-
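-A minimal sketch (the model name is an assumption; any available embedding model works):
-
-```
-import google.generativeai as genai
-
-result = genai.embed_content(
-    model="models/embedding-001",
-    content="Hello world",
-)
-print(len(result["embedding"]))   # the embedding is a list of floats
-```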
-
-
-
-
-
-Args |
-
-
-
-
-`model`
-
- |
-
-
- Which [model](https://ai.google.dev/models/gemini#embedding) to
-call, as a string or a types.Model .
-
- |
-
-
-
-`content`
-
- |
-
-
- Content to embed.
-
- |
-
-
-
-`task_type`
-
- |
-
-
- Optional task type for which the embeddings will be used. Can only
-be set for `models/embedding-001`.
-
- |
-
-
-
-`title`
-
- |
-
-
- An optional title for the text. Only applicable when task_type is
-`RETRIEVAL_DOCUMENT`.
-
- |
-
-
-
-`output_dimensionality`
-
- |
-
-
- Optional reduced dimensionality for the output embeddings. If set,
-excessive values from the output embeddings will be truncated from
-the end.
-
- |
-
-
-
-`request_options`
-
- |
-
-
- Options for the request.
-
- |
-
-
-
-
-
-
-
-
-Return |
-
-
-
-Dictionary containing the embedding (list of float values) for the
-input content.
-
- |
-
-
-
-
diff --git a/docs/api/google/generativeai/embed_content_async.md b/docs/api/google/generativeai/embed_content_async.md
deleted file mode 100644
index 8c8ebc9c2..000000000
--- a/docs/api/google/generativeai/embed_content_async.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# google.generativeai.embed_content_async
-
-
-
-
-
-
-
-Calls the API to create async embeddings for content passed in.
-
-
-
-google.generativeai.embed_content_async(
- model,
- content,
- task_type=None,
- title=None,
- output_dimensionality=None,
- client=None,
- request_options=None
-)
-
-
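-A minimal async sketch (the model name is an assumption; passing an iterable of
-strings requests a batch of embeddings):
-
-```
-import asyncio
-import google.generativeai as genai
-
-async def main():
-    result = await genai.embed_content_async(
-        model="models/embedding-001",
-        content=["Hello world", "How are you?"],
-    )
-    for embedding in result["embedding"]:
-        print(len(embedding))
-
-asyncio.run(main())
-```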
-
-
-
diff --git a/docs/api/google/generativeai/get_base_model.md b/docs/api/google/generativeai/get_base_model.md
deleted file mode 100644
index 760acc975..000000000
--- a/docs/api/google/generativeai/get_base_model.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# google.generativeai.get_base_model
-
-
-
-
-
-
-
-Calls the API to fetch a base model by name.
-
-
-
-google.generativeai.get_base_model(
- name: model_types.BaseModelNameOptions,
- *,
- client=None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> model_types.Model
-
-
-
-
-
-
-```
-import pprint
-model = genai.get_base_model('models/chat-bison-001')
-pprint.pprint(model)
-```
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the model to fetch. Should start with `models/`
-
- |
-
-
-
-`client`
-
- |
-
-
-The client to use.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/get_file.md b/docs/api/google/generativeai/get_file.md
deleted file mode 100644
index 18000cd7c..000000000
--- a/docs/api/google/generativeai/get_file.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.get_file
-
-
-
-
-
-
-
-Calls the API to retrieve a specified file using a supported file service.
-
-
-
-google.generativeai.get_file(
- name: str
-) -> file_types.File
-
-
-
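-A minimal sketch, assuming a previously uploaded file (the path is illustrative):
-
-```
-import google.generativeai as genai
-
-sample = genai.upload_file(path="sample.txt")
-fetched = genai.get_file(name=sample.name)
-print(fetched.uri)
-```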
-
-
diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md
deleted file mode 100644
index cba445afc..000000000
--- a/docs/api/google/generativeai/get_model.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# google.generativeai.get_model
-
-
-
-
-
-
-
-Calls the API to fetch a model by name.
-
-
-
-google.generativeai.get_model(
- name: model_types.AnyModelNameOptions,
- *,
- client=None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> (model_types.Model | model_types.TunedModel)
-
-
-
-
-
-
-```
-import pprint
-model = genai.get_model('models/gemini-1.5-flash')
-pprint.pprint(model)
-```
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the model to fetch. Should start with `models/`
-
- |
-
-
-
-`client`
-
- |
-
-
-The client to use.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/get_operation.md b/docs/api/google/generativeai/get_operation.md
deleted file mode 100644
index c4041621c..000000000
--- a/docs/api/google/generativeai/get_operation.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.get_operation
-
-
-
-
-
-
-
-Calls the API to get a specific operation.
-
-
-
-google.generativeai.get_operation(
- name: str, *, client=None
-) -> CreateTunedModelOperation
-
-
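-A minimal sketch. The operation name below is hypothetical; use a name you saved
-when the tuning job was created, or one obtained from `list_operations`.
-
-```
-import google.generativeai as genai
-
-op_name = "tunedModels/my-model-id/operations/abcd1234"   # hypothetical name
-operation = genai.get_operation(name=op_name)
-print(operation.done())
-```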
-
-
-
diff --git a/docs/api/google/generativeai/get_tuned_model.md b/docs/api/google/generativeai/get_tuned_model.md
deleted file mode 100644
index 72b62dac8..000000000
--- a/docs/api/google/generativeai/get_tuned_model.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# google.generativeai.get_tuned_model
-
-
-
-
-
-
-
-Calls the API to fetch a tuned model by name.
-
-
-
-google.generativeai.get_tuned_model(
- name: model_types.TunedModelNameOptions,
- *,
- client=None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> model_types.TunedModel
-
-
-
-
-
-
-```
-import pprint
-model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
-pprint.pprint(model)
-```
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the model to fetch. Should start with `tunedModels/`
-
- |
-
-
-
-`client`
-
- |
-
-
-The client to use.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/list_files.md b/docs/api/google/generativeai/list_files.md
deleted file mode 100644
index ceafc9677..000000000
--- a/docs/api/google/generativeai/list_files.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.list_files
-
-
-
-
-
-
-
-Calls the API to list files using a supported file service.
-
-
-
-google.generativeai.list_files(
- page_size=100
-) -> Iterable[file_types.File]
-
-
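-A minimal sketch listing uploaded files:
-
-```
-import google.generativeai as genai
-
-for f in genai.list_files(page_size=10):
-    print(f.name, f.display_name)
-```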
-
-
-
diff --git a/docs/api/google/generativeai/list_models.md b/docs/api/google/generativeai/list_models.md
deleted file mode 100644
index befb027db..000000000
--- a/docs/api/google/generativeai/list_models.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# google.generativeai.list_models
-
-
-
-
-
-
-
-Calls the API to list all available models.
-
-
-
-google.generativeai.list_models(
- *,
- page_size: (int | None) = 50,
- client: (glm.ModelServiceClient | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> model_types.ModelsIterable
-
-
-
-
-
-
-```
-import pprint
-for model in genai.list_models():
- pprint.pprint(model)
-```
-
-
-
-
-Args |
-
-
-
-
-`page_size`
-
- |
-
-
-How many `types.Models` to fetch per page (api call).
-
- |
-
-
-
-`client`
-
- |
-
-
-You may pass a `glm.ModelServiceClient` instead of using the default client.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/list_operations.md b/docs/api/google/generativeai/list_operations.md
deleted file mode 100644
index d28a21022..000000000
--- a/docs/api/google/generativeai/list_operations.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.list_operations
-
-
-
-
-
-
-
-Calls the API to list all operations.
-
-
-
-google.generativeai.list_operations(
- *, client=None
-) -> Iterator[CreateTunedModelOperation]
-
-
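-A minimal sketch. Each item is a `CreateTunedModelOperation`; `metadata` carries
-the tuning progress and `done()` refreshes and reports completion.
-
-```
-import google.generativeai as genai
-
-for operation in genai.list_operations():
-    print(operation.metadata, operation.done())
-```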
-
-
-
diff --git a/docs/api/google/generativeai/list_tuned_models.md b/docs/api/google/generativeai/list_tuned_models.md
deleted file mode 100644
index 71558375b..000000000
--- a/docs/api/google/generativeai/list_tuned_models.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-# google.generativeai.list_tuned_models
-
-
-
-
-
-
-
-Calls the API to list all tuned models.
-
-
-
-google.generativeai.list_tuned_models(
- *,
- page_size: (int | None) = 50,
- client: (glm.ModelServiceClient | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> model_types.TunedModelsIterable
-
-
-
-
-
-
-```
-import pprint
-for model in genai.list_tuned_models():
- pprint.pprint(model)
-```
-
-
-
-
-Args |
-
-
-
-
-`page_size`
-
- |
-
-
-How many `types.Models` to fetch per page (api call).
-
- |
-
-
-
-`client`
-
- |
-
-
-You may pass a `glm.ModelServiceClient` instead of using the default client.
-
- |
-
-
-
-`request_options`
-
- |
-
-
-Options for the request.
-
- |
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos.md b/docs/api/google/generativeai/protos.md
deleted file mode 100644
index a01b25324..000000000
--- a/docs/api/google/generativeai/protos.md
+++ /dev/null
@@ -1,384 +0,0 @@
-
-# Module: google.generativeai.protos
-
-
-
-
-
-
-
-This module provides low-level access to the Protocol Buffer "Message" classes used by the API.
-
-
-**For typical usage of this SDK you do not need to use any of these classes.**
-
-Protocol Buffers are the serialization format used by Google APIs. They are strongly typed and efficient.
-
-The `genai` SDK tries to be permissive about what objects it will accept from a user, but in the end
-the SDK always converts input to an appropriate Proto Message object to send as the request. Each API request
-has a `*Request` and `*Response` Message defined here.
-
-If you have any uncertainty about what the API may accept or return, these classes provide the
-complete/unambiguous answer. They come from the `google-ai-generativelanguage` package which is
-generated from a snapshot of the API definition.
-
-```
->>> from google.generativeai import protos
->>> import inspect
->>> print(inspect.getsource(protos.Part))
-```
-
-Proto classes can have "oneof" fields. Use `in` to check which `oneof` field is set.
-
-```
->>> p = protos.Part(text='hello')
->>> 'text' in p
-True
->>> p.inline_data = {'mime_type':'image/png', 'data': b'PNG'}
->>> type(p.inline_data) is protos.Blob
-True
->>> 'inline_data' in p
-True
->>> 'text' in p
-False
-```
-
-Instances of all Message classes can be converted into JSON compatible dictionaries with the following construct
-(Bytes are base64 encoded):
-
-```
->>> p_dict = type(p).to_dict(p)
->>> p_dict
-{'inline_data': {'mime_type': 'image/png', 'data': 'UE5H'}}
-```
-
-A compatible dict can be converted to an instance of a Message class by passing it as the first argument to the
-constructor:
-
-```
->>> p = protos.Part(p_dict)
-inline_data {
- mime_type: "image/png"
- data: "PNG"
-}
-```
-
-Note when converting that `to_dict` accepts additional arguments:
-
-- `use_integers_for_enums: bool = True` - set it to `False` to replace enum integer values with their string
-  names in the output.
-- `including_default_value_fields: bool = True` - set it to `False` to reduce the verbosity of the output.
-
-Additional arguments are described in the docstring:
-
-```
->>> help(protos.Part.to_dict)
-```
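-
-A short sketch of these options (the message and field values are illustrative):
-
-```
-from google.generativeai import protos
-
-rating = protos.SafetyRating(category="HARM_CATEGORY_HARASSMENT", probability="LOW")
-
-# Enum names instead of integers, and omit fields still at their default values:
-print(type(rating).to_dict(rating, use_integers_for_enums=False,
-                           including_default_value_fields=False))
-```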
-
-## Classes
-
-[`class AttributionSourceId`](../../google/generativeai/protos/AttributionSourceId.md): Identifier for the source contributing to this attribution.
-
-[`class BatchCreateChunksRequest`](../../google/generativeai/protos/BatchCreateChunksRequest.md): Request to batch create ``Chunk``\ s.
-
-[`class BatchCreateChunksResponse`](../../google/generativeai/protos/BatchCreateChunksResponse.md): Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
-
-[`class BatchDeleteChunksRequest`](../../google/generativeai/protos/BatchDeleteChunksRequest.md): Request to batch delete ``Chunk``\ s.
-
-[`class BatchEmbedContentsRequest`](../../google/generativeai/protos/BatchEmbedContentsRequest.md): Batch request to get embeddings from the model for a list of prompts.
-
-[`class BatchEmbedContentsResponse`](../../google/generativeai/protos/BatchEmbedContentsResponse.md): The response to a ``BatchEmbedContentsRequest``.
-
-[`class BatchEmbedTextRequest`](../../google/generativeai/protos/BatchEmbedTextRequest.md): Batch request to get a text embedding from the model.
-
-[`class BatchEmbedTextResponse`](../../google/generativeai/protos/BatchEmbedTextResponse.md): The response to a EmbedTextRequest.
-
-[`class BatchUpdateChunksRequest`](../../google/generativeai/protos/BatchUpdateChunksRequest.md): Request to batch update ``Chunk``\ s.
-
-[`class BatchUpdateChunksResponse`](../../google/generativeai/protos/BatchUpdateChunksResponse.md): Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
-
-[`class Blob`](../../google/generativeai/protos/Blob.md): Raw media bytes.
-
-[`class CachedContent`](../../google/generativeai/protos/CachedContent.md): Content that has been preprocessed and can be used in subsequent requests to GenerativeService.
-
-[`class Candidate`](../../google/generativeai/protos/Candidate.md): A response candidate generated from the model.
-
-[`class Chunk`](../../google/generativeai/protos/Chunk.md): A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
-
-[`class ChunkData`](../../google/generativeai/protos/ChunkData.md): Extracted data that represents the ``Chunk`` content.
-
-[`class CitationMetadata`](../../google/generativeai/protos/CitationMetadata.md): A collection of source attributions for a piece of content.
-
-[`class CitationSource`](../../google/generativeai/protos/CitationSource.md): A citation to a source for a portion of a specific response.
-
-[`class CodeExecution`](../../google/generativeai/protos/CodeExecution.md): Tool that executes code generated by the model, and automatically returns the result to the model.
-
-[`class CodeExecutionResult`](../../google/generativeai/protos/CodeExecutionResult.md): Result of executing the ``ExecutableCode``.
-
-[`class Condition`](../../google/generativeai/protos/Condition.md): Filter condition applicable to a single key.
-
-[`class Content`](../../google/generativeai/protos/Content.md): The base structured datatype containing multi-part content of a message.
-
-[`class ContentEmbedding`](../../google/generativeai/protos/ContentEmbedding.md): A list of floats representing an embedding.
-
-[`class ContentFilter`](../../google/generativeai/protos/ContentFilter.md): Content filtering metadata associated with processing a single request.
-
-[`class Corpus`](../../google/generativeai/protos/Corpus.md): A ``Corpus`` is a collection of ``Document``\ s.
-
-[`class CountMessageTokensRequest`](../../google/generativeai/protos/CountMessageTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
-
-[`class CountMessageTokensResponse`](../../google/generativeai/protos/CountMessageTokensResponse.md): A response from ``CountMessageTokens``.
-
-[`class CountTextTokensRequest`](../../google/generativeai/protos/CountTextTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
-
-[`class CountTextTokensResponse`](../../google/generativeai/protos/CountTextTokensResponse.md): A response from ``CountTextTokens``.
-
-[`class CountTokensRequest`](../../google/generativeai/protos/CountTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
-
-[`class CountTokensResponse`](../../google/generativeai/protos/CountTokensResponse.md): A response from ``CountTokens``.
-
-[`class CreateCachedContentRequest`](../../google/generativeai/protos/CreateCachedContentRequest.md): Request to create CachedContent.
-
-[`class CreateChunkRequest`](../../google/generativeai/protos/CreateChunkRequest.md): Request to create a ``Chunk``.
-
-[`class CreateCorpusRequest`](../../google/generativeai/protos/CreateCorpusRequest.md): Request to create a ``Corpus``.
-
-[`class CreateDocumentRequest`](../../google/generativeai/protos/CreateDocumentRequest.md): Request to create a ``Document``.
-
-[`class CreateFileRequest`](../../google/generativeai/protos/CreateFileRequest.md): Request for ``CreateFile``.
-
-[`class CreateFileResponse`](../../google/generativeai/protos/CreateFileResponse.md): Response for ``CreateFile``.
-
-[`class CreatePermissionRequest`](../../google/generativeai/protos/CreatePermissionRequest.md): Request to create a ``Permission``.
-
-[`class CreateTunedModelMetadata`](../../google/generativeai/protos/CreateTunedModelMetadata.md): Metadata about the state and progress of creating a tuned model returned from the long-running operation
-
-[`class CreateTunedModelRequest`](../../google/generativeai/protos/CreateTunedModelRequest.md): Request to create a TunedModel.
-
-[`class CustomMetadata`](../../google/generativeai/protos/CustomMetadata.md): User provided metadata stored as key-value pairs.
-
-[`class Dataset`](../../google/generativeai/protos/Dataset.md): Dataset for training or validation.
-
-[`class DeleteCachedContentRequest`](../../google/generativeai/protos/DeleteCachedContentRequest.md): Request to delete CachedContent.
-
-[`class DeleteChunkRequest`](../../google/generativeai/protos/DeleteChunkRequest.md): Request to delete a ``Chunk``.
-
-[`class DeleteCorpusRequest`](../../google/generativeai/protos/DeleteCorpusRequest.md): Request to delete a ``Corpus``.
-
-[`class DeleteDocumentRequest`](../../google/generativeai/protos/DeleteDocumentRequest.md): Request to delete a ``Document``.
-
-[`class DeleteFileRequest`](../../google/generativeai/protos/DeleteFileRequest.md): Request for ``DeleteFile``.
-
-[`class DeletePermissionRequest`](../../google/generativeai/protos/DeletePermissionRequest.md): Request to delete the ``Permission``.
-
-[`class DeleteTunedModelRequest`](../../google/generativeai/protos/DeleteTunedModelRequest.md): Request to delete a TunedModel.
-
-[`class Document`](../../google/generativeai/protos/Document.md): A ``Document`` is a collection of ``Chunk``\ s.
-
-[`class DynamicRetrievalConfig`](../../google/generativeai/protos/DynamicRetrievalConfig.md): Describes the options to customize dynamic retrieval.
-
-[`class EmbedContentRequest`](../../google/generativeai/protos/EmbedContentRequest.md): Request containing the ``Content`` for the model to embed.
-
-[`class EmbedContentResponse`](../../google/generativeai/protos/EmbedContentResponse.md): The response to an ``EmbedContentRequest``.
-
-[`class EmbedTextRequest`](../../google/generativeai/protos/EmbedTextRequest.md): Request to get a text embedding from the model.
-
-[`class EmbedTextResponse`](../../google/generativeai/protos/EmbedTextResponse.md): The response to a EmbedTextRequest.
-
-[`class Embedding`](../../google/generativeai/protos/Embedding.md): A list of floats representing the embedding.
-
-[`class Example`](../../google/generativeai/protos/Example.md): An input/output example used to instruct the Model.
-
-[`class ExecutableCode`](../../google/generativeai/protos/ExecutableCode.md): Code generated by the model that is meant to be executed, and the result returned to the model.
-
-[`class File`](../../google/generativeai/protos/File.md): A file uploaded to the API.
-
-[`class FileData`](../../google/generativeai/protos/FileData.md): URI based data.
-
-[`class FunctionCall`](../../google/generativeai/protos/FunctionCall.md): A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values.
-
-[`class FunctionCallingConfig`](../../google/generativeai/protos/FunctionCallingConfig.md): Configuration for specifying function calling behavior.
-
-[`class FunctionDeclaration`](../../google/generativeai/protos/FunctionDeclaration.md): Structured representation of a function declaration as defined by the OpenAPI 3.0.3 specification.
-
-[`class FunctionResponse`](../../google/generativeai/protos/FunctionResponse.md): The result output from a ``FunctionCall``: a string representing the FunctionDeclaration.name, plus a structured JSON object containing the function's output, used as context for the model.
-
-[`class GenerateAnswerRequest`](../../google/generativeai/protos/GenerateAnswerRequest.md): Request to generate a grounded answer from the ``Model``.
-
-[`class GenerateAnswerResponse`](../../google/generativeai/protos/GenerateAnswerResponse.md): Response from the model for a grounded answer.
-
-[`class GenerateContentRequest`](../../google/generativeai/protos/GenerateContentRequest.md): Request to generate a completion from the model.
-
-[`class GenerateContentResponse`](../../google/generativeai/protos/GenerateContentResponse.md): Response from the model supporting multiple candidate responses.
-
-[`class GenerateMessageRequest`](../../google/generativeai/protos/GenerateMessageRequest.md): Request to generate a message response from the model.
-
-[`class GenerateMessageResponse`](../../google/generativeai/protos/GenerateMessageResponse.md): The response from the model.
-
-[`class GenerateTextRequest`](../../google/generativeai/protos/GenerateTextRequest.md): Request to generate a text completion response from the model.
-
-[`class GenerateTextResponse`](../../google/generativeai/protos/GenerateTextResponse.md): The response from the model, including candidate completions.
-
-[`class GenerationConfig`](../../google/generativeai/protos/GenerationConfig.md): Configuration options for model generation and outputs.
-
-[`class GetCachedContentRequest`](../../google/generativeai/protos/GetCachedContentRequest.md): Request to read CachedContent.
-
-[`class GetChunkRequest`](../../google/generativeai/protos/GetChunkRequest.md): Request for getting information about a specific ``Chunk``.
-
-[`class GetCorpusRequest`](../../google/generativeai/protos/GetCorpusRequest.md): Request for getting information about a specific ``Corpus``.
-
-[`class GetDocumentRequest`](../../google/generativeai/protos/GetDocumentRequest.md): Request for getting information about a specific ``Document``.
-
-[`class GetFileRequest`](../../google/generativeai/protos/GetFileRequest.md): Request for ``GetFile``.
-
-[`class GetModelRequest`](../../google/generativeai/protos/GetModelRequest.md): Request for getting information about a specific Model.
-
-[`class GetPermissionRequest`](../../google/generativeai/protos/GetPermissionRequest.md): Request for getting information about a specific ``Permission``.
-
-[`class GetTunedModelRequest`](../../google/generativeai/protos/GetTunedModelRequest.md): Request for getting information about a specific Model.
-
-[`class GoogleSearchRetrieval`](../../google/generativeai/protos/GoogleSearchRetrieval.md): Tool to retrieve public web data for grounding, powered by Google.
-
-[`class GroundingAttribution`](../../google/generativeai/protos/GroundingAttribution.md): Attribution for a source that contributed to an answer.
-
-[`class GroundingChunk`](../../google/generativeai/protos/GroundingChunk.md): Grounding chunk.
-
-[`class GroundingMetadata`](../../google/generativeai/protos/GroundingMetadata.md): Metadata returned to client when grounding is enabled.
-
-[`class GroundingPassage`](../../google/generativeai/protos/GroundingPassage.md): Passage included inline with a grounding configuration.
-
-[`class GroundingPassages`](../../google/generativeai/protos/GroundingPassages.md): A repeated list of passages.
-
-[`class GroundingSupport`](../../google/generativeai/protos/GroundingSupport.md): Grounding support.
-
-[`class HarmCategory`](../../google/generativeai/protos/HarmCategory.md): The category of a rating.
-
-[`class Hyperparameters`](../../google/generativeai/protos/Hyperparameters.md): Hyperparameters controlling the tuning process.
-
-[`class ListCachedContentsRequest`](../../google/generativeai/protos/ListCachedContentsRequest.md): Request to list CachedContents.
-
-[`class ListCachedContentsResponse`](../../google/generativeai/protos/ListCachedContentsResponse.md): Response with CachedContents list.
-
-[`class ListChunksRequest`](../../google/generativeai/protos/ListChunksRequest.md): Request for listing ``Chunk``\ s.
-
-[`class ListChunksResponse`](../../google/generativeai/protos/ListChunksResponse.md): Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
-
-[`class ListCorporaRequest`](../../google/generativeai/protos/ListCorporaRequest.md): Request for listing ``Corpora``.
-
-[`class ListCorporaResponse`](../../google/generativeai/protos/ListCorporaResponse.md): Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
-
-[`class ListDocumentsRequest`](../../google/generativeai/protos/ListDocumentsRequest.md): Request for listing ``Document``\ s.
-
-[`class ListDocumentsResponse`](../../google/generativeai/protos/ListDocumentsResponse.md): Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
-
-[`class ListFilesRequest`](../../google/generativeai/protos/ListFilesRequest.md): Request for ``ListFiles``.
-
-[`class ListFilesResponse`](../../google/generativeai/protos/ListFilesResponse.md): Response for ``ListFiles``.
-
-[`class ListModelsRequest`](../../google/generativeai/protos/ListModelsRequest.md): Request for listing all Models.
-
-[`class ListModelsResponse`](../../google/generativeai/protos/ListModelsResponse.md): Response from ``ListModel`` containing a paginated list of Models.
-
-[`class ListPermissionsRequest`](../../google/generativeai/protos/ListPermissionsRequest.md): Request for listing permissions.
-
-[`class ListPermissionsResponse`](../../google/generativeai/protos/ListPermissionsResponse.md): Response from ``ListPermissions`` containing a paginated list of permissions.
-
-[`class ListTunedModelsRequest`](../../google/generativeai/protos/ListTunedModelsRequest.md): Request for listing TunedModels.
-
-[`class ListTunedModelsResponse`](../../google/generativeai/protos/ListTunedModelsResponse.md): Response from ``ListTunedModels`` containing a paginated list of Models.
-
-[`class LogprobsResult`](../../google/generativeai/protos/LogprobsResult.md): Logprobs Result
-
-[`class Message`](../../google/generativeai/protos/Message.md): The base unit of structured text.
-
-[`class MessagePrompt`](../../google/generativeai/protos/MessagePrompt.md): All of the structured input text passed to the model as a prompt.
-
-[`class MetadataFilter`](../../google/generativeai/protos/MetadataFilter.md): User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
-
-[`class Model`](../../google/generativeai/protos/Model.md): Information about a Generative Language Model.
-
-[`class Part`](../../google/generativeai/protos/Part.md): A datatype containing media that is part of a multi-part ``Content`` message.
-
-[`class Permission`](../../google/generativeai/protos/Permission.md): Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g.
-
-[`class PredictRequest`](../../google/generativeai/protos/PredictRequest.md): Request message for [PredictionService.Predict][google.ai.generativelanguage.v1beta.PredictionService.Predict].
-
-[`class PredictResponse`](../../google/generativeai/protos/PredictResponse.md): Response message for [PredictionService.Predict].
-
-[`class QueryCorpusRequest`](../../google/generativeai/protos/QueryCorpusRequest.md): Request for querying a ``Corpus``.
-
-[`class QueryCorpusResponse`](../../google/generativeai/protos/QueryCorpusResponse.md): Response from ``QueryCorpus`` containing a list of relevant chunks.
-
-[`class QueryDocumentRequest`](../../google/generativeai/protos/QueryDocumentRequest.md): Request for querying a ``Document``.
-
-[`class QueryDocumentResponse`](../../google/generativeai/protos/QueryDocumentResponse.md): Response from ``QueryDocument`` containing a list of relevant chunks.
-
-[`class RelevantChunk`](../../google/generativeai/protos/RelevantChunk.md): The information for a chunk relevant to a query.
-
-[`class RetrievalMetadata`](../../google/generativeai/protos/RetrievalMetadata.md): Metadata related to retrieval in the grounding flow.
-
-[`class SafetyFeedback`](../../google/generativeai/protos/SafetyFeedback.md): Safety feedback for an entire request.
-
-[`class SafetyRating`](../../google/generativeai/protos/SafetyRating.md): Safety rating for a piece of content.
-
-[`class SafetySetting`](../../google/generativeai/protos/SafetySetting.md): Safety setting, affecting the safety-blocking behavior.
-
-[`class Schema`](../../google/generativeai/protos/Schema.md): The ``Schema`` object allows the definition of input and output data types.
-
-[`class SearchEntryPoint`](../../google/generativeai/protos/SearchEntryPoint.md): Google search entry point.
-
-[`class Segment`](../../google/generativeai/protos/Segment.md): Segment of the content.
-
-[`class SemanticRetrieverConfig`](../../google/generativeai/protos/SemanticRetrieverConfig.md): Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
-
-[`class StringList`](../../google/generativeai/protos/StringList.md): User provided string values assigned to a single metadata key.
-
-[`class TaskType`](../../google/generativeai/protos/TaskType.md): Type of task for which the embedding will be used.
-
-[`class TextCompletion`](../../google/generativeai/protos/TextCompletion.md): Output text returned from a model.
-
-[`class TextPrompt`](../../google/generativeai/protos/TextPrompt.md): Text given to the model as a prompt.
-
-[`class Tool`](../../google/generativeai/protos/Tool.md): Tool details that the model may use to generate response.
-
-[`class ToolConfig`](../../google/generativeai/protos/ToolConfig.md): The Tool configuration containing parameters for specifying ``Tool`` use in the request.
-
-[`class TransferOwnershipRequest`](../../google/generativeai/protos/TransferOwnershipRequest.md): Request to transfer the ownership of the tuned model.
-
-[`class TransferOwnershipResponse`](../../google/generativeai/protos/TransferOwnershipResponse.md): Response from ``TransferOwnership``.
-
-[`class TunedModel`](../../google/generativeai/protos/TunedModel.md): A fine-tuned model created using ModelService.CreateTunedModel.
-
-[`class TunedModelSource`](../../google/generativeai/protos/TunedModelSource.md): Tuned model as a source for training a new model.
-
-[`class TuningExample`](../../google/generativeai/protos/TuningExample.md): A single example for tuning.
-
-[`class TuningExamples`](../../google/generativeai/protos/TuningExamples.md): A set of tuning examples. Can be training or validation data.
-
-[`class TuningSnapshot`](../../google/generativeai/protos/TuningSnapshot.md): Record for a single tuning step.
-
-[`class TuningTask`](../../google/generativeai/protos/TuningTask.md): Tuning tasks that create tuned models.
-
-[`class Type`](../../google/generativeai/protos/Type.md): Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
-
-[`class UpdateCachedContentRequest`](../../google/generativeai/protos/UpdateCachedContentRequest.md): Request to update CachedContent.
-
-[`class UpdateChunkRequest`](../../google/generativeai/protos/UpdateChunkRequest.md): Request to update a ``Chunk``.
-
-[`class UpdateCorpusRequest`](../../google/generativeai/protos/UpdateCorpusRequest.md): Request to update a ``Corpus``.
-
-[`class UpdateDocumentRequest`](../../google/generativeai/protos/UpdateDocumentRequest.md): Request to update a ``Document``.
-
-[`class UpdatePermissionRequest`](../../google/generativeai/protos/UpdatePermissionRequest.md): Request to update the ``Permission``.
-
-[`class UpdateTunedModelRequest`](../../google/generativeai/protos/UpdateTunedModelRequest.md): Request to update a TunedModel.
-
-[`class VideoMetadata`](../../google/generativeai/protos/VideoMetadata.md): Metadata for a video ``File``.
-
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId.md b/docs/api/google/generativeai/protos/AttributionSourceId.md
deleted file mode 100644
index 83d292303..000000000
--- a/docs/api/google/generativeai/protos/AttributionSourceId.md
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# google.generativeai.protos.AttributionSourceId
-
-
-
-
-
-
-
-Identifier for the source contributing to this attribution.
-
-
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
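-A short sketch of the oneof behavior (the field values are illustrative):
-
-```
->>> from google.generativeai import protos
->>> sid = protos.AttributionSourceId(grounding_passage={'passage_id': 'p1', 'part_index': 0})
->>> 'grounding_passage' in sid
-True
->>> sid.semantic_retriever_chunk = {'source': 'corpora/123', 'chunk': 'corpora/123/documents/abc/chunks/xyz'}
->>> 'grounding_passage' in sid
-False
-```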
-
-
-
-
-
-Attributes |
-
-
-
-
-`grounding_passage`
-
- |
-
-
-`google.ai.generativelanguage.AttributionSourceId.GroundingPassageId`
-
-Identifier for an inline passage.
-
-This field is a member of `oneof`_ ``source``.
-
- |
-
-
-
-`semantic_retriever_chunk`
-
- |
-
-
-`google.ai.generativelanguage.AttributionSourceId.SemanticRetrieverChunk`
-
-Identifier for a ``Chunk`` fetched via Semantic Retriever.
-
-This field is a member of `oneof`_ ``source``.
-
- |
-
-
-
-
-
-## Child Classes
-[`class GroundingPassageId`](../../../google/generativeai/protos/AttributionSourceId/GroundingPassageId.md)
-
-[`class SemanticRetrieverChunk`](../../../google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md)
-
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
deleted file mode 100644
index 99d5e65aa..000000000
--- a/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.AttributionSourceId.GroundingPassageId
-
-
-
-
-
-
-
-Identifier for a part within a ``GroundingPassage``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`passage_id`
-
- |
-
-
-`str`
-
-Output only. ID of the passage matching the
-``GenerateAnswerRequest``'s GroundingPassage.id .
-
- |
-
-
-
-`part_index`
-
- |
-
-
-`int`
-
-Output only. Index of the part within the
-``GenerateAnswerRequest``'s GroundingPassage.content .
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
deleted file mode 100644
index 699a07c0f..000000000
--- a/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
-
-
-
-
-
-
-
-Identifier for a ``Chunk`` retrieved via Semantic Retriever specified in the ``GenerateAnswerRequest`` using ``SemanticRetrieverConfig``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`source`
-
- |
-
-
-`str`
-
-Output only. Name of the source matching the request's
-SemanticRetrieverConfig.source . Example: ``corpora/123``
-or ``corpora/123/documents/abc``
-
- |
-
-
-
-`chunk`
-
- |
-
-
-`str`
-
-Output only. Name of the ``Chunk`` containing the attributed
-text. Example: ``corpora/123/documents/abc/chunks/xyz``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
deleted file mode 100644
index e734ed5b7..000000000
--- a/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.BatchCreateChunksRequest
-
-
-
-
-
-
-
-Request to batch create ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Optional. The name of the ``Document`` where this batch of
-``Chunk``\ s will be created. The parent field in every
-``CreateChunkRequest`` must match this value. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`requests`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CreateChunkRequest]`
-
-Required. The request messages specifying the ``Chunk``\ s
-to create. A maximum of 100 ``Chunk``\ s can be created in a
-batch.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
deleted file mode 100644
index 03eb60ada..000000000
--- a/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.BatchCreateChunksResponse
-
-
-
-
-
-
-
-Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Chunk]`
-
-``Chunk``\ s created.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
deleted file mode 100644
index 74b79a461..000000000
--- a/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# google.generativeai.protos.BatchDeleteChunksRequest
-
-
-
-
-
-
-
-Request to batch delete ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Optional. The name of the ``Document`` containing the
-``Chunk``\ s to delete. The parent field in every
-``DeleteChunkRequest`` must match this value. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`requests`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.DeleteChunkRequest]`
-
-Required. The request messages specifying the ``Chunk``\ s
-to delete.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
deleted file mode 100644
index 73b5ae36c..000000000
--- a/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# google.generativeai.protos.BatchEmbedContentsRequest
-
-
-
-
-
-
-
-Batch request to get embeddings from the model for a list of prompts.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model's resource name. This serves as an ID
-for the Model to use.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-`requests`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.EmbedContentRequest]`
-
-Required. Embed requests for the batch. The model in each of
-these requests must match the model specified
-BatchEmbedContentsRequest.model .
-
- |
-
-
-
-
-
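For reference alongside the removed `BatchEmbedContentsRequest` page, here is a minimal sketch of assembling such a request offline with the Python SDK's `protos` module. The embedding model name and the nested `EmbedContentRequest`/`Content`/`Part` fields are assumptions drawn from the SDK, not something this page specifies.

```python
from google.generativeai import protos

# Placeholder model name used purely for illustration.
MODEL = "models/text-embedding-004"

# Each entry in `requests` must name the same model as the batch request itself.
batch = protos.BatchEmbedContentsRequest(
    model=MODEL,
    requests=[
        protos.EmbedContentRequest(
            model=MODEL,
            content=protos.Content(parts=[protos.Part(text=text)]),
        )
        for text in ["first prompt", "second prompt"]
    ],
)
print(len(batch.requests))
```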
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
deleted file mode 100644
index 27bb06245..000000000
--- a/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.BatchEmbedContentsResponse
-
-
-
-
-
-
-
-The response to a ``BatchEmbedContentsRequest``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`embeddings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.ContentEmbedding]`
-
-Output only. The embeddings for each request,
-in the same order as provided in the batch
-request.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
deleted file mode 100644
index d878bffc5..000000000
--- a/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-# google.generativeai.protos.BatchEmbedTextRequest
-
-
-
-
-
-
-
-Batch request to get a text embedding from the model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the ``Model`` to use for generating
-the embedding. Examples: models/embedding-gecko-001
-
- |
-
-
-
-`texts`
-
- |
-
-
-`MutableSequence[str]`
-
-Optional. The free-form input texts that the
-model will turn into an embedding. The current
-limit is 100 texts, over which an error will be
-thrown.
-
- |
-
-
-
-`requests`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.EmbedTextRequest]`
-
-Optional. Embed requests for the batch. Only one of
-``texts`` or ``requests`` can be set.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
deleted file mode 100644
index 3a9359d1d..000000000
--- a/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.BatchEmbedTextResponse
-
-
-
-
-
-
-
-The response to a EmbedTextRequest.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`embeddings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Embedding]`
-
-Output only. The embeddings generated from
-the input text.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
deleted file mode 100644
index 3a85775b0..000000000
--- a/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.BatchUpdateChunksRequest
-
-
-
-
-
-
-
-Request to batch update ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Optional. The name of the ``Document`` containing the
-``Chunk``\ s to update. The parent field in every
-``UpdateChunkRequest`` must match this value. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`requests`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.UpdateChunkRequest]`
-
-Required. The request messages specifying the ``Chunk``\ s
-to update. A maximum of 100 ``Chunk``\ s can be updated in a
-batch.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
deleted file mode 100644
index bc797699c..000000000
--- a/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.BatchUpdateChunksResponse
-
-
-
-
-
-
-
-Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Chunk]`
-
-``Chunk``\ s updated.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Blob.md b/docs/api/google/generativeai/protos/Blob.md
deleted file mode 100644
index f2b36ca9c..000000000
--- a/docs/api/google/generativeai/protos/Blob.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# google.generativeai.protos.Blob
-
-
-
-
-
-
-
-Raw media bytes.
-
-
-
-Text should not be sent as raw bytes, use the 'text' field.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`mime_type`
-
- |
-
-
-`str`
-
-The IANA standard MIME type of the source data. Examples:
-
-- image/png
-- image/jpeg If an unsupported MIME type is provided, an
- error will be returned. For a complete list of supported
- types, see `Supported file
- formats `__.
-
- |
-
-
-
-`data`
-
- |
-
-
-`bytes`
-
-Raw bytes for media formats.
-
- |
-
-
-
-
-
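For reference alongside the removed `Blob` page, a minimal sketch of building an inline-media part with the Python SDK's `protos` module; the placeholder bytes and the wrapping `Part.inline_data` field are assumptions for illustration, not something this page documents.

```python
from google.generativeai import protos

# Placeholder bytes; in real use this would be the raw contents of an image file.
png_bytes = b"\x89PNG\r\n\x1a\n"

blob = protos.Blob(mime_type="image/png", data=png_bytes)

# A Blob is normally carried inside a Part via its inline_data field.
part = protos.Part(inline_data=blob)
print(part.inline_data.mime_type)
```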
diff --git a/docs/api/google/generativeai/protos/CachedContent.md b/docs/api/google/generativeai/protos/CachedContent.md
deleted file mode 100644
index 063c8fa43..000000000
--- a/docs/api/google/generativeai/protos/CachedContent.md
+++ /dev/null
@@ -1,222 +0,0 @@
-
-# google.generativeai.protos.CachedContent
-
-
-
-
-
-
-
-Content that has been preprocessed and can be used in subsequent request to GenerativeService.
-
-
-
-Cached content can be only used with model it was created for.
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`expire_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Timestamp in UTC of when this resource is considered
-expired. This is *always* provided on output, regardless of
-what was sent on input.
-
-This field is a member of `oneof`_ ``expiration``.
-
- |
-
-
-
-`ttl`
-
- |
-
-
-`google.protobuf.duration_pb2.Duration`
-
-Input only. New TTL for this resource, input
-only.
-
-This field is a member of `oneof`_ ``expiration``.
-
- |
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Optional. Identifier. The resource name referring to the
-cached content. Format: ``cachedContents/{id}``
-
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-Optional. Immutable. The user-generated
-meaningful display name of the cached content.
-Maximum 128 Unicode characters.
-
-
- |
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. Immutable. The name of the ``Model`` to use for
-cached content Format: ``models/{model}``
-
-
- |
-
-
-
-`system_instruction`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Optional. Input only. Immutable. Developer
-set system instruction. Currently text only.
-
-
- |
-
-
-
-`contents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Content]`
-
-Optional. Input only. Immutable. The content
-to cache.
-
- |
-
-
-
-`tools`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Tool]`
-
-Optional. Input only. Immutable. A list of ``Tools`` the
-model may use to generate the next response
-
- |
-
-
-
-`tool_config`
-
- |
-
-
-`google.ai.generativelanguage.ToolConfig`
-
-Optional. Input only. Immutable. Tool config.
-This config is shared for all tools.
-
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. Creation time of the cache
-entry.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. When the cache entry was last
-updated in UTC time.
-
- |
-
-
-
-`usage_metadata`
-
- |
-
-
-`google.ai.generativelanguage.CachedContent.UsageMetadata`
-
-Output only. Metadata on the usage of the
-cached content.
-
- |
-
-
-
-
-
-## Child Classes
-[`class UsageMetadata`](../../../google/generativeai/protos/CachedContent/UsageMetadata.md)
-
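A minimal sketch, under assumed model and text values, of populating the `CachedContent` message documented above. Only a few of its fields are shown, and the message is merely constructed locally, not sent to the service.

```python
from google.generativeai import protos

cached = protos.CachedContent(
    display_name="example-cache",           # optional, immutable display name
    model="models/gemini-1.5-flash-001",    # placeholder model name
    system_instruction=protos.Content(
        parts=[protos.Part(text="You are a terse assistant.")]
    ),
    contents=[
        protos.Content(
            role="user",
            parts=[protos.Part(text="Reference text to cache.")],
        )
    ],
)
print(cached.display_name, cached.model)
```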
diff --git a/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
deleted file mode 100644
index bfb519b55..000000000
--- a/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.CachedContent.UsageMetadata
-
-
-
-
-
-
-
-Metadata on the usage of the cached content.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`total_token_count`
-
- |
-
-
-`int`
-
-Total number of tokens that the cached
-content consumes.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Candidate.md b/docs/api/google/generativeai/protos/Candidate.md
deleted file mode 100644
index c2db87a92..000000000
--- a/docs/api/google/generativeai/protos/Candidate.md
+++ /dev/null
@@ -1,186 +0,0 @@
-
-# google.generativeai.protos.Candidate
-
-
-
-
-
-
-
-A response candidate generated from the model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`index`
-
- |
-
-
-`int`
-
-Output only. Index of the candidate in the
-list of response candidates.
-
-
- |
-
-
-
-`content`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Output only. Generated content returned from
-the model.
-
- |
-
-
-
-`finish_reason`
-
- |
-
-
-`google.ai.generativelanguage.Candidate.FinishReason`
-
-Optional. Output only. The reason why the
-model stopped generating tokens.
-If empty, the model has not stopped generating
-tokens.
-
- |
-
-
-
-`safety_ratings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetyRating]`
-
-List of ratings for the safety of a response
-candidate.
-There is at most one rating per category.
-
- |
-
-
-
-`citation_metadata`
-
- |
-
-
-`google.ai.generativelanguage.CitationMetadata`
-
-Output only. Citation information for model-generated
-candidate.
-
-This field may be populated with recitation information for
-any text included in the ``content``. These are passages
-that are "recited" from copyrighted material in the
-foundational LLM's training data.
-
- |
-
-
-
-`token_count`
-
- |
-
-
-`int`
-
-Output only. Token count for this candidate.
-
- |
-
-
-
-`grounding_attributions`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.GroundingAttribution]`
-
-Output only. Attribution information for sources that
-contributed to a grounded answer.
-
-This field is populated for ``GenerateAnswer`` calls.
-
- |
-
-
-
-`grounding_metadata`
-
- |
-
-
-`google.ai.generativelanguage.GroundingMetadata`
-
-Output only. Grounding metadata for the candidate.
-
-This field is populated for ``GenerateContent`` calls.
-
- |
-
-
-
-`avg_logprobs`
-
- |
-
-
-`float`
-
-Output only.
-
- |
-
-
-
-`logprobs_result`
-
- |
-
-
-`google.ai.generativelanguage.LogprobsResult`
-
-Output only. Log-likelihood scores for the
-response tokens and top tokens
-
- |
-
-
-
-
-
-## Child Classes
-[`class FinishReason`](../../../google/generativeai/protos/Candidate/FinishReason.md)
-
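A minimal sketch of reading the `Candidate` fields documented above. The candidate is built by hand here so the snippet runs offline; in practice candidates arrive inside a `GenerateContentResponse`.

```python
from google.generativeai import protos

candidate = protos.Candidate(
    index=0,
    content=protos.Content(role="model", parts=[protos.Part(text="Hello!")]),
    finish_reason=protos.Candidate.FinishReason.STOP,
)

# Typical consumer-side check on the documented fields.
if candidate.finish_reason == protos.Candidate.FinishReason.STOP:
    print("".join(part.text for part in candidate.content.parts))
```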
diff --git a/docs/api/google/generativeai/protos/Candidate/FinishReason.md b/docs/api/google/generativeai/protos/Candidate/FinishReason.md
deleted file mode 100644
index 9067d6ff4..000000000
--- a/docs/api/google/generativeai/protos/Candidate/FinishReason.md
+++ /dev/null
@@ -1,876 +0,0 @@
-
-# google.generativeai.protos.Candidate.FinishReason
-
-
-
-
-
-
-
-Defines the reason why the model stopped generating tokens.
-
-
-google.generativeai.protos.Candidate.FinishReason(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`FINISH_REASON_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Default value. This value is unused.
-
- |
-
-
-
-`STOP`
-
- |
-
-
-`1`
-
-Natural stop point of the model or provided
-stop sequence.
-
- |
-
-
-
-`MAX_TOKENS`
-
- |
-
-
-`2`
-
-The maximum number of tokens as specified in
-the request was reached.
-
- |
-
-
-
-`SAFETY`
-
- |
-
-
-`3`
-
-The response candidate content was flagged
-for safety reasons.
-
- |
-
-
-
-`RECITATION`
-
- |
-
-
-`4`
-
-The response candidate content was flagged
-for recitation reasons.
-
- |
-
-
-
-`LANGUAGE`
-
- |
-
-
-`6`
-
-The response candidate content was flagged
-for using an unsupported language.
-
- |
-
-
-
-`OTHER`
-
- |
-
-
-`5`
-
-Unknown reason.
-
- |
-
-
-
-`BLOCKLIST`
-
- |
-
-
-`7`
-
-Token generation stopped because the content
-contains forbidden terms.
-
- |
-
-
-
-`PROHIBITED_CONTENT`
-
- |
-
-
-`8`
-
-Token generation stopped for potentially
-containing prohibited content.
-
- |
-
-
-
-`SPII`
-
- |
-
-
-`9`
-
-Token generation stopped because the content
-potentially contains Sensitive Personally
-Identifiable Information (SPII).
-
- |
-
-
-
-`MALFORMED_FUNCTION_CALL`
-
- |
-
-
-`10`
-
-The function call generated by the model is
-invalid.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-BLOCKLIST
-
- |
-
-
-``
-
- |
-
-
-
-FINISH_REASON_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-LANGUAGE
-
- |
-
-
-``
-
- |
-
-
-
-MALFORMED_FUNCTION_CALL
-
- |
-
-
-``
-
- |
-
-
-
-MAX_TOKENS
-
- |
-
-
-``
-
- |
-
-
-
-OTHER
-
- |
-
-
-``
-
- |
-
-
-
-PROHIBITED_CONTENT
-
- |
-
-
-``
-
- |
-
-
-
-RECITATION
-
- |
-
-
-``
-
- |
-
-
-
-SAFETY
-
- |
-
-
-``
-
- |
-
-
-
-SPII
-
- |
-
-
-``
-
- |
-
-
-
-STOP
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/Chunk.md b/docs/api/google/generativeai/protos/Chunk.md
deleted file mode 100644
index b1fbdb9e6..000000000
--- a/docs/api/google/generativeai/protos/Chunk.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-# google.generativeai.protos.Chunk
-
-
-
-
-
-
-
-A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
-
-
- A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Immutable. Identifier. The ``Chunk`` resource name. The ID
-(name excluding the `corpora/*/documents/*/chunks/` prefix)
-can contain up to 40 characters that are lowercase
-alphanumeric or dashes (-). The ID cannot start or end with
-a dash. If the name is empty on create, a random
-12-character unique ID will be generated. Example:
-``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c``
-
- |
-
-
-
-`data`
-
- |
-
-
-`google.ai.generativelanguage.ChunkData`
-
-Required. The content for the ``Chunk``, such as the text
-string. The maximum number of tokens per chunk is 2043.
-
- |
-
-
-
-`custom_metadata`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
-
-Optional. User provided custom metadata stored as key-value
-pairs. The maximum number of ``CustomMetadata`` per chunk is
-20.
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Chunk`` was
-created.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Chunk`` was last
-updated.
-
- |
-
-
-
-`state`
-
- |
-
-
-`google.ai.generativelanguage.Chunk.State`
-
-Output only. Current state of the ``Chunk``.
-
- |
-
-
-
-
-
-## Child Classes
-[`class State`](../../../google/generativeai/protos/Chunk/State.md)
-
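A minimal sketch of constructing the `Chunk` message documented above, using the `ChunkData.string_value` field described on the neighbouring removed page; the passage text is a placeholder.

```python
from google.generativeai import protos

chunk = protos.Chunk(
    data=protos.ChunkData(string_value="A short passage to index."),
)

# `name` may be left empty on create; the service assigns a random ID.
print(chunk.data.string_value)
```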
diff --git a/docs/api/google/generativeai/protos/Chunk/State.md b/docs/api/google/generativeai/protos/Chunk/State.md
deleted file mode 100644
index c825186bf..000000000
--- a/docs/api/google/generativeai/protos/Chunk/State.md
+++ /dev/null
@@ -1,699 +0,0 @@
-
-# google.generativeai.protos.Chunk.State
-
-
-
-
-
-
-
-States for the lifecycle of a ``Chunk``.
-
-
-google.generativeai.protos.Chunk.State(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`STATE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-The default value. This value is used if the
-state is omitted.
-
- |
-
-
-
-`STATE_PENDING_PROCESSING`
-
- |
-
-
-`1`
-
-``Chunk`` is being processed (embedding and vector storage).
-
- |
-
-
-
-`STATE_ACTIVE`
-
- |
-
-
-`2`
-
-``Chunk`` is processed and available for querying.
-
- |
-
-
-
-`STATE_FAILED`
-
- |
-
-
-`10`
-
-``Chunk`` failed processing.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-STATE_ACTIVE
-
- |
-
-
-``
-
- |
-
-
-
-STATE_FAILED
-
- |
-
-
-``
-
- |
-
-
-
-STATE_PENDING_PROCESSING
-
- |
-
-
-``
-
- |
-
-
-
-STATE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/ChunkData.md b/docs/api/google/generativeai/protos/ChunkData.md
deleted file mode 100644
index d07af3291..000000000
--- a/docs/api/google/generativeai/protos/ChunkData.md
+++ /dev/null
@@ -1,49 +0,0 @@
-
-# google.generativeai.protos.ChunkData
-
-
-
-
-
-
-
-Extracted data that represents the ``Chunk`` content.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`string_value`
-
- |
-
-
-`str`
-
-The ``Chunk`` content as a string. The maximum number of
-tokens per chunk is 2043.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CitationMetadata.md b/docs/api/google/generativeai/protos/CitationMetadata.md
deleted file mode 100644
index ddf7c3a25..000000000
--- a/docs/api/google/generativeai/protos/CitationMetadata.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.CitationMetadata
-
-
-
-
-
-
-
-A collection of source attributions for a piece of content.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`citation_sources`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CitationSource]`
-
-Citations to sources for a specific response.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CitationSource.md b/docs/api/google/generativeai/protos/CitationSource.md
deleted file mode 100644
index 4eb956912..000000000
--- a/docs/api/google/generativeai/protos/CitationSource.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-# google.generativeai.protos.CitationSource
-
-
-
-
-
-
-
-A citation to a source for a portion of a specific response.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`start_index`
-
- |
-
-
-`int`
-
-Optional. Start of segment of the response
-that is attributed to this source.
-
-Index indicates the start of the segment,
-measured in bytes.
-
-
- |
-
-
-
-`end_index`
-
- |
-
-
-`int`
-
-Optional. End of the attributed segment,
-exclusive.
-
-
- |
-
-
-
-`uri`
-
- |
-
-
-`str`
-
-Optional. URI that is attributed as a source
-for a portion of the text.
-
-
- |
-
-
-
-`license_`
-
- |
-
-
-`str`
-
-Optional. License for the GitHub project that
-is attributed as a source for segment.
-
-License info is required for code citations.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CodeExecution.md b/docs/api/google/generativeai/protos/CodeExecution.md
deleted file mode 100644
index d8a467e21..000000000
--- a/docs/api/google/generativeai/protos/CodeExecution.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# google.generativeai.protos.CodeExecution
-
-
-
-
-
-
-
-Tool that executes code generated by the model, and automatically returns the result to the model.
-
-
-
-See also ``ExecutableCode`` and ``CodeExecutionResult`` which are
-only generated when using this tool.
-
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult.md b/docs/api/google/generativeai/protos/CodeExecutionResult.md
deleted file mode 100644
index a02521651..000000000
--- a/docs/api/google/generativeai/protos/CodeExecutionResult.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# google.generativeai.protos.CodeExecutionResult
-
-
-
-
-
-
-
-Result of executing the ``ExecutableCode``.
-
-
-
-Only generated when using the ``CodeExecution``, and always follows
-a ``part`` containing the ``ExecutableCode``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`outcome`
-
- |
-
-
-`google.ai.generativelanguage.CodeExecutionResult.Outcome`
-
-Required. Outcome of the code execution.
-
- |
-
-
-
-`output`
-
- |
-
-
-`str`
-
-Optional. Contains stdout when code execution
-is successful, stderr or other description
-otherwise.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Outcome`](../../../google/generativeai/protos/CodeExecutionResult/Outcome.md)
-
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
deleted file mode 100644
index 81894cf17..000000000
--- a/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
+++ /dev/null
@@ -1,702 +0,0 @@
-
-# google.generativeai.protos.CodeExecutionResult.Outcome
-
-
-
-
-
-
-
-Enumeration of possible outcomes of the code execution.
-
-
-google.generativeai.protos.CodeExecutionResult.Outcome(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`OUTCOME_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Unspecified status. This value should not be
-used.
-
- |
-
-
-
-`OUTCOME_OK`
-
- |
-
-
-`1`
-
-Code execution completed successfully.
-
- |
-
-
-
-`OUTCOME_FAILED`
-
- |
-
-
-`2`
-
-Code execution finished but with a failure. ``stderr``
-should contain the reason.
-
- |
-
-
-
-`OUTCOME_DEADLINE_EXCEEDED`
-
- |
-
-
-`3`
-
-Code execution ran for too long, and was
-cancelled. There may or may not be a partial
-output present.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-OUTCOME_DEADLINE_EXCEEDED
-
- |
-
-
-``
-
- |
-
-
-
-OUTCOME_FAILED
-
- |
-
-
-``
-
- |
-
-
-
-OUTCOME_OK
-
- |
-
-
-``
-
- |
-
-
-
-OUTCOME_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/Condition.md b/docs/api/google/generativeai/protos/Condition.md
deleted file mode 100644
index 64387da7a..000000000
--- a/docs/api/google/generativeai/protos/Condition.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# google.generativeai.protos.Condition
-
-
-
-
-
-
-
-Filter condition applicable to a single key.
-
-
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`string_value`
-
- |
-
-
-`str`
-
-The string value to filter the metadata on.
-
-This field is a member of `oneof`_ ``value``.
-
- |
-
-
-
-`numeric_value`
-
- |
-
-
-`float`
-
-The numeric value to filter the metadata on.
-
-This field is a member of `oneof`_ ``value``.
-
- |
-
-
-
-`operation`
-
- |
-
-
-`google.ai.generativelanguage.Condition.Operator`
-
-Required. Operator applied to the given
-key-value pair to trigger the condition.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Operator`](../../../google/generativeai/protos/Condition/Operator.md)
-
diff --git a/docs/api/google/generativeai/protos/Condition/Operator.md b/docs/api/google/generativeai/protos/Condition/Operator.md
deleted file mode 100644
index d77479498..000000000
--- a/docs/api/google/generativeai/protos/Condition/Operator.md
+++ /dev/null
@@ -1,820 +0,0 @@
-
-# google.generativeai.protos.Condition.Operator
-
-
-
-
-
-
-
-Defines the valid operators that can be applied to a key-value pair.
-
-
-google.generativeai.protos.Condition.Operator(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`OPERATOR_UNSPECIFIED`
-
- |
-
-
-`0`
-
-The default value. This value is unused.
-
- |
-
-
-
-`LESS`
-
- |
-
-
-`1`
-
-Supported by numeric.
-
- |
-
-
-
-`LESS_EQUAL`
-
- |
-
-
-`2`
-
-Supported by numeric.
-
- |
-
-
-
-`EQUAL`
-
- |
-
-
-`3`
-
-Supported by numeric & string.
-
- |
-
-
-
-`GREATER_EQUAL`
-
- |
-
-
-`4`
-
-Supported by numeric.
-
- |
-
-
-
-`GREATER`
-
- |
-
-
-`5`
-
-Supported by numeric.
-
- |
-
-
-
-`NOT_EQUAL`
-
- |
-
-
-`6`
-
-Supported by numeric & string.
-
- |
-
-
-
-`INCLUDES`
-
- |
-
-
-`7`
-
-Supported by string only when ``CustomMetadata`` value type
-for the given key has a ``string_list_value``.
-
- |
-
-
-
-`EXCLUDES`
-
- |
-
-
-`8`
-
-Supported by string only when ``CustomMetadata`` value type
-for the given key has a ``string_list_value``.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-EQUAL
-
- |
-
-
-``
-
- |
-
-
-
-EXCLUDES
-
- |
-
-
-``
-
- |
-
-
-
-GREATER
-
- |
-
-
-``
-
- |
-
-
-
-GREATER_EQUAL
-
- |
-
-
-``
-
- |
-
-
-
-INCLUDES
-
- |
-
-
-``
-
- |
-
-
-
-LESS
-
- |
-
-
-``
-
- |
-
-
-
-LESS_EQUAL
-
- |
-
-
-``
-
- |
-
-
-
-NOT_EQUAL
-
- |
-
-
-``
-
- |
-
-
-
-OPERATOR_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/Content.md b/docs/api/google/generativeai/protos/Content.md
deleted file mode 100644
index f4ae11276..000000000
--- a/docs/api/google/generativeai/protos/Content.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# google.generativeai.protos.Content
-
-
-
-
-
-
-
-The base structured datatype containing multi-part content of a message.
-
-
-
-A ``Content`` includes a ``role`` field designating the producer of
-the ``Content`` and a ``parts`` field containing multi-part data
-that contains the content of the message turn.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parts`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Part]`
-
-Ordered ``Parts`` that constitute a single message. Parts
-may have different MIME types.
-
- |
-
-
-
-`role`
-
- |
-
-
-`str`
-
-Optional. The producer of the content. Must
-be either 'user' or 'model'.
-Useful to set for multi-turn conversations,
-otherwise can be left blank or unset.
-
- |
-
-
-
-
-
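A minimal sketch of the `Content` message documented above, assuming the SDK's `Part` message with a `text` field (not covered in this excerpt).

```python
from google.generativeai import protos

# A single user turn made of two text parts.
content = protos.Content(
    role="user",
    parts=[
        protos.Part(text="Summarize this:"),
        protos.Part(text="Protocol buffers are a language-neutral data format."),
    ],
)
print(len(content.parts), content.role)
```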
diff --git a/docs/api/google/generativeai/protos/ContentEmbedding.md b/docs/api/google/generativeai/protos/ContentEmbedding.md
deleted file mode 100644
index b243b5b92..000000000
--- a/docs/api/google/generativeai/protos/ContentEmbedding.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.ContentEmbedding
-
-
-
-
-
-
-
-A list of floats representing an embedding.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`values`
-
- |
-
-
-`MutableSequence[float]`
-
-The embedding values.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ContentFilter.md b/docs/api/google/generativeai/protos/ContentFilter.md
deleted file mode 100644
index a324b43cf..000000000
--- a/docs/api/google/generativeai/protos/ContentFilter.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# google.generativeai.protos.ContentFilter
-
-
-
-
-
-
-
-Content filtering metadata associated with processing a single request.
-
-
-ContentFilter contains a reason and an optional supporting
-string. The reason may be unspecified.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`reason`
-
- |
-
-
-`google.ai.generativelanguage.ContentFilter.BlockedReason`
-
-The reason content was blocked during request
-processing.
-
- |
-
-
-
-`message`
-
- |
-
-
-`str`
-
-A string that describes the filtering
-behavior in more detail.
-
-
- |
-
-
-
-
-
-## Child Classes
-[`class BlockedReason`](../../../google/generativeai/types/BlockedReason.md)
-
diff --git a/docs/api/google/generativeai/protos/Corpus.md b/docs/api/google/generativeai/protos/Corpus.md
deleted file mode 100644
index 0022cce5c..000000000
--- a/docs/api/google/generativeai/protos/Corpus.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-# google.generativeai.protos.Corpus
-
-
-
-
-
-
-
-A ``Corpus`` is a collection of ``Document``\ s.
-
-
- A project can
-create up to 5 corpora.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Immutable. Identifier. The ``Corpus`` resource name. The ID
-(name excluding the "corpora/" prefix) can contain up to 40
-characters that are lowercase alphanumeric or dashes (-).
-The ID cannot start or end with a dash. If the name is empty
-on create, a unique name will be derived from
-``display_name`` along with a 12 character random suffix.
-Example: ``corpora/my-awesome-corpora-123a456b789c``
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-Optional. The human-readable display name for the
-``Corpus``. The display name must be no more than 512
-characters in length, including spaces. Example: "Docs on
-Semantic Retriever".
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Corpus`` was
-created.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Corpus`` was last
-updated.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensRequest.md b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
deleted file mode 100644
index 548a8b60f..000000000
--- a/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# google.generativeai.protos.CountMessageTokensRequest
-
-
-
-
-
-
-
-Counts the number of tokens in the ``prompt`` sent to a model.
-
-
-
-Models may tokenize text differently, so each model may return a
-different ``token_count``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model's resource name. This serves as an ID
-for the Model to use.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-`prompt`
-
- |
-
-
-`google.ai.generativelanguage.MessagePrompt`
-
-Required. The prompt, whose token count is to
-be returned.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensResponse.md b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
deleted file mode 100644
index d7a45c871..000000000
--- a/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-# google.generativeai.protos.CountMessageTokensResponse
-
-
-
-
-
-
-
-A response from ``CountMessageTokens``.
-
-
-
-It returns the model's ``token_count`` for the ``prompt``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`token_count`
-
- |
-
-
-`int`
-
-The number of tokens that the ``model`` tokenizes the
-``prompt`` into.
-
-Always non-negative.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountTextTokensRequest.md b/docs/api/google/generativeai/protos/CountTextTokensRequest.md
deleted file mode 100644
index f208e45c5..000000000
--- a/docs/api/google/generativeai/protos/CountTextTokensRequest.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# google.generativeai.protos.CountTextTokensRequest
-
-
-
-
-
-
-
-Counts the number of tokens in the ``prompt`` sent to a model.
-
-
-
-Models may tokenize text differently, so each model may return a
-different ``token_count``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model's resource name. This serves as an ID
-for the Model to use.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-`prompt`
-
- |
-
-
-`google.ai.generativelanguage.TextPrompt`
-
-Required. The free-form input text given to
-the model as a prompt.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountTextTokensResponse.md b/docs/api/google/generativeai/protos/CountTextTokensResponse.md
deleted file mode 100644
index a3f36b0e5..000000000
--- a/docs/api/google/generativeai/protos/CountTextTokensResponse.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-# google.generativeai.protos.CountTextTokensResponse
-
-
-
-
-
-
-
-A response from ``CountTextTokens``.
-
-
-
-It returns the model's ``token_count`` for the ``prompt``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`token_count`
-
- |
-
-
-`int`
-
-The number of tokens that the ``model`` tokenizes the
-``prompt`` into.
-
-Always non-negative.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountTokensRequest.md b/docs/api/google/generativeai/protos/CountTokensRequest.md
deleted file mode 100644
index df49484e4..000000000
--- a/docs/api/google/generativeai/protos/CountTokensRequest.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-# google.generativeai.protos.CountTokensRequest
-
-
-
-
-
-
-
-Counts the number of tokens in the ``prompt`` sent to a model.
-
-
-
-Models may tokenize text differently, so each model may return a
-different ``token_count``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model's resource name. This serves as an ID
-for the Model to use.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-`contents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Content]`
-
-Optional. The input given to the model as a prompt. This
-field is ignored when ``generate_content_request`` is set.
-
- |
-
-
-
-`generate_content_request`
-
- |
-
-
-`google.ai.generativelanguage.GenerateContentRequest`
-
-Optional. The overall input given to the ``Model``. This
-includes the prompt as well as other model steering
-information like `system
-instructions `__,
-and/or function declarations for `function
-calling `__.
-``Model``\ s/\ ``Content``\ s and
-``generate_content_request``\ s are mutually exclusive. You
-can either send ``Model`` + ``Content``\ s or a
-``generate_content_request``, but never both.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CountTokensResponse.md b/docs/api/google/generativeai/protos/CountTokensResponse.md
deleted file mode 100644
index a17b73761..000000000
--- a/docs/api/google/generativeai/protos/CountTokensResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.CountTokensResponse
-
-
-
-
-
-
-
-A response from ``CountTokens``.
-
-
-
-It returns the model's ``token_count`` for the ``prompt``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`total_tokens`
-
- |
-
-
-`int`
-
-The number of tokens that the ``Model`` tokenizes the
-``prompt`` into. Always non-negative.
-
- |
-
-
-
-`cached_content_token_count`
-
- |
-
-
-`int`
-
-Number of tokens in the cached part of the
-prompt (the cached content).
-
- |
-
-
-
-
-
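The `CountTokensRequest`/`CountTokensResponse` pair above is wrapped by a single call in the Python SDK. A minimal sketch, assuming the `google-generativeai` package these protos ship with; the API key, model name, and prompt are illustrative placeholders:

```python
# Count tokens for a prompt; the SDK builds the CountTokensRequest and
# returns an object mirroring CountTokensResponse.
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")            # placeholder key
model = genai.GenerativeModel("gemini-1.5-flash")  # illustrative model name

response = model.count_tokens("How many tokens is this prompt?")
print(response.total_tokens)                       # CountTokensResponse.total_tokens
```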
diff --git a/docs/api/google/generativeai/protos/CreateCachedContentRequest.md b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
deleted file mode 100644
index 4b83d5d77..000000000
--- a/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.CreateCachedContentRequest
-
-
-
-
-
-
-
-Request to create CachedContent.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`cached_content`
-
- |
-
-
-`google.ai.generativelanguage.CachedContent`
-
-Required. The cached content to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateChunkRequest.md b/docs/api/google/generativeai/protos/CreateChunkRequest.md
deleted file mode 100644
index 3ab8abffa..000000000
--- a/docs/api/google/generativeai/protos/CreateChunkRequest.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.CreateChunkRequest
-
-
-
-
-
-
-
-Request to create a ``Chunk``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The name of the ``Document`` where this ``Chunk``
-will be created. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`chunk`
-
- |
-
-
-`google.ai.generativelanguage.Chunk`
-
-Required. The ``Chunk`` to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateCorpusRequest.md b/docs/api/google/generativeai/protos/CreateCorpusRequest.md
deleted file mode 100644
index 5eeb5eb4e..000000000
--- a/docs/api/google/generativeai/protos/CreateCorpusRequest.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.CreateCorpusRequest
-
-
-
-
-
-
-
-Request to create a ``Corpus``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`corpus`
-
- |
-
-
-`google.ai.generativelanguage.Corpus`
-
-Required. The ``Corpus`` to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateDocumentRequest.md b/docs/api/google/generativeai/protos/CreateDocumentRequest.md
deleted file mode 100644
index 43def7bc5..000000000
--- a/docs/api/google/generativeai/protos/CreateDocumentRequest.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.CreateDocumentRequest
-
-
-
-
-
-
-
-Request to create a ``Document``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The name of the ``Corpus`` where this ``Document``
-will be created. Example: ``corpora/my-corpus-123``
-
- |
-
-
-
-`document`
-
- |
-
-
-`google.ai.generativelanguage.Document`
-
-Required. The ``Document`` to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateFileRequest.md b/docs/api/google/generativeai/protos/CreateFileRequest.md
deleted file mode 100644
index 0b3d1b297..000000000
--- a/docs/api/google/generativeai/protos/CreateFileRequest.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.CreateFileRequest
-
-
-
-
-
-
-
-Request for ``CreateFile``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`file`
-
- |
-
-
-`google.ai.generativelanguage.File`
-
-Optional. Metadata for the file to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateFileResponse.md b/docs/api/google/generativeai/protos/CreateFileResponse.md
deleted file mode 100644
index 2c4f591ae..000000000
--- a/docs/api/google/generativeai/protos/CreateFileResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.CreateFileResponse
-
-
-
-
-
-
-
-Response for ``CreateFile``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`file`
-
- |
-
-
-`google.ai.generativelanguage.File`
-
-Metadata for the created file.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreatePermissionRequest.md b/docs/api/google/generativeai/protos/CreatePermissionRequest.md
deleted file mode 100644
index c6b2bd653..000000000
--- a/docs/api/google/generativeai/protos/CreatePermissionRequest.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.CreatePermissionRequest
-
-
-
-
-
-
-
-Request to create a ``Permission``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The parent resource of the ``Permission``.
-Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}``
-
- |
-
-
-
-`permission`
-
- |
-
-
-`google.ai.generativelanguage.Permission`
-
-Required. The permission to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
deleted file mode 100644
index 89e69b822..000000000
--- a/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-# google.generativeai.protos.CreateTunedModelMetadata
-
-
-
-
-
-
-
-Metadata about the state and progress of creating a tuned model, returned from the long-running operation.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_model`
-
- |
-
-
-`str`
-
-Name of the tuned model associated with the
-tuning operation.
-
- |
-
-
-
-`total_steps`
-
- |
-
-
-`int`
-
-The total number of tuning steps.
-
- |
-
-
-
-`completed_steps`
-
- |
-
-
-`int`
-
-The number of steps completed.
-
- |
-
-
-
-`completed_percent`
-
- |
-
-
-`float`
-
-The completed percentage for the tuning
-operation.
-
- |
-
-
-
-`snapshots`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
-
-Metrics collected during tuning.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelRequest.md b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
deleted file mode 100644
index 198579199..000000000
--- a/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.CreateTunedModelRequest
-
-
-
-
-
-
-
-Request to create a TunedModel.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_model_id`
-
- |
-
-
-`str`
-
-Optional. The unique id for the tuned model if specified.
-This value should be up to 40 characters, the first
-character must be a letter, the last could be a letter or a
-number. The id must match the regular expression:
-``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
-
-
- |
-
-
-
-`tuned_model`
-
- |
-
-
-`google.ai.generativelanguage.TunedModel`
-
-Required. The tuned model to create.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/CustomMetadata.md b/docs/api/google/generativeai/protos/CustomMetadata.md
deleted file mode 100644
index 1c9426b2f..000000000
--- a/docs/api/google/generativeai/protos/CustomMetadata.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-# google.generativeai.protos.CustomMetadata
-
-
-
-
-
-
-
-User provided metadata stored as key-value pairs.
-
-
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`string_value`
-
- |
-
-
-`str`
-
-The string value of the metadata to store.
-
-This field is a member of `oneof`_ ``value``.
-
- |
-
-
-
-`string_list_value`
-
- |
-
-
-`google.ai.generativelanguage.StringList`
-
-The StringList value of the metadata to
-store.
-
-This field is a member of `oneof`_ ``value``.
-
- |
-
-
-
-`numeric_value`
-
- |
-
-
-`float`
-
-The numeric value of the metadata to store.
-
-This field is a member of `oneof`_ ``value``.
-
- |
-
-
-
-`key`
-
- |
-
-
-`str`
-
-Required. The key of the metadata to store.
-
- |
-
-
-
-
-
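The `value` oneof described above means only one of `string_value`, `string_list_value`, or `numeric_value` can be populated at a time. A minimal sketch of that behaviour using the proto wrappers, assuming the `google-generativeai` package; the key and values are illustrative:

```python
# Setting one member of the `value` oneof clears the others.
import google.generativeai as genai

meta = genai.protos.CustomMetadata(key="category", string_value="news")
meta.numeric_value = 3.0   # switching the oneof resets string_value
print(meta.string_value)   # prints an empty string
print(meta.numeric_value)  # prints 3.0
```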
diff --git a/docs/api/google/generativeai/protos/Dataset.md b/docs/api/google/generativeai/protos/Dataset.md
deleted file mode 100644
index 32cc28cf1..000000000
--- a/docs/api/google/generativeai/protos/Dataset.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.Dataset
-
-
-
-
-
-
-
-Dataset for training or validation.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`examples`
-
- |
-
-
-`google.ai.generativelanguage.TuningExamples`
-
-Optional. Inline examples.
-
-This field is a member of `oneof`_ ``dataset``.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
deleted file mode 100644
index 184707486..000000000
--- a/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.DeleteCachedContentRequest
-
-
-
-
-
-
-
-Request to delete CachedContent.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name referring to the content cache
-entry Format: ``cachedContents/{id}``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteChunkRequest.md b/docs/api/google/generativeai/protos/DeleteChunkRequest.md
deleted file mode 100644
index 514a124c5..000000000
--- a/docs/api/google/generativeai/protos/DeleteChunkRequest.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.DeleteChunkRequest
-
-
-
-
-
-
-
-Request to delete a ``Chunk``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the ``Chunk`` to delete.
-Example:
-``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteCorpusRequest.md b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
deleted file mode 100644
index 11cf262f5..000000000
--- a/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.DeleteCorpusRequest
-
-
-
-
-
-
-
-Request to delete a ``Corpus``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the ``Corpus``. Example:
-``corpora/my-corpus-123``
-
- |
-
-
-
-`force`
-
- |
-
-
-`bool`
-
-Optional. If set to true, any ``Document``\ s and objects
-related to this ``Corpus`` will also be deleted.
-
-If false (the default), a ``FAILED_PRECONDITION`` error will
-be returned if ``Corpus`` contains any ``Document``\ s.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteDocumentRequest.md b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
deleted file mode 100644
index 698944b2f..000000000
--- a/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.DeleteDocumentRequest
-
-
-
-
-
-
-
-Request to delete a ``Document``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the ``Document`` to delete.
-Example: ``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`force`
-
- |
-
-
-`bool`
-
-Optional. If set to true, any ``Chunk``\ s and objects
-related to this ``Document`` will also be deleted.
-
-If false (the default), a ``FAILED_PRECONDITION`` error will
-be returned if ``Document`` contains any ``Chunk``\ s.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteFileRequest.md b/docs/api/google/generativeai/protos/DeleteFileRequest.md
deleted file mode 100644
index e0c28ae36..000000000
--- a/docs/api/google/generativeai/protos/DeleteFileRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.DeleteFileRequest
-
-
-
-
-
-
-
-Request for ``DeleteFile``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``File`` to delete. Example:
-``files/abc-123``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeletePermissionRequest.md b/docs/api/google/generativeai/protos/DeletePermissionRequest.md
deleted file mode 100644
index de773d62b..000000000
--- a/docs/api/google/generativeai/protos/DeletePermissionRequest.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.DeletePermissionRequest
-
-
-
-
-
-
-
-Request to delete the ``Permission``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the permission. Formats:
-``tunedModels/{tuned_model}/permissions/{permission}``
-``corpora/{corpus}/permissions/{permission}``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
deleted file mode 100644
index f335c1163..000000000
--- a/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.DeleteTunedModelRequest
-
-
-
-
-
-
-
-Request to delete a TunedModel.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the model. Format:
-``tunedModels/my-model-id``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Document.md b/docs/api/google/generativeai/protos/Document.md
deleted file mode 100644
index 96be59407..000000000
--- a/docs/api/google/generativeai/protos/Document.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
-# google.generativeai.protos.Document
-
-
-
-
-
-
-
-A ``Document`` is a collection of ``Chunk``\ s.
-
-
- A ``Corpus`` can
-have a maximum of 10,000 ``Document``\ s.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Immutable. Identifier. The ``Document`` resource name. The
-ID (name excluding the `corpora/*/documents/` prefix) can
-contain up to 40 characters that are lowercase alphanumeric
-or dashes (-). The ID cannot start or end with a dash. If
-the name is empty on create, a unique name will be derived
-from ``display_name`` along with a 12 character random
-suffix. Example:
-``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c``
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-Optional. The human-readable display name for the
-``Document``. The display name must be no more than 512
-characters in length, including spaces. Example: "Semantic
-Retriever Documentation".
-
- |
-
-
-
-`custom_metadata`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
-
-Optional. User provided custom metadata stored as key-value
-pairs used for querying. A ``Document`` can have a maximum
-of 20 ``CustomMetadata``.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Document`` was last
-updated.
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The Timestamp of when the ``Document`` was
-created.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md b/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md
deleted file mode 100644
index 5e7b3405b..000000000
--- a/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# google.generativeai.protos.DynamicRetrievalConfig
-
-
-
-
-
-
-
-Describes the options to customize dynamic retrieval.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`mode`
-
- |
-
-
-`google.ai.generativelanguage.DynamicRetrievalConfig.Mode`
-
-The mode of the predictor to be used in
-dynamic retrieval.
-
- |
-
-
-
-`dynamic_threshold`
-
- |
-
-
-`float`
-
-The threshold to be used in dynamic
-retrieval. If not set, a system default value is
-used.
-
-
- |
-
-
-
-
-
-## Child Classes
-[`class Mode`](../../../google/generativeai/protos/DynamicRetrievalConfig/Mode.md)
-
diff --git a/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md b/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md
deleted file mode 100644
index fcb8af8ef..000000000
--- a/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md
+++ /dev/null
@@ -1,651 +0,0 @@
-
-# google.generativeai.protos.DynamicRetrievalConfig.Mode
-
-
-
-
-
-
-
-The mode of the predictor to be used in dynamic retrieval.
-
-
-google.generativeai.protos.DynamicRetrievalConfig.Mode(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`MODE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Always trigger retrieval.
-
- |
-
-
-
-`MODE_DYNAMIC`
-
- |
-
-
-`1`
-
-Run retrieval only when system decides it is
-necessary.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-MODE_DYNAMIC
-
- |
-
-
-``
-
- |
-
-
-
-MODE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/EmbedContentRequest.md b/docs/api/google/generativeai/protos/EmbedContentRequest.md
deleted file mode 100644
index f219f20d1..000000000
--- a/docs/api/google/generativeai/protos/EmbedContentRequest.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-# google.generativeai.protos.EmbedContentRequest
-
-
-
-
-
-
-
-Request containing the ``Content`` for the model to embed.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model's resource name. This serves as an ID
-for the Model to use.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-`content`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Required. The content to embed. Only the ``parts.text``
-fields will be counted.
-
- |
-
-
-
-`task_type`
-
- |
-
-
-`google.ai.generativelanguage.TaskType`
-
-Optional. Optional task type for which the embeddings will
-be used. Can only be set for ``models/embedding-001``.
-
-
- |
-
-
-
-`title`
-
- |
-
-
-`str`
-
-Optional. An optional title for the text. Only applicable
-when TaskType is ``RETRIEVAL_DOCUMENT``.
-
-Note: Specifying a ``title`` for ``RETRIEVAL_DOCUMENT``
-provides better quality embeddings for retrieval.
-
-
- |
-
-
-
-`output_dimensionality`
-
- |
-
-
-`int`
-
-Optional. Optional reduced dimension for the output
-embedding. If set, excessive values in the output embedding
-are truncated from the end. Supported by newer models since
-2024 only. You cannot set this value if using the earlier
-model (``models/embedding-001``).
-
-
- |
-
-
-
-
-
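The request fields above (`model`, `content`, `task_type`, `title`, `output_dimensionality`) are exposed through one helper in the Python SDK. A minimal sketch, assuming the `google-generativeai` package; the model name and text are illustrative:

```python
# embed_content builds an EmbedContentRequest under the hood and returns
# the embedding values from the EmbedContentResponse.
import google.generativeai as genai

result = genai.embed_content(
    model="models/text-embedding-004",    # illustrative embedding model
    content="What is the meaning of life?",
    task_type="retrieval_document",       # optional TaskType
    title="A short philosophical query",  # only used with RETRIEVAL_DOCUMENT
)
print(len(result["embedding"]))           # dimensionality of the returned vector
```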
diff --git a/docs/api/google/generativeai/protos/EmbedContentResponse.md b/docs/api/google/generativeai/protos/EmbedContentResponse.md
deleted file mode 100644
index 79b728d3c..000000000
--- a/docs/api/google/generativeai/protos/EmbedContentResponse.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.EmbedContentResponse
-
-
-
-
-
-
-
-The response to an ``EmbedContentRequest``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`embedding`
-
- |
-
-
-`google.ai.generativelanguage.ContentEmbedding`
-
-Output only. The embedding generated from the
-input content.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/EmbedTextRequest.md b/docs/api/google/generativeai/protos/EmbedTextRequest.md
deleted file mode 100644
index d390fb154..000000000
--- a/docs/api/google/generativeai/protos/EmbedTextRequest.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.EmbedTextRequest
-
-
-
-
-
-
-
-Request to get a text embedding from the model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The model name to use with the
-format model=models/{model}.
-
- |
-
-
-
-`text`
-
- |
-
-
-`str`
-
-Optional. The free-form input text that the
-model will turn into an embedding.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/EmbedTextResponse.md b/docs/api/google/generativeai/protos/EmbedTextResponse.md
deleted file mode 100644
index d4f47d38b..000000000
--- a/docs/api/google/generativeai/protos/EmbedTextResponse.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.EmbedTextResponse
-
-
-
-
-
-
-
-The response to a EmbedTextRequest.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`embedding`
-
- |
-
-
-`google.ai.generativelanguage.Embedding`
-
-Output only. The embedding generated from the
-input text.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Embedding.md b/docs/api/google/generativeai/protos/Embedding.md
deleted file mode 100644
index b44723b60..000000000
--- a/docs/api/google/generativeai/protos/Embedding.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.Embedding
-
-
-
-
-
-
-
-A list of floats representing the embedding.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`value`
-
- |
-
-
-`MutableSequence[float]`
-
-The embedding values.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Example.md b/docs/api/google/generativeai/protos/Example.md
deleted file mode 100644
index 0707655fe..000000000
--- a/docs/api/google/generativeai/protos/Example.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.Example
-
-
-
-
-
-
-
-An input/output example used to instruct the Model.
-
-
-
-It demonstrates how the model should respond or format its
-response.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`input`
-
- |
-
-
-`google.ai.generativelanguage.Message`
-
-Required. An example of an input ``Message`` from the user.
-
- |
-
-
-
-`output`
-
- |
-
-
-`google.ai.generativelanguage.Message`
-
-Required. An example of what the model should
-output given the input.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ExecutableCode.md b/docs/api/google/generativeai/protos/ExecutableCode.md
deleted file mode 100644
index d486ec720..000000000
--- a/docs/api/google/generativeai/protos/ExecutableCode.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.protos.ExecutableCode
-
-
-
-
-
-
-
-Code generated by the model that is meant to be executed, and the result returned to the model.
-
-
-
-Only generated when using the ``CodeExecution`` tool, in which the
-code will be automatically executed, and a corresponding
-``CodeExecutionResult`` will also be generated.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`language`
-
- |
-
-
-`google.ai.generativelanguage.ExecutableCode.Language`
-
-Required. Programming language of the ``code``.
-
- |
-
-
-
-`code`
-
- |
-
-
-`str`
-
-Required. The code to be executed.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Language`](../../../google/generativeai/protos/ExecutableCode/Language.md)
-
diff --git a/docs/api/google/generativeai/protos/ExecutableCode/Language.md b/docs/api/google/generativeai/protos/ExecutableCode/Language.md
deleted file mode 100644
index a880d8366..000000000
--- a/docs/api/google/generativeai/protos/ExecutableCode/Language.md
+++ /dev/null
@@ -1,652 +0,0 @@
-
-# google.generativeai.protos.ExecutableCode.Language
-
-
-
-
-
-
-
-Supported programming languages for the generated code.
-
-
-google.generativeai.protos.ExecutableCode.Language(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`LANGUAGE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Unspecified language. This value should not
-be used.
-
- |
-
-
-
-`PYTHON`
-
- |
-
-
-`1`
-
-Python >= 3.10, with numpy and simpy
-available.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-LANGUAGE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-PYTHON
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/File.md b/docs/api/google/generativeai/protos/File.md
deleted file mode 100644
index 8466c3ad0..000000000
--- a/docs/api/google/generativeai/protos/File.md
+++ /dev/null
@@ -1,205 +0,0 @@
-
-# google.generativeai.protos.File
-
-
-
-
-
-
-
-A file uploaded to the API.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`video_metadata`
-
- |
-
-
-`google.ai.generativelanguage.VideoMetadata`
-
-Output only. Metadata for a video.
-
-This field is a member of `oneof`_ ``metadata``.
-
- |
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Immutable. Identifier. The ``File`` resource name. The ID
-(name excluding the "files/" prefix) can contain up to 40
-characters that are lowercase alphanumeric or dashes (-).
-The ID cannot start or end with a dash. If the name is empty
-on create, a unique name will be generated. Example:
-``files/123-456``
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-Optional. The human-readable display name for the ``File``.
-The display name must be no more than 512 characters in
-length, including spaces. Example: "Welcome Image".
-
- |
-
-
-
-`mime_type`
-
- |
-
-
-`str`
-
-Output only. MIME type of the file.
-
- |
-
-
-
-`size_bytes`
-
- |
-
-
-`int`
-
-Output only. Size of the file in bytes.
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp of when the ``File`` was created.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp of when the ``File`` was last
-updated.
-
- |
-
-
-
-`expiration_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp of when the ``File`` will be
-deleted. Only set if the ``File`` is scheduled to expire.
-
- |
-
-
-
-`sha256_hash`
-
- |
-
-
-`bytes`
-
-Output only. SHA-256 hash of the uploaded
-bytes.
-
- |
-
-
-
-`uri`
-
- |
-
-
-`str`
-
-Output only. The uri of the ``File``.
-
- |
-
-
-
-`state`
-
- |
-
-
-`google.ai.generativelanguage.File.State`
-
-Output only. Processing state of the File.
-
- |
-
-
-
-`error`
-
- |
-
-
-`google.rpc.status_pb2.Status`
-
-Output only. Error status if File processing
-failed.
-
- |
-
-
-
-
-
-## Child Classes
-[`class State`](../../../google/generativeai/protos/File/State.md)
-
diff --git a/docs/api/google/generativeai/protos/File/State.md b/docs/api/google/generativeai/protos/File/State.md
deleted file mode 100644
index c8e3835ac..000000000
--- a/docs/api/google/generativeai/protos/File/State.md
+++ /dev/null
@@ -1,701 +0,0 @@
-
-# google.generativeai.protos.File.State
-
-
-
-
-
-
-
-States for the lifecycle of a File.
-
-
-google.generativeai.protos.File.State(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`STATE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-The default value. This value is used if the
-state is omitted.
-
- |
-
-
-
-`PROCESSING`
-
- |
-
-
-`1`
-
-File is being processed and cannot be used
-for inference yet.
-
- |
-
-
-
-`ACTIVE`
-
- |
-
-
-`2`
-
-File is processed and available for
-inference.
-
- |
-
-
-
-`FAILED`
-
- |
-
-
-`10`
-
-File failed processing.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-ACTIVE
-
- |
-
-
-``
-
- |
-
-
-
-FAILED
-
- |
-
-
-``
-
- |
-
-
-
-PROCESSING
-
- |
-
-
-``
-
- |
-
-
-
-STATE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
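The `PROCESSING`/`ACTIVE`/`FAILED` states above are typically handled by polling the file after upload. A minimal sketch, assuming the `google-generativeai` package; the file path is an illustrative placeholder:

```python
# Upload a file, then wait for it to leave the PROCESSING state before use.
import time
import google.generativeai as genai

video = genai.upload_file(path="my_video.mp4")   # placeholder path
while video.state.name == "PROCESSING":          # File.State.PROCESSING
    time.sleep(5)
    video = genai.get_file(video.name)           # refresh the metadata

if video.state.name == "FAILED":                 # File.State.FAILED
    raise RuntimeError("file processing failed")
print(video.uri)                                 # ACTIVE: ready to reference in a prompt
```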
diff --git a/docs/api/google/generativeai/protos/FileData.md b/docs/api/google/generativeai/protos/FileData.md
deleted file mode 100644
index 7676fa1f5..000000000
--- a/docs/api/google/generativeai/protos/FileData.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.FileData
-
-
-
-
-
-
-
-URI based data.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`mime_type`
-
- |
-
-
-`str`
-
-Optional. The IANA standard MIME type of the
-source data.
-
- |
-
-
-
-`file_uri`
-
- |
-
-
-`str`
-
-Required. URI.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/FunctionCall.md b/docs/api/google/generativeai/protos/FunctionCall.md
deleted file mode 100644
index 5d95ec884..000000000
--- a/docs/api/google/generativeai/protos/FunctionCall.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.FunctionCall
-
-
-
-
-
-
-
-A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values.
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the function to call.
-Must be a-z, A-Z, 0-9, or contain underscores
-and dashes, with a maximum length of 63.
-
- |
-
-
-
-`args`
-
- |
-
-
-`google.protobuf.struct_pb2.Struct`
-
-Optional. The function parameters and values
-in JSON object format.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig.md b/docs/api/google/generativeai/protos/FunctionCallingConfig.md
deleted file mode 100644
index c0c0657f3..000000000
--- a/docs/api/google/generativeai/protos/FunctionCallingConfig.md
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# google.generativeai.protos.FunctionCallingConfig
-
-
-
-
-
-
-
-Configuration for specifying function calling behavior.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`mode`
-
- |
-
-
-`google.ai.generativelanguage.FunctionCallingConfig.Mode`
-
-Optional. Specifies the mode in which
-function calling should execute. If unspecified,
-the default value will be set to AUTO.
-
- |
-
-
-
-`allowed_function_names`
-
- |
-
-
-`MutableSequence[str]`
-
-Optional. A set of function names that, when provided,
-limits the functions the model will call.
-
-This should only be set when the Mode is ANY. Function names
-should match [FunctionDeclaration.name]. With mode set to
-ANY, model will predict a function call from the set of
-function names provided.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Mode`](../../../google/generativeai/protos/FunctionCallingConfig/Mode.md)
-
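The `mode` and `allowed_function_names` fields above combine to constrain what the model may call. A minimal sketch that builds the config from the protos documented here, assuming the `google-generativeai` package; the function name is an illustrative placeholder:

```python
# Force the model to predict a call to one named function (Mode.ANY plus
# an allow-list). The config is passed to generate_content as tool_config.
import google.generativeai as genai

tool_config = genai.protos.ToolConfig(
    function_calling_config=genai.protos.FunctionCallingConfig(
        mode=genai.protos.FunctionCallingConfig.Mode.ANY,
        allowed_function_names=["get_weather"],  # must match a declared tool
    )
)
# model.generate_content(prompt, tools=[...], tool_config=tool_config)
```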
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
deleted file mode 100644
index 0ba361f5c..000000000
--- a/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
+++ /dev/null
@@ -1,707 +0,0 @@
-
-# google.generativeai.protos.FunctionCallingConfig.Mode
-
-
-
-
-
-
-
-Defines the execution behavior for function calling by specifying the execution mode.
-
-
-google.generativeai.protos.FunctionCallingConfig.Mode(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`MODE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Unspecified function calling mode. This value
-should not be used.
-
- |
-
-
-
-`AUTO`
-
- |
-
-
-`1`
-
-Default model behavior, model decides to
-predict either a function call or a natural
-language response.
-
- |
-
-
-
-`ANY`
-
- |
-
-
-`2`
-
-Model is constrained to always predicting a function call
-only. If "allowed_function_names" are set, the predicted
-function call will be limited to any one of
-"allowed_function_names", else the predicted function call
-will be any one of the provided "function_declarations".
-
- |
-
-
-
-`NONE`
-
- |
-
-
-`3`
-
-Model will not predict any function call.
-Model behavior is same as when not passing any
-function declarations.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-ANY
-
- |
-
-
-``
-
- |
-
-
-
-AUTO
-
- |
-
-
-``
-
- |
-
-
-
-MODE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-NONE
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/FunctionDeclaration.md b/docs/api/google/generativeai/protos/FunctionDeclaration.md
deleted file mode 100644
index fb6ba658b..000000000
--- a/docs/api/google/generativeai/protos/FunctionDeclaration.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# google.generativeai.protos.FunctionDeclaration
-
-
-
-
-
-
-
-Structured representation of a function declaration as defined by the `OpenAPI 3.03 specification `__.
-
-
- Included in
-this declaration are the function name and parameters. This
-FunctionDeclaration is a representation of a block of code that can
-be used as a ``Tool`` by the model and executed by the client.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the function.
-Must be a-z, A-Z, 0-9, or contain underscores
-and dashes, with a maximum length of 63.
-
- |
-
-
-
-`description`
-
- |
-
-
-`str`
-
-Required. A brief description of the
-function.
-
- |
-
-
-
-`parameters`
-
- |
-
-
-`google.ai.generativelanguage.Schema`
-
-Optional. Describes the parameters to this
-function. Reflects the Open API 3.03 Parameter
-Object string Key: the name of the parameter.
-Parameter names are case sensitive. Schema
-Value: the Schema defining the type used for the
-parameter.
-
-
- |
-
-
-
-
-
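A `FunctionDeclaration` pairs the name and description above with an OpenAPI-style `Schema` for its parameters. A minimal sketch using the proto wrappers, assuming the `google-generativeai` package; the function and its fields are illustrative:

```python
# Declare a callable tool: the model sees the name, description, and
# parameter schema, and may emit a matching FunctionCall.
import google.generativeai as genai

get_weather = genai.protos.FunctionDeclaration(
    name="get_weather",
    description="Return the current weather for a city.",
    parameters=genai.protos.Schema(
        type=genai.protos.Type.OBJECT,
        properties={"city": genai.protos.Schema(type=genai.protos.Type.STRING)},
        required=["city"],
    ),
)
tool = genai.protos.Tool(function_declarations=[get_weather])
```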
diff --git a/docs/api/google/generativeai/protos/FunctionResponse.md b/docs/api/google/generativeai/protos/FunctionResponse.md
deleted file mode 100644
index 277825166..000000000
--- a/docs/api/google/generativeai/protos/FunctionResponse.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# google.generativeai.protos.FunctionResponse
-
-
-
-
-
-
-
-The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model.
-
-
- This should contain the result of a\ ``FunctionCall``
-made based on model prediction.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the function to call.
-Must be a-z, A-Z, 0-9, or contain underscores
-and dashes, with a maximum length of 63.
-
- |
-
-
-
-`response`
-
- |
-
-
-`google.protobuf.struct_pb2.Struct`
-
-Required. The function response in JSON
-object format.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
deleted file mode 100644
index 24a43ce53..000000000
--- a/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
+++ /dev/null
@@ -1,177 +0,0 @@
-
-# google.generativeai.protos.GenerateAnswerRequest
-
-
-
-
-
-
-
-Request to generate a grounded answer from the ``Model``.
-
-
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`inline_passages`
-
- |
-
-
-`google.ai.generativelanguage.GroundingPassages`
-
-Passages provided inline with the request.
-
-This field is a member of `oneof`_ ``grounding_source``.
-
- |
-
-
-
-`semantic_retriever`
-
- |
-
-
-`google.ai.generativelanguage.SemanticRetrieverConfig`
-
-Content retrieved from resources created via
-the Semantic Retriever API.
-
-This field is a member of `oneof`_ ``grounding_source``.
-
- |
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the ``Model`` to use for generating
-the grounded response.
-
-Format: ``model=models/{model}``.
-
- |
-
-
-
-`contents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Content]`
-
-Required. The content of the current conversation with the
-``Model``. For single-turn queries, this is a single
-question to answer. For multi-turn queries, this is a
-repeated field that contains conversation history and the
-last ``Content`` in the list containing the question.
-
-Note: ``GenerateAnswer`` only supports queries in English.
-
- |
-
-
-
-`answer_style`
-
- |
-
-
-`google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
-
-Required. Style in which answers should be
-returned.
-
- |
-
-
-
-`safety_settings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetySetting]`
-
-Optional. A list of unique ``SafetySetting`` instances for
-blocking unsafe content.
-
-This will be enforced on the
-GenerateAnswerRequest.contents and
-``GenerateAnswerResponse.candidate``. There should not be
-more than one setting for each ``SafetyCategory`` type. The
-API will block any contents and responses that fail to meet
-the thresholds set by these settings. This list overrides
-the default settings for each ``SafetyCategory`` specified
-in the safety_settings. If there is no ``SafetySetting`` for
-a given ``SafetyCategory`` provided in the list, the API
-will use the default safety setting for that category. Harm
-categories HARM_CATEGORY_HATE_SPEECH,
-HARM_CATEGORY_SEXUALLY_EXPLICIT,
-HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT
-are supported. Refer to the
-`guide `__
-for detailed information on available safety settings. Also
-refer to the `Safety
-guidance `__
-to learn how to incorporate safety considerations in your AI
-applications.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Optional. Controls the randomness of the output.
-
-Values can range from [0.0,1.0], inclusive. A value closer
-to 1.0 will produce responses that are more varied and
-creative, while a value closer to 0.0 will typically result
-in more straightforward responses from the model. A low
-temperature (~0.2) is usually recommended for
-Attributed-Question-Answering use cases.
-
-
- |
-
-
-
-
-
-## Child Classes
-[`class AnswerStyle`](../../../google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md)
-
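A minimal sketch of how these fields fit together, assuming the `google-generativeai` package is installed; the model name, passage id, and passage text below are illustrative, not prescribed by this page:

```
from google.generativeai import protos

# Illustrative values only: "models/aqa" and the passage contents are assumptions.
request = protos.GenerateAnswerRequest(
    model="models/aqa",
    contents=[
        protos.Content(role="user", parts=[protos.Part(text="Why is the sky blue?")]),
    ],
    answer_style=protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
    inline_passages=protos.GroundingPassages(
        passages=[
            protos.GroundingPassage(
                id="passage-1",
                content=protos.Content(parts=[protos.Part(text="Rayleigh scattering ...")]),
            )
        ]
    ),
    temperature=0.2,  # the low value recommended above for attributed QA
)
```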
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
deleted file mode 100644
index 4f1b53ae1..000000000
--- a/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
+++ /dev/null
@@ -1,701 +0,0 @@
-
-# google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
-
-
-
-
-
-
-
-Style for grounded answers.
-
-
-google.generativeai.protos.GenerateAnswerRequest.AnswerStyle(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`ANSWER_STYLE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Unspecified answer style.
-
- |
-
-
-
-`ABSTRACTIVE`
-
- |
-
-
-`1`
-
-Succinct but abstract style.
-
- |
-
-
-
-`EXTRACTIVE`
-
- |
-
-
-`2`
-
-Very brief and extractive style.
-
- |
-
-
-
-`VERBOSE`
-
- |
-
-
-`3`
-
-Verbose style including extra details. The
-response may be formatted as a sentence,
-paragraph, multiple paragraphs, or bullet
-points, etc.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-ABSTRACTIVE
-
- |
-
-
-``
-
- |
-
-
-
-ANSWER_STYLE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-EXTRACTIVE
-
- |
-
-
-``
-
- |
-
-
-
-VERBOSE
-
- |
-
-
-``
-
- |
-
-
-
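The enum members behave like ints, which is why the integer methods are listed above; a small sketch, assuming the `google-generativeai` package:

```
from google.generativeai import protos

style = protos.GenerateAnswerRequest.AnswerStyle.VERBOSE
print(style.name, int(style))  # VERBOSE 3

# Because the enum is int-valued, comparisons against the raw value also work.
assert style == 3
```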
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
deleted file mode 100644
index 9e9f57fb6..000000000
--- a/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-# google.generativeai.protos.GenerateAnswerResponse
-
-
-
-
-
-
-
-Response from the model for a grounded answer.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`answer`
-
- |
-
-
-`google.ai.generativelanguage.Candidate`
-
-Candidate answer from the model.
-
-Note: The model *always* attempts to provide a grounded
-answer, even when the question is unlikely to be answerable
-from the given passages. In that case, a low-quality or
-ungrounded answer may be provided, along with a low
-``answerable_probability``.
-
- |
-
-
-
-`answerable_probability`
-
- |
-
-
-`float`
-
-Output only. The model's estimate of the probability that
-its answer is correct and grounded in the input passages.
-
-A low ``answerable_probability`` indicates that the answer
-might not be grounded in the sources.
-
-When ``answerable_probability`` is low, you may want to:
-
-- Display a message to the effect of "We couldn’t answer
- that question" to the user.
-- Fall back to a general-purpose LLM that answers the
- question from world knowledge. The threshold and nature
- of such fallbacks will depend on individual use cases.
- ``0.5`` is a good starting threshold.
-
-
- |
-
-
-
-`input_feedback`
-
- |
-
-
-`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback`
-
-Output only. Feedback related to the input data used to
-answer the question, as opposed to the model-generated
-response to the question.
-
-The input data can be one or more of the following:
-
-- Question specified by the last entry in
- ``GenerateAnswerRequest.content``
-- Conversation history specified by the other entries in
- ``GenerateAnswerRequest.content``
-- Grounding sources
- (GenerateAnswerRequest.semantic_retriever or
- GenerateAnswerRequest.inline_passages )
-
-
- |
-
-
-
-
-
-## Child Classes
-[`class InputFeedback`](../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md)
-
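A sketch of reading the response along the lines suggested above, assuming `response` comes back from the service and using the 0.5 starting threshold mentioned in the `answerable_probability` description:

```
from google.generativeai import protos

def extract_grounded_answer(response: protos.GenerateAnswerResponse, threshold: float = 0.5):
    """Return the answer text, or None when a fallback is advisable."""
    if response.input_feedback.block_reason:
        return None  # input was blocked; rephrase the input and retry
    if response.answerable_probability < threshold:
        return None  # likely ungrounded; fall back to a general-purpose model
    return response.answer.content.parts[0].text
```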
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
deleted file mode 100644
index 3b51e3b05..000000000
--- a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# google.generativeai.protos.GenerateAnswerResponse.InputFeedback
-
-
-
-
-
-
-
-Feedback related to the input data used to answer the question, as opposed to the model-generated response to the question.
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`block_reason`
-
- |
-
-
-`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback.BlockReason`
-
-Optional. If set, the input was blocked and
-no candidates are returned. Rephrase the input.
-
-
- |
-
-
-
-`safety_ratings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetyRating]`
-
-Ratings for safety of the input.
-There is at most one rating per category.
-
- |
-
-
-
-
-
-## Child Classes
-[`class BlockReason`](../../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md)
-
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
deleted file mode 100644
index fd5b36814..000000000
--- a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
+++ /dev/null
@@ -1,676 +0,0 @@
-
-# google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
-
-
-
-
-
-
-
-Specifies the reason why the input was blocked.
-
-
-google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`BLOCK_REASON_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Default value. This value is unused.
-
- |
-
-
-
-`SAFETY`
-
- |
-
-
-`1`
-
-Input was blocked due to safety reasons. Inspect
-``safety_ratings`` to understand which safety category
-blocked it.
-
- |
-
-
-
-`OTHER`
-
- |
-
-
-`2`
-
-Input was blocked due to other reasons.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-BLOCK_REASON_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-OTHER
-
- |
-
-
-``
-
- |
-
-
-
-SAFETY
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/GenerateContentRequest.md b/docs/api/google/generativeai/protos/GenerateContentRequest.md
deleted file mode 100644
index 0bab8ea0c..000000000
--- a/docs/api/google/generativeai/protos/GenerateContentRequest.md
+++ /dev/null
@@ -1,192 +0,0 @@
-
-# google.generativeai.protos.GenerateContentRequest
-
-
-
-
-
-
-
-Request to generate a completion from the model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the ``Model`` to use for generating
-the completion.
-
-Format: ``name=models/{model}``.
-
- |
-
-
-
-`system_instruction`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Optional. Developer set `system
-instruction(s) `__.
-Currently, text only.
-
-
- |
-
-
-
-`contents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Content]`
-
-Required. The content of the current conversation with the
-model.
-
-For single-turn queries, this is a single instance. For
-multi-turn queries like
-`chat `__,
-this is a repeated field that contains the conversation
-history and the latest request.
-
- |
-
-
-
-`tools`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Tool]`
-
-Optional. A list of ``Tools`` the ``Model`` may use to
-generate the next response.
-
-A ``Tool`` is a piece of code that enables the system to
-interact with external systems to perform an action, or set
-of actions, outside of knowledge and scope of the ``Model``.
-Supported ``Tool``\ s are ``Function`` and
-``code_execution``. Refer to the `Function
-calling `__
-and the `Code
-execution `__
-guides to learn more.
-
- |
-
-
-
-`tool_config`
-
- |
-
-
-`google.ai.generativelanguage.ToolConfig`
-
-Optional. Tool configuration for any ``Tool`` specified in
-the request. Refer to the `Function calling
-guide `__
-for a usage example.
-
- |
-
-
-
-`safety_settings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetySetting]`
-
-Optional. A list of unique ``SafetySetting`` instances for
-blocking unsafe content.
-
-This will be enforced on the
-GenerateContentRequest.contents and
-GenerateContentResponse.candidates. There should not be
-more than one setting for each ``SafetyCategory`` type. The
-API will block any contents and responses that fail to meet
-the thresholds set by these settings. This list overrides
-the default settings for each ``SafetyCategory`` specified
-in the safety_settings. If there is no ``SafetySetting`` for
-a given ``SafetyCategory`` provided in the list, the API
-will use the default safety setting for that category. Harm
-categories HARM_CATEGORY_HATE_SPEECH,
-HARM_CATEGORY_SEXUALLY_EXPLICIT,
-HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT
-are supported. Refer to the
-`guide `__
-for detailed information on available safety settings. Also
-refer to the `Safety
-guidance `__
-to learn how to incorporate safety considerations in your AI
-applications.
-
- |
-
-
-
-`generation_config`
-
- |
-
-
-`google.ai.generativelanguage.GenerationConfig`
-
-Optional. Configuration options for model
-generation and outputs.
-
-
- |
-
-
-
-`cached_content`
-
- |
-
-
-`str`
-
-Optional. The name of the content
-`cached `__
-to use as context to serve the prediction. Format:
-``cachedContents/{cachedContent}``
-
-
- |
-
-
-
-
-
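A minimal construction sketch, assuming the `google-generativeai` package; the model name and prompt text are placeholders, and most callers build this request indirectly through the higher-level client rather than by hand:

```
from google.generativeai import protos

request = protos.GenerateContentRequest(
    model="models/gemini-1.5-flash",  # placeholder model name
    system_instruction=protos.Content(parts=[protos.Part(text="Answer concisely.")]),
    contents=[
        protos.Content(role="user", parts=[protos.Part(text="What is cached content?")]),
    ],
    generation_config=protos.GenerationConfig(temperature=0.4, max_output_tokens=256),
)
```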
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse.md b/docs/api/google/generativeai/protos/GenerateContentResponse.md
deleted file mode 100644
index a19fa46cc..000000000
--- a/docs/api/google/generativeai/protos/GenerateContentResponse.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-# google.generativeai.protos.GenerateContentResponse
-
-
-
-
-
-
-
-Response from the model supporting multiple candidate responses.
-
-
-
-Safety ratings and content filtering are reported for both the prompt in
-``GenerateContentResponse.prompt_feedback`` and for each candidate
-in ``finish_reason`` and in ``safety_ratings``. The API:
-
-- Returns either all requested candidates or none of them
-- Returns no candidates at all only if there was something wrong
- with the prompt (check ``prompt_feedback``)
-- Reports feedback on each candidate in ``finish_reason`` and
- ``safety_ratings``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Candidate]`
-
-Candidate responses from the model.
-
- |
-
-
-
-`prompt_feedback`
-
- |
-
-
-`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback`
-
-Returns the prompt's feedback related to the
-content filters.
-
- |
-
-
-
-`usage_metadata`
-
- |
-
-
-`google.ai.generativelanguage.GenerateContentResponse.UsageMetadata`
-
-Output only. Metadata on the generation
-request's token usage.
-
- |
-
-
-
-
-
-## Child Classes
-[`class PromptFeedback`](../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback.md)
-
-[`class UsageMetadata`](../../../google/generativeai/protos/GenerateContentResponse/UsageMetadata.md)
-
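A sketch of the checks implied by the bullets above, assuming `response` is a `GenerateContentResponse` returned by the service:

```
from google.generativeai import protos

def summarize(response: protos.GenerateContentResponse) -> None:
    if response.prompt_feedback.block_reason:
        # No candidates are returned when the prompt itself was blocked.
        print("Prompt blocked:", response.prompt_feedback.block_reason.name)
        return
    for candidate in response.candidates:
        print(candidate.finish_reason.name, candidate.content.parts[0].text)
    print("Total tokens:", response.usage_metadata.total_token_count)
```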
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
deleted file mode 100644
index bdf993a82..000000000
--- a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.GenerateContentResponse.PromptFeedback
-
-
-
-
-
-
-
-A set of feedback metadata for the prompt specified in ``GenerateContentRequest.content``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`block_reason`
-
- |
-
-
-`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback.BlockReason`
-
-Optional. If set, the prompt was blocked and
-no candidates are returned. Rephrase the prompt.
-
- |
-
-
-
-`safety_ratings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetyRating]`
-
-Ratings for safety of the prompt.
-There is at most one rating per category.
-
- |
-
-
-
-
-
-## Child Classes
-[`class BlockReason`](../../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md)
-
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
deleted file mode 100644
index df7bb344f..000000000
--- a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
+++ /dev/null
@@ -1,725 +0,0 @@
-
-# google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
-
-
-
-
-
-
-
-Specifies the reason why the prompt was blocked.
-
-
-google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`BLOCK_REASON_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Default value. This value is unused.
-
- |
-
-
-
-`SAFETY`
-
- |
-
-
-`1`
-
-Prompt was blocked due to safety reasons. Inspect
-``safety_ratings`` to understand which safety category
-blocked it.
-
- |
-
-
-
-`OTHER`
-
- |
-
-
-`2`
-
-Prompt was blocked due to unknown reasons.
-
- |
-
-
-
-`BLOCKLIST`
-
- |
-
-
-`3`
-
-Prompt was blocked because it contains terms from
-the terminology blocklist.
-
- |
-
-
-
-`PROHIBITED_CONTENT`
-
- |
-
-
-`4`
-
-Prompt was blocked due to prohibited content.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-BLOCKLIST
-
- |
-
-
-``
-
- |
-
-
-
-BLOCK_REASON_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-OTHER
-
- |
-
-
-``
-
- |
-
-
-
-PROHIBITED_CONTENT
-
- |
-
-
-``
-
- |
-
-
-
-SAFETY
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
deleted file mode 100644
index 1aee78acc..000000000
--- a/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-# google.generativeai.protos.GenerateContentResponse.UsageMetadata
-
-
-
-
-
-
-
-Metadata on the generation request's token usage.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`prompt_token_count`
-
- |
-
-
-`int`
-
-Number of tokens in the prompt. When ``cached_content`` is
-set, this is still the total effective prompt size, meaning
-it includes the number of tokens in the cached content.
-
- |
-
-
-
-`cached_content_token_count`
-
- |
-
-
-`int`
-
-Number of tokens in the cached part of the
-prompt (the cached content)
-
- |
-
-
-
-`candidates_token_count`
-
- |
-
-
-`int`
-
-Total number of tokens across all the
-generated response candidates.
-
- |
-
-
-
-`total_token_count`
-
- |
-
-
-`int`
-
-Total token count for the generation request
-(prompt + response candidates).
-
- |
-
-
-
-
-
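A sketch of splitting the prompt count into cached and uncached parts, assuming `response` is a `GenerateContentResponse`:

```
from google.generativeai import protos

def token_breakdown(response: protos.GenerateContentResponse) -> dict:
    meta = response.usage_metadata
    return {
        # Per the field descriptions, prompt_token_count already includes cached tokens.
        "uncached_prompt": meta.prompt_token_count - meta.cached_content_token_count,
        "cached_prompt": meta.cached_content_token_count,
        "candidates": meta.candidates_token_count,
        "total": meta.total_token_count,
    }
```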
diff --git a/docs/api/google/generativeai/protos/GenerateMessageRequest.md b/docs/api/google/generativeai/protos/GenerateMessageRequest.md
deleted file mode 100644
index ed737707f..000000000
--- a/docs/api/google/generativeai/protos/GenerateMessageRequest.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-# google.generativeai.protos.GenerateMessageRequest
-
-
-
-
-
-
-
-Request to generate a message response from the model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the model to use.
-
-Format: ``name=models/{model}``.
-
- |
-
-
-
-`prompt`
-
- |
-
-
-`google.ai.generativelanguage.MessagePrompt`
-
-Required. The structured textual input given
-to the model as a prompt.
-Given a
-prompt, the model will return what it predicts
-is the next message in the discussion.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Optional. Controls the randomness of the output.
-
-Values can range over ``[0.0,1.0]``, inclusive. A value
-closer to ``1.0`` will produce responses that are more
-varied, while a value closer to ``0.0`` will typically
-result in less surprising responses from the model.
-
-
- |
-
-
-
-`candidate_count`
-
- |
-
-
-`int`
-
-Optional. The number of generated response messages to
-return.
-
-This value must be between ``[1, 8]``, inclusive. If unset,
-this will default to ``1``.
-
-
- |
-
-
-
-`top_p`
-
- |
-
-
-`float`
-
-Optional. The maximum cumulative probability of tokens to
-consider when sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Nucleus sampling considers the smallest set of tokens whose
-probability sum is at least ``top_p``.
-
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to consider when
-sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Top-k sampling considers the set of ``top_k`` most probable
-tokens.
-
-
- |
-
-
-
-
-
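A construction sketch for this legacy (PaLM-era) message request, assuming the `google-generativeai` package; the model name and prompt are illustrative:

```
from google.generativeai import protos

request = protos.GenerateMessageRequest(
    model="models/chat-bison-001",  # illustrative legacy chat model name
    prompt=protos.MessagePrompt(
        messages=[protos.Message(author="user", content="Suggest a team name.")],
    ),
    temperature=0.5,
    candidate_count=2,  # must stay within [1, 8]
)
```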
diff --git a/docs/api/google/generativeai/protos/GenerateMessageResponse.md b/docs/api/google/generativeai/protos/GenerateMessageResponse.md
deleted file mode 100644
index 52ed9e51d..000000000
--- a/docs/api/google/generativeai/protos/GenerateMessageResponse.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-# google.generativeai.protos.GenerateMessageResponse
-
-
-
-
-
-
-
-The response from the model.
-
-
-
-This includes candidate messages and
-conversation history in the form of chronologically-ordered
-messages.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Message]`
-
-Candidate response messages from the model.
-
- |
-
-
-
-`messages`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Message]`
-
-The conversation history used by the model.
-
- |
-
-
-
-`filters`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.ContentFilter]`
-
-A set of content filtering metadata for the prompt and
-response text.
-
-This indicates which ``SafetyCategory``\ (s) blocked a
-candidate from this response, the lowest ``HarmProbability``
-that triggered a block, and the HarmThreshold setting for
-that category.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GenerateTextRequest.md b/docs/api/google/generativeai/protos/GenerateTextRequest.md
deleted file mode 100644
index d6aed4add..000000000
--- a/docs/api/google/generativeai/protos/GenerateTextRequest.md
+++ /dev/null
@@ -1,219 +0,0 @@
-
-# google.generativeai.protos.GenerateTextRequest
-
-
-
-
-
-
-
-Request to generate a text completion response from the model.
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the ``Model`` or ``TunedModel`` to use
-for generating the completion. Examples:
-``models/text-bison-001``, ``tunedModels/sentence-translator-u3b7m``
-
- |
-
-
-
-`prompt`
-
- |
-
-
-`google.ai.generativelanguage.TextPrompt`
-
-Required. The free-form input text given to
-the model as a prompt.
-Given a prompt, the model will generate a
-TextCompletion response it predicts as the
-completion of the input text.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Optional. Controls the randomness of the output. Note: The
-default value varies by model, see the Model.temperature
-attribute of the ``Model`` returned from the ``getModel``
-function.
-
-Values can range from [0.0,1.0], inclusive. A value closer
-to 1.0 will produce responses that are more varied and
-creative, while a value closer to 0.0 will typically result
-in more straightforward responses from the model.
-
-
- |
-
-
-
-`candidate_count`
-
- |
-
-
-`int`
-
-Optional. Number of generated responses to return.
-
-This value must be between [1, 8], inclusive. If unset, this
-will default to 1.
-
-
- |
-
-
-
-`max_output_tokens`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to include in a
-candidate.
-
-If unset, this will default to output_token_limit specified
-in the ``Model`` specification.
-
-
- |
-
-
-
-`top_p`
-
- |
-
-
-`float`
-
-Optional. The maximum cumulative probability of tokens to
-consider when sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Tokens are sorted based on their assigned probabilities so
-that only the most likely tokens are considered. Top-k
-sampling directly limits the maximum number of tokens to
-consider, while Nucleus sampling limits number of tokens
-based on the cumulative probability.
-
-Note: The default value varies by model, see the
-Model.top_p attribute of the ``Model`` returned from the
-``getModel`` function.
-
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to consider when
-sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Top-k sampling considers the set of ``top_k`` most probable
-tokens. Defaults to 40.
-
-Note: The default value varies by model, see the
-Model.top_k attribute of the ``Model`` returned from the
-``getModel`` function.
-
-
- |
-
-
-
-`safety_settings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetySetting]`
-
-Optional. A list of unique ``SafetySetting`` instances for
-blocking unsafe content.
-
-These will be enforced on the GenerateTextRequest.prompt
-and GenerateTextResponse.candidates. There should not be
-more than one setting for each ``SafetyCategory`` type. The
-API will block any prompts and responses that fail to meet
-the thresholds set by these settings. This list overrides
-the default settings for each ``SafetyCategory`` specified
-in the safety_settings. If there is no ``SafetySetting`` for
-a given ``SafetyCategory`` provided in the list, the API
-will use the default safety setting for that category. Harm
-categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
-HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
-HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported
-in text service.
-
- |
-
-
-
-`stop_sequences`
-
- |
-
-
-`MutableSequence[str]`
-
-The set of character sequences (up to 5) that
-will stop output generation. If specified, the
-API will stop at the first appearance of a stop
-sequence. The stop sequence will not be included
-as part of the response.
-
- |
-
-
-
-
-
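A construction sketch for this legacy text request, assuming the `google-generativeai` package; all values are illustrative:

```
from google.generativeai import protos

request = protos.GenerateTextRequest(
    model="models/text-bison-001",  # example name taken from the field description
    prompt=protos.TextPrompt(text="Write a two-line poem about autumn."),
    temperature=0.7,
    candidate_count=2,       # must stay within [1, 8]
    max_output_tokens=128,
    stop_sequences=["\n\n"],
)
```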
diff --git a/docs/api/google/generativeai/protos/GenerateTextResponse.md b/docs/api/google/generativeai/protos/GenerateTextResponse.md
deleted file mode 100644
index a7b07a833..000000000
--- a/docs/api/google/generativeai/protos/GenerateTextResponse.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-# google.generativeai.protos.GenerateTextResponse
-
-
-
-
-
-
-
-The response from the model, including candidate completions.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.TextCompletion]`
-
-Candidate responses from the model.
-
- |
-
-
-
-`filters`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.ContentFilter]`
-
-A set of content filtering metadata for the prompt and
-response text.
-
-This indicates which ``SafetyCategory``\ (s) blocked a
-candidate from this response, the lowest ``HarmProbability``
-that triggered a block, and the HarmThreshold setting for
-that category. This indicates the smallest change to the
-``SafetySettings`` that would be necessary to unblock at
-least 1 response.
-
-The blocking is configured by the ``SafetySettings`` in the
-request (or the default ``SafetySettings`` of the API).
-
- |
-
-
-
-`safety_feedback`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetyFeedback]`
-
-Returns any safety feedback related to
-content filtering.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GenerationConfig.md b/docs/api/google/generativeai/protos/GenerationConfig.md
deleted file mode 100644
index 87d1a63f8..000000000
--- a/docs/api/google/generativeai/protos/GenerationConfig.md
+++ /dev/null
@@ -1,297 +0,0 @@
-
-# google.generativeai.protos.GenerationConfig
-
-
-
-
-
-
-
-Configuration options for model generation and outputs.
-
-
-Not all parameters are configurable for every model.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidate_count`
-
- |
-
-
-`int`
-
-Optional. Number of generated responses to
-return.
-Currently, this value can only be set to 1. If
-unset, this will default to 1.
-
-
- |
-
-
-
-`stop_sequences`
-
- |
-
-
-`MutableSequence[str]`
-
-Optional. The set of character sequences (up to 5) that will
-stop output generation. If specified, the API will stop at
-the first appearance of a ``stop_sequence``. The stop
-sequence will not be included as part of the response.
-
- |
-
-
-
-`max_output_tokens`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to include in a
-response candidate.
-
-Note: The default value varies by model, see the
-Model.output_token_limit attribute of the ``Model``
-returned from the ``getModel`` function.
-
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Optional. Controls the randomness of the output.
-
-Note: The default value varies by model, see the
-Model.temperature attribute of the ``Model`` returned
-from the ``getModel`` function.
-
-Values can range from [0.0, 2.0].
-
-
- |
-
-
-
-`top_p`
-
- |
-
-
-`float`
-
-Optional. The maximum cumulative probability of tokens to
-consider when sampling.
-
-The model uses combined Top-k and Top-p (nucleus) sampling.
-
-Tokens are sorted based on their assigned probabilities so
-that only the most likely tokens are considered. Top-k
-sampling directly limits the maximum number of tokens to
-consider, while Nucleus sampling limits the number of tokens
-based on the cumulative probability.
-
-Note: The default value varies by ``Model`` and is specified
-by the\ Model.top_p attribute returned from the
-``getModel`` function. An empty ``top_k`` attribute
-indicates that the model doesn't apply top-k sampling and
-doesn't allow setting ``top_k`` on requests.
-
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to consider when
-sampling.
-
-Gemini models use Top-p (nucleus) sampling or a combination
-of Top-k and nucleus sampling. Top-k sampling considers the
-set of ``top_k`` most probable tokens. Models running with
-nucleus sampling don't allow top_k setting.
-
-Note: The default value varies by ``Model`` and is specified
-by the\ Model.top_p attribute returned from the
-``getModel`` function. An empty ``top_k`` attribute
-indicates that the model doesn't apply top-k sampling and
-doesn't allow setting ``top_k`` on requests.
-
-
- |
-
-
-
-`response_mime_type`
-
- |
-
-
-`str`
-
-Optional. MIME type of the generated candidate text.
-Supported MIME types are: ``text/plain``: (default) Text
-output. ``application/json``: JSON response in the response
-candidates. ``text/x.enum``: ENUM as a string response in
-the response candidates. Refer to the
-`docs `__
-for a list of all supported text MIME types.
-
- |
-
-
-
-`response_schema`
-
- |
-
-
-`google.ai.generativelanguage.Schema`
-
-Optional. Output schema of the generated candidate text.
-Schemas must be a subset of the `OpenAPI
-schema `__ and
-can be objects, primitives or arrays.
-
-If set, a compatible ``response_mime_type`` must also be
-set. Compatible MIME types: ``application/json``: Schema for
-JSON response. Refer to the `JSON text generation
-guide `__
-for more details.
-
- |
-
-
-
-`presence_penalty`
-
- |
-
-
-`float`
-
-Optional. Presence penalty applied to the next token's
-logprobs if the token has already been seen in the response.
-
-This penalty is binary on/off and not dependent on the
-number of times the token is used (after the first). Use
-[frequency_penalty][google.ai.generativelanguage.v1beta.GenerationConfig.frequency_penalty]
-for a penalty that increases with each use.
-
-A positive penalty will discourage the use of tokens that
-have already been used in the response, increasing the
-vocabulary.
-
-A negative penalty will encourage the use of tokens that
-have already been used in the response, decreasing the
-vocabulary.
-
-
- |
-
-
-
-`frequency_penalty`
-
- |
-
-
-`float`
-
-Optional. Frequency penalty applied to the next token's
-logprobs, multiplied by the number of times each token has
-been seen in the response so far.
-
-A positive penalty will discourage the use of tokens that
-have already been used, proportional to the number of times
-the token has been used: the more a token is used, the more
-difficult it is for the model to use that token again,
-increasing the vocabulary of responses.
-
-Caution: A *negative* penalty will encourage the model to
-reuse tokens proportional to the number of times the token
-has been used. Small negative values will reduce the
-vocabulary of a response. Larger negative values will cause
-the model to start repeating a common token until it hits
-the
-[max_output_tokens][google.ai.generativelanguage.v1beta.GenerationConfig.max_output_tokens]
-limit: "...the the the the the...".
-
-
- |
-
-
-
-`response_logprobs`
-
- |
-
-
-`bool`
-
-Optional. If true, export the logprobs
-results in response.
-
-
- |
-
-
-
-`logprobs`
-
- |
-
-
-`int`
-
-Optional. Only valid if
-[response_logprobs=True][google.ai.generativelanguage.v1beta.GenerationConfig.response_logprobs].
-This sets the number of top logprobs to return at each
-decoding step in the
-[Candidate.logprobs_result][google.ai.generativelanguage.v1beta.Candidate.logprobs_result].
-
-This field is a member of `oneof`_ ``_logprobs``.
-
- |
-
-
-
-
-
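A sketch of a config combining several of the fields above, assuming the `google-generativeai` package; whether each field is honoured depends on the model, as noted in the field descriptions:

```
from google.generativeai import protos

config = protos.GenerationConfig(
    candidate_count=1,
    temperature=0.9,
    top_p=0.95,
    top_k=40,
    max_output_tokens=512,
    stop_sequences=["END"],
    response_mime_type="application/json",  # pair with response_schema for structured output
)
```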
diff --git a/docs/api/google/generativeai/protos/GetCachedContentRequest.md b/docs/api/google/generativeai/protos/GetCachedContentRequest.md
deleted file mode 100644
index 90b218160..000000000
--- a/docs/api/google/generativeai/protos/GetCachedContentRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GetCachedContentRequest
-
-
-
-
-
-
-
-Request to read CachedContent.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name referring to the content cache
-entry. Format: ``cachedContents/{id}``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetChunkRequest.md b/docs/api/google/generativeai/protos/GetChunkRequest.md
deleted file mode 100644
index e6af445dc..000000000
--- a/docs/api/google/generativeai/protos/GetChunkRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GetChunkRequest
-
-
-
-
-
-
-
-Request for getting information about a specific ``Chunk``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``Chunk`` to retrieve. Example:
-``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetCorpusRequest.md b/docs/api/google/generativeai/protos/GetCorpusRequest.md
deleted file mode 100644
index bfe9f9dff..000000000
--- a/docs/api/google/generativeai/protos/GetCorpusRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GetCorpusRequest
-
-
-
-
-
-
-
-Request for getting information about a specific ``Corpus``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``Corpus``. Example:
-``corpora/my-corpus-123``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetDocumentRequest.md b/docs/api/google/generativeai/protos/GetDocumentRequest.md
deleted file mode 100644
index 8d4b83e4e..000000000
--- a/docs/api/google/generativeai/protos/GetDocumentRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GetDocumentRequest
-
-
-
-
-
-
-
-Request for getting information about a specific ``Document``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``Document`` to retrieve. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetFileRequest.md b/docs/api/google/generativeai/protos/GetFileRequest.md
deleted file mode 100644
index dba220926..000000000
--- a/docs/api/google/generativeai/protos/GetFileRequest.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GetFileRequest
-
-
-
-
-
-
-
-Request for ``GetFile``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``File`` to get. Example:
-``files/abc-123``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetModelRequest.md b/docs/api/google/generativeai/protos/GetModelRequest.md
deleted file mode 100644
index 455f32b64..000000000
--- a/docs/api/google/generativeai/protos/GetModelRequest.md
+++ /dev/null
@@ -1,51 +0,0 @@
-
-# google.generativeai.protos.GetModelRequest
-
-
-
-
-
-
-
-Request for getting information about a specific Model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the model.
-
-This name should match a model name returned by the
-``ListModels`` method.
-
-Format: ``models/{model}``
-
- |
-
-
-
-
-
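The Get*Request messages in this group each carry a single resource ``name``; a sketch, assuming the `google-generativeai` package, with illustrative resource names:

```
from google.generativeai import protos

request = protos.GetModelRequest(name="models/gemini-1.5-flash")

# The neighbouring request types differ only in the resource path, e.g.:
file_request = protos.GetFileRequest(name="files/abc-123")
tuned_request = protos.GetTunedModelRequest(name="tunedModels/my-model-id")
```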
diff --git a/docs/api/google/generativeai/protos/GetPermissionRequest.md b/docs/api/google/generativeai/protos/GetPermissionRequest.md
deleted file mode 100644
index 0b26ec3bb..000000000
--- a/docs/api/google/generativeai/protos/GetPermissionRequest.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-# google.generativeai.protos.GetPermissionRequest
-
-
-
-
-
-
-
-Request for getting information about a specific ``Permission``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the permission.
-
-Formats:
-``tunedModels/{tuned_model}/permissions/{permission}``
-``corpora/{corpus}/permissions/{permission}``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GetTunedModelRequest.md b/docs/api/google/generativeai/protos/GetTunedModelRequest.md
deleted file mode 100644
index 725980887..000000000
--- a/docs/api/google/generativeai/protos/GetTunedModelRequest.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.GetTunedModelRequest
-
-
-
-
-
-
-
-Request for getting information about a specific Model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the model.
-
-Format: ``tunedModels/my-model-id``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md b/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md
deleted file mode 100644
index 9d795e8ed..000000000
--- a/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.GoogleSearchRetrieval
-
-
-
-
-
-
-
-Tool to retrieve public web data for grounding, powered by Google.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`dynamic_retrieval_config`
-
- |
-
-
-`google.ai.generativelanguage.DynamicRetrievalConfig`
-
-Specifies the dynamic retrieval configuration
-for the given source.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GroundingAttribution.md b/docs/api/google/generativeai/protos/GroundingAttribution.md
deleted file mode 100644
index f07e399b3..000000000
--- a/docs/api/google/generativeai/protos/GroundingAttribution.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.GroundingAttribution
-
-
-
-
-
-
-
-Attribution for a source that contributed to an answer.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`source_id`
-
- |
-
-
-`google.ai.generativelanguage.AttributionSourceId`
-
-Output only. Identifier for the source
-contributing to this attribution.
-
- |
-
-
-
-`content`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Grounding source content that makes up this
-attribution.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GroundingChunk.md b/docs/api/google/generativeai/protos/GroundingChunk.md
deleted file mode 100644
index adb637a71..000000000
--- a/docs/api/google/generativeai/protos/GroundingChunk.md
+++ /dev/null
@@ -1,51 +0,0 @@
-
-# google.generativeai.protos.GroundingChunk
-
-
-
-
-
-
-
-Grounding chunk.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`web`
-
- |
-
-
-`google.ai.generativelanguage.GroundingChunk.Web`
-
-Grounding chunk from the web.
-
-This field is a member of `oneof`_ ``chunk_type``.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Web`](../../../google/generativeai/protos/GroundingChunk/Web.md)
-
diff --git a/docs/api/google/generativeai/protos/GroundingChunk/Web.md b/docs/api/google/generativeai/protos/GroundingChunk/Web.md
deleted file mode 100644
index f9757e0c8..000000000
--- a/docs/api/google/generativeai/protos/GroundingChunk/Web.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.GroundingChunk.Web
-
-
-
-
-
-
-
-Chunk from the web.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`uri`
-
- |
-
-
-`str`
-
-URI reference of the chunk.
-
-
- |
-
-
-
-`title`
-
- |
-
-
-`str`
-
-Title of the chunk.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GroundingMetadata.md b/docs/api/google/generativeai/protos/GroundingMetadata.md
deleted file mode 100644
index 54811ea73..000000000
--- a/docs/api/google/generativeai/protos/GroundingMetadata.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-# google.generativeai.protos.GroundingMetadata
-
-
-
-
-
-
-
-Metadata returned to client when grounding is enabled.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`search_entry_point`
-
- |
-
-
-`google.ai.generativelanguage.SearchEntryPoint`
-
-Optional. Google search entry for the
-follow-up web searches.
-
-
- |
-
-
-
-`grounding_chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.GroundingChunk]`
-
-List of supporting references retrieved from
-specified grounding source.
-
- |
-
-
-
-`grounding_supports`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.GroundingSupport]`
-
-List of grounding support.
-
- |
-
-
-
-`retrieval_metadata`
-
- |
-
-
-`google.ai.generativelanguage.RetrievalMetadata`
-
-Metadata related to retrieval in the
-grounding flow.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GroundingPassage.md b/docs/api/google/generativeai/protos/GroundingPassage.md
deleted file mode 100644
index 9b0e45d17..000000000
--- a/docs/api/google/generativeai/protos/GroundingPassage.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.GroundingPassage
-
-
-
-
-
-
-
-Passage included inline with a grounding configuration.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`id`
-
- |
-
-
-`str`
-
-Identifier for the passage for attributing
-this passage in grounded answers.
-
- |
-
-
-
-`content`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Content of the passage.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/GroundingPassages.md b/docs/api/google/generativeai/protos/GroundingPassages.md
deleted file mode 100644
index a3b353808..000000000
--- a/docs/api/google/generativeai/protos/GroundingPassages.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.GroundingPassages
-
-
-
-
-
-
-
-A repeated list of passages.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`passages`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.GroundingPassage]`
-
-List of passages.
-
- |
-
-
-
-
-
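A sketch of building the passage list that `GenerateAnswerRequest.inline_passages` expects, assuming the `google-generativeai` package; ids and text are illustrative:

```
from google.generativeai import protos

passages = protos.GroundingPassages(
    passages=[
        protos.GroundingPassage(
            id="doc-1",
            content=protos.Content(parts=[protos.Part(text="Photosynthesis converts ...")]),
        ),
    ]
)
```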
diff --git a/docs/api/google/generativeai/protos/GroundingSupport.md b/docs/api/google/generativeai/protos/GroundingSupport.md
deleted file mode 100644
index d3ddd823c..000000000
--- a/docs/api/google/generativeai/protos/GroundingSupport.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-# google.generativeai.protos.GroundingSupport
-
-
-
-
-
-
-
-Grounding support.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`segment`
-
- |
-
-
-`google.ai.generativelanguage.Segment`
-
-Segment of the content this support belongs
-to.
-
-
- |
-
-
-
-`grounding_chunk_indices`
-
- |
-
-
-`MutableSequence[int]`
-
-A list of indices (into 'grounding_chunk') specifying the
-citations associated with the claim. For instance [1,3,4]
-means that grounding_chunk[1], grounding_chunk[3],
-grounding_chunk[4] are the retrieved content attributed to
-the claim.
-
- |
-
-
-
-`confidence_scores`
-
- |
-
-
-`MutableSequence[float]`
-
-Confidence score of the support references. Ranges from 0 to
-1. 1 is the most confident. This list must have the same
-size as the grounding_chunk_indices.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/HarmCategory.md b/docs/api/google/generativeai/protos/HarmCategory.md
deleted file mode 100644
index cd7a72b9d..000000000
--- a/docs/api/google/generativeai/protos/HarmCategory.md
+++ /dev/null
@@ -1,897 +0,0 @@
-
-# google.generativeai.protos.HarmCategory
-
-
-
-
-
-
-
-The category of a rating.
-
-
-google.generativeai.protos.HarmCategory(
- *args, **kwds
-)
-
-
-
-
-
-
-These categories cover various kinds of harms that developers
-may wish to adjust.
-
-
-
-
-Values |
-
-
-
-
-`HARM_CATEGORY_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Category is unspecified.
-
- |
-
-
-
-`HARM_CATEGORY_DEROGATORY`
-
- |
-
-
-`1`
-
-**PaLM** - Negative or harmful comments targeting identity
-and/or protected attribute.
-
- |
-
-
-
-`HARM_CATEGORY_TOXICITY`
-
- |
-
-
-`2`
-
-**PaLM** - Content that is rude, disrespectful, or profane.
-
- |
-
-
-
-`HARM_CATEGORY_VIOLENCE`
-
- |
-
-
-`3`
-
-**PaLM** - Describes scenarios depicting violence against an
-individual or group, or general descriptions of gore.
-
- |
-
-
-
-`HARM_CATEGORY_SEXUAL`
-
- |
-
-
-`4`
-
-**PaLM** - Contains references to sexual acts or other lewd
-content.
-
- |
-
-
-
-`HARM_CATEGORY_MEDICAL`
-
- |
-
-
-`5`
-
-**PaLM** - Promotes unchecked medical advice.
-
- |
-
-
-
-`HARM_CATEGORY_DANGEROUS`
-
- |
-
-
-`6`
-
-**PaLM** - Dangerous content that promotes, facilitates, or
-encourages harmful acts.
-
- |
-
-
-
-`HARM_CATEGORY_HARASSMENT`
-
- |
-
-
-`7`
-
-**Gemini** - Harassment content.
-
- |
-
-
-
-`HARM_CATEGORY_HATE_SPEECH`
-
- |
-
-
-`8`
-
-**Gemini** - Hate speech and content.
-
- |
-
-
-
-`HARM_CATEGORY_SEXUALLY_EXPLICIT`
-
- |
-
-
-`9`
-
-**Gemini** - Sexually explicit content.
-
- |
-
-
-
-`HARM_CATEGORY_DANGEROUS_CONTENT`
-
- |
-
-
-`10`
-
-**Gemini** - Dangerous content.
-
- |
-
-
-
-`HARM_CATEGORY_CIVIC_INTEGRITY`
-
- |
-
-
-`11`
-
-**Gemini** - Content that may be used to harm civic
-integrity.
-
- |
-
-
-
-
-
-
-
-
-
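A sketch of using one of the Gemini-era categories above in a safety setting, assuming the `google-generativeai` package; the threshold choice is illustrative:

```
from google.generativeai import protos

setting = protos.SafetySetting(
    category=protos.HarmCategory.HARM_CATEGORY_HARASSMENT,
    threshold=protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
)
```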
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-HARM_CATEGORY_CIVIC_INTEGRITY
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_DANGEROUS
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_DANGEROUS_CONTENT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_DEROGATORY
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_HARASSMENT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_HATE_SPEECH
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_MEDICAL
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_SEXUAL
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_SEXUALLY_EXPLICIT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_TOXICITY
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_VIOLENCE
-
- |
-
-
-``
-
- |
-
-
-
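-As a usage sketch (not part of the generated reference), the Gemini categories
-above are the ones passed as safety settings when generating content; the
-model name and threshold below are illustrative:
-
-```
-import google.generativeai as genai
-
-model = genai.GenerativeModel(
-    "gemini-1.5-flash",
-    safety_settings={
-        genai.protos.HarmCategory.HARM_CATEGORY_HARASSMENT:
-            genai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
-    },
-)
-```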
diff --git a/docs/api/google/generativeai/protos/Hyperparameters.md b/docs/api/google/generativeai/protos/Hyperparameters.md
deleted file mode 100644
index 80067de5c..000000000
--- a/docs/api/google/generativeai/protos/Hyperparameters.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-# google.generativeai.protos.Hyperparameters
-
-
-
-
-
-
-
-Hyperparameters controlling the tuning process.
-
-
- Read more at
-https://ai.google.dev/docs/model_tuning_guidance
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`learning_rate`
-
- |
-
-
-`float`
-
-Optional. Immutable. The learning rate
-hyperparameter for tuning. If not set, a default
-of 0.001 or 0.0002 will be calculated based on
-the number of training examples.
-
-This field is a member of `oneof`_ ``learning_rate_option``.
-
- |
-
-
-
-`learning_rate_multiplier`
-
- |
-
-
-`float`
-
-Optional. Immutable. The learning rate multiplier is used to
-calculate a final learning_rate based on the default
-(recommended) value. Actual learning rate :=
-learning_rate_multiplier \* default learning rate Default
-learning rate is dependent on base model and dataset size.
-If not set, a default of 1.0 will be used.
-
-This field is a member of `oneof`_ ``learning_rate_option``.
-
- |
-
-
-
-`epoch_count`
-
- |
-
-
-`int`
-
-Immutable. The number of training epochs. An
-epoch is one pass through the training data. If
-not set, a default of 5 will be used.
-
-
- |
-
-
-
-`batch_size`
-
- |
-
-
-`int`
-
-Immutable. The batch size hyperparameter for
-tuning. If not set, a default of 4 or 16 will be
-used based on the number of training examples.
-
-
- |
-
-
-
-
-
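-A usage sketch: these hyperparameters surface as keyword arguments on
-`genai.create_tuned_model` (the source model name and training rows below are
-illustrative, assuming the wrapper accepts them as shown):
-
-```
-import google.generativeai as genai
-
-operation = genai.create_tuned_model(
-    source_model="models/gemini-1.5-flash-001-tuning",  # illustrative
-    training_data=[
-        {"text_input": "1", "output": "2"},
-        {"text_input": "2", "output": "3"},
-    ],
-    epoch_count=5,        # documented default
-    batch_size=4,
-    learning_rate=0.001,  # mutually exclusive with learning_rate_multiplier
-)
-tuned_model = operation.result()  # blocks until tuning finishes
-```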
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsRequest.md b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
deleted file mode 100644
index 72da58e3d..000000000
--- a/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# google.generativeai.protos.ListCachedContentsRequest
-
-
-
-
-
-
-
-Request to list CachedContents.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of cached
-contents to return. The service may return fewer
-than this value. If unspecified, some default
-(under maximum) number of items will be
-returned. The maximum value is 1000; values
-above 1000 will be coerced to 1000.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListCachedContents`` call. Provide this to retrieve the
-subsequent page.
-
-When paginating, all other parameters provided to
-``ListCachedContents`` must match the call that provided the
-page token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsResponse.md b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
deleted file mode 100644
index a55772386..000000000
--- a/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.ListCachedContentsResponse
-
-
-
-
-
-
-
-Response with CachedContents list.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`cached_contents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CachedContent]`
-
-List of cached contents.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page. If this field is omitted, there are no subsequent
-pages.
-
- |
-
-
-
-
-
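-A usage sketch: the higher-level SDK wraps this request/response pair and
-follows ``next_page_token`` for you (assuming the ``CachedContent.list``
-helper shown here):
-
-```
-import google.generativeai as genai
-
-for cached in genai.caching.CachedContent.list():
-    print(cached.name, cached.model, cached.expire_time)
-```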
diff --git a/docs/api/google/generativeai/protos/ListChunksRequest.md b/docs/api/google/generativeai/protos/ListChunksRequest.md
deleted file mode 100644
index db163b75f..000000000
--- a/docs/api/google/generativeai/protos/ListChunksRequest.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# google.generativeai.protos.ListChunksRequest
-
-
-
-
-
-
-
-Request for listing ``Chunk``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The name of the ``Document`` containing
-``Chunk``\ s. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Chunk``\ s to return (per
-page). The service may return fewer ``Chunk``\ s.
-
-If unspecified, at most 10 ``Chunk``\ s will be returned.
-The maximum size limit is 100 ``Chunk``\ s per page.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListChunks`` call.
-
-Provide the ``next_page_token`` returned in the response as
-an argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListChunks`` must match the call that provided the page
-token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListChunksResponse.md b/docs/api/google/generativeai/protos/ListChunksResponse.md
deleted file mode 100644
index 32c0cfdfc..000000000
--- a/docs/api/google/generativeai/protos/ListChunksResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListChunksResponse
-
-
-
-
-
-
-
-Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
-
-
- The ``Chunk``\ s are sorted by ascending
-``chunk.create_time``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Chunk]`
-
-The returned ``Chunk``\ s.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page. If this field is omitted, there are no more
-pages.
-
- |
-
-
-
-
-
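-A manual pagination sketch over this request/response pair; ``send`` stands in
-for whatever performs the ``ListChunks`` RPC (e.g. a retriever service client)
-and is an assumption here:
-
-```
-import google.generativeai as genai
-
-def list_all_chunks(send, parent):
-    """Yield every Chunk under `parent`, following next_page_token."""
-    page_token = ""
-    while True:
-        response = send(genai.protos.ListChunksRequest(
-            parent=parent, page_size=100, page_token=page_token))
-        yield from response.chunks
-        page_token = response.next_page_token
-        if not page_token:
-            return
-```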
diff --git a/docs/api/google/generativeai/protos/ListCorporaRequest.md b/docs/api/google/generativeai/protos/ListCorporaRequest.md
deleted file mode 100644
index ec0eace8c..000000000
--- a/docs/api/google/generativeai/protos/ListCorporaRequest.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# google.generativeai.protos.ListCorporaRequest
-
-
-
-
-
-
-
-Request for listing ``Corpora``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Corpora`` to return (per
-page). The service may return fewer ``Corpora``.
-
-If unspecified, at most 10 ``Corpora`` will be returned. The
-maximum size limit is 20 ``Corpora`` per page.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListCorpora`` call.
-
-Provide the ``next_page_token`` returned in the response as
-an argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListCorpora`` must match the call that provided the page
-token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListCorporaResponse.md b/docs/api/google/generativeai/protos/ListCorporaResponse.md
deleted file mode 100644
index d66a5abbe..000000000
--- a/docs/api/google/generativeai/protos/ListCorporaResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListCorporaResponse
-
-
-
-
-
-
-
-Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
-
-
- The results are sorted by ascending
-``corpus.create_time``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`corpora`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Corpus]`
-
-The returned corpora.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page. If this field is omitted, there are no more
-pages.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListDocumentsRequest.md b/docs/api/google/generativeai/protos/ListDocumentsRequest.md
deleted file mode 100644
index ae33ad435..000000000
--- a/docs/api/google/generativeai/protos/ListDocumentsRequest.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# google.generativeai.protos.ListDocumentsRequest
-
-
-
-
-
-
-
-Request for listing ``Document``\ s.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The name of the ``Corpus`` containing
-``Document``\ s. Example: ``corpora/my-corpus-123``
-
- |
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Document``\ s to return
-(per page). The service may return fewer ``Document``\ s.
-
-If unspecified, at most 10 ``Document``\ s will be returned.
-The maximum size limit is 20 ``Document``\ s per page.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListDocuments`` call.
-
-Provide the ``next_page_token`` returned in the response as
-an argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListDocuments`` must match the call that provided the page
-token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListDocumentsResponse.md b/docs/api/google/generativeai/protos/ListDocumentsResponse.md
deleted file mode 100644
index 3c98c4992..000000000
--- a/docs/api/google/generativeai/protos/ListDocumentsResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListDocumentsResponse
-
-
-
-
-
-
-
-Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
-
-
- The ``Document``\ s are sorted by ascending
-``document.create_time``.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`documents`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Document]`
-
-The returned ``Document``\ s.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page. If this field is omitted, there are no more
-pages.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListFilesRequest.md b/docs/api/google/generativeai/protos/ListFilesRequest.md
deleted file mode 100644
index 31874b586..000000000
--- a/docs/api/google/generativeai/protos/ListFilesRequest.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.ListFilesRequest
-
-
-
-
-
-
-
-Request for ``ListFiles``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. Maximum number of ``File``\ s to return per page.
-If unspecified, defaults to 10. Maximum ``page_size`` is
-100.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token from a previous ``ListFiles`` call.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListFilesResponse.md b/docs/api/google/generativeai/protos/ListFilesResponse.md
deleted file mode 100644
index 92b6fc1f4..000000000
--- a/docs/api/google/generativeai/protos/ListFilesResponse.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.ListFilesResponse
-
-
-
-
-
-
-
-Response for ``ListFiles``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`files`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.File]`
-
-The list of ``File``\ s.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token that can be sent as a ``page_token`` into a
-subsequent ``ListFiles`` call.
-
- |
-
-
-
-
-
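-A usage sketch: ``genai.list_files`` drives this request/response pair and
-yields ``File`` objects, following ``next_page_token`` across pages (assuming
-the ``page_size`` argument maps through as shown):
-
-```
-import google.generativeai as genai
-
-for f in genai.list_files(page_size=100):
-    print(f.name, f.display_name)
-```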
diff --git a/docs/api/google/generativeai/protos/ListModelsRequest.md b/docs/api/google/generativeai/protos/ListModelsRequest.md
deleted file mode 100644
index 25bcaa195..000000000
--- a/docs/api/google/generativeai/protos/ListModelsRequest.md
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# google.generativeai.protos.ListModelsRequest
-
-
-
-
-
-
-
-Request for listing all Models.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-The maximum number of ``Models`` to return (per page).
-
-If unspecified, 50 models will be returned per page. This
-method returns at most 1000 models per page, even if you
-pass a larger page_size.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-A page token, received from a previous ``ListModels`` call.
-
-Provide the ``page_token`` returned by one request as an
-argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListModels`` must match the call that provided the page
-token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListModelsResponse.md b/docs/api/google/generativeai/protos/ListModelsResponse.md
deleted file mode 100644
index c85950641..000000000
--- a/docs/api/google/generativeai/protos/ListModelsResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListModelsResponse
-
-
-
-
-
-
-
-Response from ``ListModel`` containing a paginated list of Models.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`models`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Model]`
-
-The returned Models.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page.
-
-If this field is omitted, there are no more pages.
-
- |
-
-
-
-
-
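-A usage sketch: ``genai.list_models`` pages through this response for you, so
-the usual pattern is simply to iterate:
-
-```
-import google.generativeai as genai
-
-for m in genai.list_models():
-    if "generateContent" in m.supported_generation_methods:
-        print(m.name)
-```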
diff --git a/docs/api/google/generativeai/protos/ListPermissionsRequest.md b/docs/api/google/generativeai/protos/ListPermissionsRequest.md
deleted file mode 100644
index 053d9495b..000000000
--- a/docs/api/google/generativeai/protos/ListPermissionsRequest.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# google.generativeai.protos.ListPermissionsRequest
-
-
-
-
-
-
-
-Request for listing permissions.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-`str`
-
-Required. The parent resource of the permissions. Formats:
-``tunedModels/{tuned_model}`` ``corpora/{corpus}``
-
- |
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Permission``\ s to return
-(per page). The service may return fewer permissions.
-
-If unspecified, at most 10 permissions will be returned.
-This method returns at most 1000 permissions per page, even
-if you pass a larger page_size.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListPermissions`` call.
-
-Provide the ``page_token`` returned by one request as an
-argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListPermissions`` must match the call that provided the
-page token.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListPermissionsResponse.md b/docs/api/google/generativeai/protos/ListPermissionsResponse.md
deleted file mode 100644
index 8fab74f1c..000000000
--- a/docs/api/google/generativeai/protos/ListPermissionsResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListPermissionsResponse
-
-
-
-
-
-
-
-Response from ``ListPermissions`` containing a paginated list of permissions.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`permissions`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Permission]`
-
-Returned permissions.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page.
-
-If this field is omitted, there are no more pages.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsRequest.md b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
deleted file mode 100644
index 69f5f5590..000000000
--- a/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-# google.generativeai.protos.ListTunedModelsRequest
-
-
-
-
-
-
-
-Request for listing TunedModels.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`page_size`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``TunedModels`` to return
-(per page). The service may return fewer tuned models.
-
-If unspecified, at most 10 tuned models will be returned.
-This method returns at most 1000 models per page, even if
-you pass a larger page_size.
-
- |
-
-
-
-`page_token`
-
- |
-
-
-`str`
-
-Optional. A page token, received from a previous
-``ListTunedModels`` call.
-
-Provide the ``page_token`` returned by one request as an
-argument to the next request to retrieve the next page.
-
-When paginating, all other parameters provided to
-``ListTunedModels`` must match the call that provided the
-page token.
-
- |
-
-
-
-`filter`
-
- |
-
-
-`str`
-
-Optional. A filter is a full text search over
-the tuned model's description and display name.
-By default, results will not include tuned
-models shared with everyone.
-
-Additional operators:
-
- - owner:me
- - writers:me
- - readers:me
- - readers:everyone
-
-Examples:
-
- "owner:me" returns all tuned models to which
-caller has owner role "readers:me" returns all
-tuned models to which caller has reader role
-"readers:everyone" returns all tuned models that
-are shared with everyone
-
- |
-
-
-
-
-
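-A sketch of the ``filter`` syntax above, expressed as the raw request proto
-(the high-level ``genai.list_tuned_models`` wrapper drives the same RPC):
-
-```
-import google.generativeai as genai
-
-request = genai.protos.ListTunedModelsRequest(
-    page_size=10,
-    filter="owner:me",  # only tuned models the caller owns
-)
-```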
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsResponse.md b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
deleted file mode 100644
index 0487606bd..000000000
--- a/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.protos.ListTunedModelsResponse
-
-
-
-
-
-
-
-Response from ``ListTunedModels`` containing a paginated list of Models.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_models`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.TunedModel]`
-
-The returned Models.
-
- |
-
-
-
-`next_page_token`
-
- |
-
-
-`str`
-
-A token, which can be sent as ``page_token`` to retrieve the
-next page.
-
-If this field is omitted, there are no more pages.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/LogprobsResult.md b/docs/api/google/generativeai/protos/LogprobsResult.md
deleted file mode 100644
index 59ec1949a..000000000
--- a/docs/api/google/generativeai/protos/LogprobsResult.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.protos.LogprobsResult
-
-
-
-
-
-
-
-Logprobs Result
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`top_candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.LogprobsResult.TopCandidates]`
-
-Length = total number of decoding steps.
-
- |
-
-
-
-`chosen_candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.LogprobsResult.Candidate]`
-
-Length = total number of decoding steps. The chosen
-candidates may or may not be in top_candidates.
-
- |
-
-
-
-
-
-## Child Classes
-[`class Candidate`](../../../google/generativeai/protos/LogprobsResult/Candidate.md)
-
-[`class TopCandidates`](../../../google/generativeai/protos/LogprobsResult/TopCandidates.md)
-
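-A reading sketch, assuming a ``logprobs_result`` proto obtained elsewhere
-(e.g. from a response candidate that requested logprobs):
-
-```
-def print_logprobs(logprobs_result):
-    # One chosen candidate and one TopCandidates entry per decoding step.
-    for step, (chosen, top) in enumerate(
-            zip(logprobs_result.chosen_candidates,
-                logprobs_result.top_candidates)):
-        alts = ", ".join(f"{c.token!r}: {c.log_probability:.2f}"
-                         for c in top.candidates)
-        print(f"step {step}: chose {chosen.token!r} (top: {alts})")
-```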
diff --git a/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md b/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md
deleted file mode 100644
index 59d88d3b1..000000000
--- a/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-# google.generativeai.protos.LogprobsResult.Candidate
-
-
-
-
-
-
-
-Candidate for the logprobs token and score.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`token`
-
- |
-
-
-`str`
-
-The candidate’s token string value.
-
-
- |
-
-
-
-`token_id`
-
- |
-
-
-`int`
-
-The candidate’s token id value.
-
-
- |
-
-
-
-`log_probability`
-
- |
-
-
-`float`
-
-The candidate's log probability.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md b/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md
deleted file mode 100644
index 9a8d5bba2..000000000
--- a/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-# google.generativeai.protos.LogprobsResult.TopCandidates
-
-
-
-
-
-
-
-Candidates with top log probabilities at each decoding step.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.LogprobsResult.Candidate]`
-
-Sorted by log probability in descending
-order.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Message.md b/docs/api/google/generativeai/protos/Message.md
deleted file mode 100644
index 5f0dd52c1..000000000
--- a/docs/api/google/generativeai/protos/Message.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-# google.generativeai.protos.Message
-
-
-
-
-
-
-
-The base unit of structured text.
-
-
-
-A ``Message`` includes an ``author`` and the ``content`` of the
-``Message``.
-
-The ``author`` is used to tag messages when they are fed to the
-model as text.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`author`
-
- |
-
-
-`str`
-
-Optional. The author of this Message.
-
-This serves as a key for tagging
-the content of this Message when it is fed to
-the model as text.
-
-The author can be any alphanumeric string.
-
- |
-
-
-
-`content`
-
- |
-
-
-`str`
-
-Required. The text content of the structured ``Message``.
-
- |
-
-
-
-`citation_metadata`
-
- |
-
-
-`google.ai.generativelanguage.CitationMetadata`
-
-Output only. Citation information for model-generated
-``content`` in this ``Message``.
-
-If this ``Message`` was generated as output from the model,
-this field may be populated with attribution information for
-any text included in the ``content``. This field is used
-only on output.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/MessagePrompt.md b/docs/api/google/generativeai/protos/MessagePrompt.md
deleted file mode 100644
index 6da9cfd45..000000000
--- a/docs/api/google/generativeai/protos/MessagePrompt.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-# google.generativeai.protos.MessagePrompt
-
-
-
-
-
-
-
-All of the structured input text passed to the model as a prompt.
-
-
-
-A ``MessagePrompt`` contains a structured set of fields that provide
-context for the conversation, examples of user input/model output
-message pairs that prime the model to respond in different ways, and
-the conversation history or list of messages representing the
-alternating turns of the conversation between the user and the
-model.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`context`
-
- |
-
-
-`str`
-
-Optional. Text that should be provided to the model first to
-ground the response.
-
-If not empty, this ``context`` will be given to the model
-first before the ``examples`` and ``messages``. When using a
-``context`` be sure to provide it with every request to
-maintain continuity.
-
-This field can be a description of your prompt to the model
-to help provide context and guide the responses. Examples:
-"Translate the phrase from English to French." or "Given a
-statement, classify the sentiment as happy, sad or neutral."
-
-Anything included in this field will take precedence over
-message history if the total input size exceeds the model's
-``input_token_limit`` and the input request is truncated.
-
- |
-
-
-
-`examples`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Example]`
-
-Optional. Examples of what the model should generate.
-
-This includes both user input and the response that the
-model should emulate.
-
-These ``examples`` are treated identically to conversation
-messages except that they take precedence over the history
-in ``messages``: If the total input size exceeds the model's
-``input_token_limit`` the input will be truncated. Items
-will be dropped from ``messages`` before ``examples``.
-
- |
-
-
-
-`messages`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Message]`
-
-Required. A snapshot of the recent conversation history
-sorted chronologically.
-
-Turns alternate between two authors.
-
-If the total input size exceeds the model's
-``input_token_limit`` the input will be truncated: The
-oldest items will be dropped from ``messages``.
-
- |
-
-
-
-
-
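-A construction sketch for the structure above (PaLM-era message API; the
-strings are illustrative):
-
-```
-import google.generativeai as genai
-
-prompt = genai.protos.MessagePrompt(
-    context="Translate the phrase from English to French.",
-    examples=[genai.protos.Example(
-        input=genai.protos.Message(content="Hello"),
-        output=genai.protos.Message(content="Bonjour"),
-    )],
-    messages=[genai.protos.Message(author="user", content="Good morning")],
-)
-```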
diff --git a/docs/api/google/generativeai/protos/MetadataFilter.md b/docs/api/google/generativeai/protos/MetadataFilter.md
deleted file mode 100644
index f5c33833b..000000000
--- a/docs/api/google/generativeai/protos/MetadataFilter.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.protos.MetadataFilter
-
-
-
-
-
-
-
-User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
-
-
-Example (genre = drama OR genre = action):
-key = "document.custom_metadata.genre" conditions =
-[{string_value = "drama", operation = EQUAL},
- {string_value = "action", operation = EQUAL}]
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`key`
-
- |
-
-
-`str`
-
-Required. The key of the metadata to filter
-on.
-
- |
-
-
-
-`conditions`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.Condition]`
-
-Required. The ``Condition``\ s for the given key that will
-trigger this filter. Multiple ``Condition``\ s are joined by
-logical ORs.
-
- |
-
-
-
-
-
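-The (genre = drama OR genre = action) example above, spelled out as a proto:
-
-```
-import google.generativeai as genai
-
-genre_filter = genai.protos.MetadataFilter(
-    key="document.custom_metadata.genre",
-    conditions=[
-        genai.protos.Condition(
-            string_value="drama",
-            operation=genai.protos.Condition.Operator.EQUAL),
-        genai.protos.Condition(
-            string_value="action",
-            operation=genai.protos.Condition.Operator.EQUAL),
-    ],
-)
-```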
diff --git a/docs/api/google/generativeai/protos/Model.md b/docs/api/google/generativeai/protos/Model.md
deleted file mode 100644
index dbc0d1c79..000000000
--- a/docs/api/google/generativeai/protos/Model.md
+++ /dev/null
@@ -1,240 +0,0 @@
-
-# google.generativeai.protos.Model
-
-
-
-
-
-
-
-Information about a Generative Language Model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the ``Model``. Refer to the
-Model variants documentation for all allowed values.
-
-Format: ``models/{model}`` with a ``{model}`` naming
-convention of:
-
-- "{base_model_id}-{version}"
-
-Examples:
-
-- ``models/gemini-1.5-flash-001``
-
- |
-
-
-
-`base_model_id`
-
- |
-
-
-`str`
-
-Required. The name of the base model; pass this to the
-generation request.
-
-Examples:
-
-- ``gemini-1.5-flash``
-
- |
-
-
-
-`version`
-
- |
-
-
-`str`
-
-Required. The version number of the model.
-
-This represents the major version (``1.0`` or ``1.5``)
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-The human-readable name of the model. E.g.
-"Gemini 1.5 Flash".
-The name can be up to 128 characters long and
-can consist of any UTF-8 characters.
-
- |
-
-
-
-`description`
-
- |
-
-
-`str`
-
-A short description of the model.
-
- |
-
-
-
-`input_token_limit`
-
- |
-
-
-`int`
-
-Maximum number of input tokens allowed for
-this model.
-
- |
-
-
-
-`output_token_limit`
-
- |
-
-
-`int`
-
-Maximum number of output tokens available for
-this model.
-
- |
-
-
-
-`supported_generation_methods`
-
- |
-
-
-`MutableSequence[str]`
-
-The model's supported generation methods.
-
-The corresponding API method names are defined as Pascal
-case strings, such as ``generateMessage`` and
-``generateContent``.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Controls the randomness of the output.
-
-Values can range over ``[0.0,max_temperature]``, inclusive.
-A higher value will produce responses that are more varied,
-while a value closer to ``0.0`` will typically result in
-less surprising responses from the model. This value
-specifies the default used by the backend when calling the
-model.
-
-
- |
-
-
-
-`max_temperature`
-
- |
-
-
-`float`
-
-The maximum temperature this model can use.
-
-
- |
-
-
-
-`top_p`
-
- |
-
-
-`float`
-
-For nucleus sampling.
-
-Nucleus sampling considers the smallest set of tokens whose
-probability sum is at least ``top_p``. This value specifies
-the default used by the backend when calling the model.
-
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-For Top-k sampling.
-
-Top-k sampling considers the set of ``top_k`` most probable
-tokens. This value specifies the default used by the backend
-when calling the model. If empty, the model doesn't use
-top-k sampling, and ``top_k`` isn't allowed as a generation
-parameter.
-
-
- |
-
-
-
-
-
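-A usage sketch: ``genai.get_model`` returns a populated ``Model``, so the
-attributes above can be inspected directly:
-
-```
-import google.generativeai as genai
-
-model = genai.get_model("models/gemini-1.5-flash")
-print(model.display_name)
-print(model.input_token_limit, model.output_token_limit)
-print(model.supported_generation_methods)
-```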
diff --git a/docs/api/google/generativeai/protos/Part.md b/docs/api/google/generativeai/protos/Part.md
deleted file mode 100644
index 53497f688..000000000
--- a/docs/api/google/generativeai/protos/Part.md
+++ /dev/null
@@ -1,158 +0,0 @@
-
-# google.generativeai.protos.Part
-
-
-
-
-
-
-
-A datatype containing media that is part of a multi-part ``Content`` message.
-
-
-
-A ``Part`` consists of data which has an associated datatype. A
-``Part`` can only contain one of the accepted types in
-``Part.data``.
-
-A ``Part`` must have a fixed IANA MIME type identifying the type and
-subtype of the media if the ``inline_data`` field is filled with raw
-bytes.
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`text`
-
- |
-
-
-`str`
-
-Inline text.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`inline_data`
-
- |
-
-
-`google.ai.generativelanguage.Blob`
-
-Inline media bytes.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`function_call`
-
- |
-
-
-`google.ai.generativelanguage.FunctionCall`
-
-A predicted ``FunctionCall`` returned from the model that
-contains a string representing the
-FunctionDeclaration.name with the arguments and their
-values.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`function_response`
-
- |
-
-
-`google.ai.generativelanguage.FunctionResponse`
-
-The result output of a ``FunctionCall``: a string
-representing the FunctionDeclaration.name and a structured
-JSON object containing any output from the function, used
-as context to the model.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`file_data`
-
- |
-
-
-`google.ai.generativelanguage.FileData`
-
-URI based data.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`executable_code`
-
- |
-
-
-`google.ai.generativelanguage.ExecutableCode`
-
-Code generated by the model that is meant to
-be executed.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-`code_execution_result`
-
- |
-
-
-`google.ai.generativelanguage.CodeExecutionResult`
-
-Result of executing the ``ExecutableCode``.
-
-This field is a member of `oneof`_ ``data``.
-
- |
-
-
-
-
-
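-A construction sketch: a ``Content`` mixing a text part and an inline-data
-part (the image bytes are a placeholder):
-
-```
-import google.generativeai as genai
-
-content = genai.protos.Content(
-    role="user",
-    parts=[
-        genai.protos.Part(text="Describe this image."),
-        genai.protos.Part(inline_data=genai.protos.Blob(
-            mime_type="image/png", data=b"...png bytes...")),
-    ],
-)
-```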
diff --git a/docs/api/google/generativeai/protos/Permission.md b/docs/api/google/generativeai/protos/Permission.md
deleted file mode 100644
index 18cfc35bb..000000000
--- a/docs/api/google/generativeai/protos/Permission.md
+++ /dev/null
@@ -1,118 +0,0 @@
-
-# google.generativeai.protos.Permission
-
-
-
-
-
-
-
-Permission resource grants a user, group, or the rest of the world access to a PaLM API resource (e.g. a tuned model or corpus).
-
-A role is a collection of permitted operations that allows users
-to perform specific actions on PaLM API resources. To make them
-available to users, groups, or service accounts, you assign
-roles. When you assign a role, you grant permissions that the
-role contains.
-
-There are three concentric roles. Each role is a superset of the
-previous role's permitted operations:
-
-- reader can use the resource (e.g. tuned model, corpus) for
- inference
-- writer has reader's permissions and additionally can edit and
- share
-- owner has writer's permissions and additionally can delete
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Output only. Identifier. The permission name. A unique name
-is generated on create. Examples:
-``tunedModels/{tuned_model}/permissions/{permission}``,
-``corpora/{corpus}/permissions/{permission}``
-
- |
-
-
-
-`grantee_type`
-
- |
-
-
-`google.ai.generativelanguage.Permission.GranteeType`
-
-Optional. Immutable. The type of the grantee.
-
-
- |
-
-
-
-`email_address`
-
- |
-
-
-`str`
-
-Optional. Immutable. The email address of the
-user or group to which this permission refers.
-This field is not set when the permission's
-grantee type is EVERYONE.
-
-
- |
-
-
-
-`role`
-
- |
-
-
-`google.ai.generativelanguage.Permission.Role`
-
-Required. The role granted by this
-permission.
-
-
- |
-
-
-
-
-
-## Child Classes
-[`class GranteeType`](../../../google/generativeai/protos/Permission/GranteeType.md)
-
-[`class Role`](../../../google/generativeai/protos/Permission/Role.md)
-
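-A construction sketch: granting a user read access (the email address is
-illustrative):
-
-```
-import google.generativeai as genai
-
-permission = genai.protos.Permission(
-    grantee_type=genai.protos.Permission.GranteeType.USER,
-    email_address="reader@example.com",
-    role=genai.protos.Permission.Role.READER,
-)
-```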
diff --git a/docs/api/google/generativeai/protos/Permission/GranteeType.md b/docs/api/google/generativeai/protos/Permission/GranteeType.md
deleted file mode 100644
index 807fcc4dd..000000000
--- a/docs/api/google/generativeai/protos/Permission/GranteeType.md
+++ /dev/null
@@ -1,701 +0,0 @@
-
-# google.generativeai.protos.Permission.GranteeType
-
-
-
-
-
-
-
-Defines types of the grantee of this permission.
-
-
-google.generativeai.protos.Permission.GranteeType(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`GRANTEE_TYPE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-The default value. This value is unused.
-
- |
-
-
-
-`USER`
-
- |
-
-
-`1`
-
-Represents a user. When set, you must provide email_address
-for the user.
-
- |
-
-
-
-`GROUP`
-
- |
-
-
-`2`
-
-Represents a group. When set, you must provide email_address
-for the group.
-
- |
-
-
-
-`EVERYONE`
-
- |
-
-
-`3`
-
-Represents access to everyone. No extra
-information is required.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-EVERYONE
-
- |
-
-
-``
-
- |
-
-
-
-GRANTEE_TYPE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-GROUP
-
- |
-
-
-``
-
- |
-
-
-
-USER
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/Permission/Role.md b/docs/api/google/generativeai/protos/Permission/Role.md
deleted file mode 100644
index 12f34993e..000000000
--- a/docs/api/google/generativeai/protos/Permission/Role.md
+++ /dev/null
@@ -1,700 +0,0 @@
-
-# google.generativeai.protos.Permission.Role
-
-
-
-
-
-
-
-Defines the role granted by this permission.
-
-
-google.generativeai.protos.Permission.Role(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`ROLE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-The default value. This value is unused.
-
- |
-
-
-
-`OWNER`
-
- |
-
-
-`1`
-
-Owner can use, update, share and delete the
-resource.
-
- |
-
-
-
-`WRITER`
-
- |
-
-
-`2`
-
-Writer can use, update and share the
-resource.
-
- |
-
-
-
-`READER`
-
- |
-
-
-`3`
-
-Reader can use the resource.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-OWNER
-
- |
-
-
-``
-
- |
-
-
-
-READER
-
- |
-
-
-``
-
- |
-
-
-
-ROLE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-WRITER
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/PredictRequest.md b/docs/api/google/generativeai/protos/PredictRequest.md
deleted file mode 100644
index c28fdcaeb..000000000
--- a/docs/api/google/generativeai/protos/PredictRequest.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-# google.generativeai.protos.PredictRequest
-
-
-
-
-
-
-
-Request message for [PredictionService.Predict][google.ai.generativelanguage.v1beta.PredictionService.Predict].
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`model`
-
- |
-
-
-`str`
-
-Required. The name of the model for prediction. Format:
-``name=models/{model}``.
-
- |
-
-
-
-`instances`
-
- |
-
-
-`MutableSequence[google.protobuf.struct_pb2.Value]`
-
-Required. The instances that are the input to
-the prediction call.
-
- |
-
-
-
-`parameters`
-
- |
-
-
-`google.protobuf.struct_pb2.Value`
-
-Optional. The parameters that govern the
-prediction call.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/PredictResponse.md b/docs/api/google/generativeai/protos/PredictResponse.md
deleted file mode 100644
index 71bc979a3..000000000
--- a/docs/api/google/generativeai/protos/PredictResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.PredictResponse
-
-
-
-
-
-
-
-Response message for [PredictionService.Predict].
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`predictions`
-
- |
-
-
-`MutableSequence[google.protobuf.struct_pb2.Value]`
-
-The outputs of the prediction call.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/QueryCorpusRequest.md b/docs/api/google/generativeai/protos/QueryCorpusRequest.md
deleted file mode 100644
index c7e328a35..000000000
--- a/docs/api/google/generativeai/protos/QueryCorpusRequest.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-# google.generativeai.protos.QueryCorpusRequest
-
-
-
-
-
-
-
-Request for querying a ``Corpus``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``Corpus`` to query. Example:
-``corpora/my-corpus-123``
-
- |
-
-
-
-`query`
-
- |
-
-
-`str`
-
-Required. Query string to perform semantic
-search.
-
- |
-
-
-
-`metadata_filters`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
-
-Optional. Filter for ``Chunk`` and ``Document`` metadata.
-Each ``MetadataFilter`` object should correspond to a unique
-key. Multiple ``MetadataFilter`` objects are joined by
-logical "AND"s.
-
-Example query at document level: (year >= 2020 OR year <
-2010) AND (genre = drama OR genre = action)
-
-``MetadataFilter`` object list: metadata_filters = [ {key =
-"document.custom_metadata.year" conditions = [{int_value =
-2020, operation = GREATER_EQUAL}, {int_value = 2010,
-operation = LESS}]}, {key = "document.custom_metadata.year"
-conditions = [{int_value = 2020, operation = GREATER_EQUAL},
-{int_value = 2010, operation = LESS}]}, {key =
-"document.custom_metadata.genre" conditions = [{string_value
-= "drama", operation = EQUAL}, {string_value = "action",
-operation = EQUAL}]}]
-
-Example query at chunk level for a numeric range of values:
-(year > 2015 AND year <= 2020)
-
-``MetadataFilter`` object list: metadata_filters = [ {key =
-"chunk.custom_metadata.year" conditions = [{int_value =
-2015, operation = GREATER}]}, {key =
-"chunk.custom_metadata.year" conditions = [{int_value =
-2020, operation = LESS_EQUAL}]}]
-
-Note: "AND"s for the same key are only supported for numeric
-values. String values only support "OR"s for the same key.
-
- |
-
-
-
-`results_count`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Chunk``\ s to return. The
-service may return fewer ``Chunk``\ s.
-
-If unspecified, at most 10 ``Chunk``\ s will be returned.
-The maximum specified result count is 100.
-
- |
-
-
-
-
-
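-A construction sketch for a corpus query restricted by the genre filter above
-(the query string is illustrative); the numeric year filters are built the
-same way with their own conditions:
-
-```
-import google.generativeai as genai
-
-request = genai.protos.QueryCorpusRequest(
-    name="corpora/my-corpus-123",
-    query="What happens in the third act?",
-    results_count=10,
-    metadata_filters=[genai.protos.MetadataFilter(
-        key="document.custom_metadata.genre",
-        conditions=[
-            genai.protos.Condition(
-                string_value="drama",
-                operation=genai.protos.Condition.Operator.EQUAL),
-            genai.protos.Condition(
-                string_value="action",
-                operation=genai.protos.Condition.Operator.EQUAL),
-        ])],
-)
-```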
diff --git a/docs/api/google/generativeai/protos/QueryCorpusResponse.md b/docs/api/google/generativeai/protos/QueryCorpusResponse.md
deleted file mode 100644
index e2b73d063..000000000
--- a/docs/api/google/generativeai/protos/QueryCorpusResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.QueryCorpusResponse
-
-
-
-
-
-
-
-Response from ``QueryCorpus`` containing a list of relevant chunks.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`relevant_chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
-
-The relevant chunks.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/QueryDocumentRequest.md b/docs/api/google/generativeai/protos/QueryDocumentRequest.md
deleted file mode 100644
index 55fd996f3..000000000
--- a/docs/api/google/generativeai/protos/QueryDocumentRequest.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-# google.generativeai.protos.QueryDocumentRequest
-
-
-
-
-
-
-
-Request for querying a ``Document``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The name of the ``Document`` to query. Example:
-``corpora/my-corpus-123/documents/the-doc-abc``
-
- |
-
-
-
-`query`
-
- |
-
-
-`str`
-
-Required. Query string to perform semantic
-search.
-
- |
-
-
-
-`results_count`
-
- |
-
-
-`int`
-
-Optional. The maximum number of ``Chunk``\ s to return. The
-service may return fewer ``Chunk``\ s.
-
-If unspecified, at most 10 ``Chunk``\ s will be returned.
-The maximum specified result count is 100.
-
- |
-
-
-
-`metadata_filters`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
-
-Optional. Filter for ``Chunk`` metadata. Each
-``MetadataFilter`` object should correspond to a unique key.
-Multiple ``MetadataFilter`` objects are joined by logical
-"AND"s.
-
-Note: ``Document``-level filtering is not supported for this
-request because a ``Document`` name is already specified.
-
-Example query: (year >= 2020 OR year < 2010) AND (genre =
-drama OR genre = action)
-
-``MetadataFilter`` object list: metadata_filters = [{key =
-"chunk.custom_metadata.year" conditions = [{int_value =
-2020, operation = GREATER_EQUAL}, {int_value = 2010,
-operation = LESS}]}, {key = "chunk.custom_metadata.genre"
-conditions = [{string_value = "drama", operation = EQUAL},
-{string_value = "action", operation = EQUAL}]}]
-
-Example query for a numeric range of values: (year > 2015
-AND year <= 2020)
-
-``MetadataFilter`` object list: metadata_filters = [ {key =
-"chunk.custom_metadata.year" conditions = [{int_value =
-2015, operation = GREATER}]}, {key =
-"chunk.custom_metadata.year" conditions = [{int_value =
-2020, operation = LESS_EQUAL}]}]
-
-Note: "AND"s for the same key are only supported for numeric
-values. String values only support "OR"s for the same key.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/QueryDocumentResponse.md b/docs/api/google/generativeai/protos/QueryDocumentResponse.md
deleted file mode 100644
index cb41e8bb4..000000000
--- a/docs/api/google/generativeai/protos/QueryDocumentResponse.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.QueryDocumentResponse
-
-
-
-
-
-
-
-Response from ``QueryDocument`` containing a list of relevant chunks.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`relevant_chunks`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
-
-The returned relevant chunks.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/RelevantChunk.md b/docs/api/google/generativeai/protos/RelevantChunk.md
deleted file mode 100644
index ab4a05ed1..000000000
--- a/docs/api/google/generativeai/protos/RelevantChunk.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-# google.generativeai.protos.RelevantChunk
-
-
-
-
-
-
-
-The information for a chunk relevant to a query.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`chunk_relevance_score`
-
- |
-
-
-`float`
-
-``Chunk`` relevance to the query.
-
- |
-
-
-
-`chunk`
-
- |
-
-
-`google.ai.generativelanguage.Chunk`
-
-``Chunk`` associated with the query.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/RetrievalMetadata.md b/docs/api/google/generativeai/protos/RetrievalMetadata.md
deleted file mode 100644
index eec4e6906..000000000
--- a/docs/api/google/generativeai/protos/RetrievalMetadata.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-# google.generativeai.protos.RetrievalMetadata
-
-
-
-
-
-
-
-Metadata related to retrieval in the grounding flow.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`google_search_dynamic_retrieval_score`
-
- |
-
-
-`float`
-
-Optional. Score indicating how likely information from
-google search could help answer the prompt. The score is in
-the range [0, 1], where 0 is the least likely and 1 is the
-most likely. This score is only populated when google search
-grounding and dynamic retrieval is enabled. It will be
-compared to the threshold to determine whether to trigger
-google search.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/SafetyFeedback.md b/docs/api/google/generativeai/protos/SafetyFeedback.md
deleted file mode 100644
index d0f1c766a..000000000
--- a/docs/api/google/generativeai/protos/SafetyFeedback.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.protos.SafetyFeedback
-
-
-
-
-
-
-
-Safety feedback for an entire request.
-
-
-
-This field is populated if content in the input and/or response
-is blocked due to safety settings. SafetyFeedback may not exist
-for every HarmCategory. Each SafetyFeedback will return the
-safety settings used by the request as well as the lowest
-HarmProbability that should be allowed in order to return a
-result.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`rating`
-
- |
-
-
-`google.ai.generativelanguage.SafetyRating`
-
-Safety rating evaluated from content.
-
- |
-
-
-
-`setting`
-
- |
-
-
-`google.ai.generativelanguage.SafetySetting`
-
-Safety settings applied to the request.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/SafetyRating.md b/docs/api/google/generativeai/protos/SafetyRating.md
deleted file mode 100644
index 6dd3fa3a6..000000000
--- a/docs/api/google/generativeai/protos/SafetyRating.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# google.generativeai.protos.SafetyRating
-
-
-
-
-
-
-
-Safety rating for a piece of content.
-
-
-
-The safety rating contains the category of harm and the harm
-probability level in that category for a piece of content.
-Content is classified for safety across a number of harm
-categories and the probability of the harm classification is
-included here.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`category`
-
- |
-
-
-`google.ai.generativelanguage.HarmCategory`
-
-Required. The category for this rating.
-
- |
-
-
-
-`probability`
-
- |
-
-
-`google.ai.generativelanguage.SafetyRating.HarmProbability`
-
-Required. The probability of harm for this
-content.
-
- |
-
-
-
-`blocked`
-
- |
-
-
-`bool`
-
-Was this content blocked because of this
-rating?
-
- |
-
-
-
-
-
-## Child Classes
-[`class HarmProbability`](../../../google/generativeai/types/HarmProbability.md)
-
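As a rough sketch of where these ratings surface in the high-level client (assumes `genai.configure(api_key=...)` has been called; the model name is only an example):

```
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")  # example model name
response = model.generate_content("Write a short, friendly greeting.")

# Each candidate carries a list of SafetyRating messages.
for rating in response.candidates[0].safety_ratings:
    print(rating.category, rating.probability, rating.blocked)
```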
diff --git a/docs/api/google/generativeai/protos/SafetySetting.md b/docs/api/google/generativeai/protos/SafetySetting.md
deleted file mode 100644
index 3a21e77d5..000000000
--- a/docs/api/google/generativeai/protos/SafetySetting.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.protos.SafetySetting
-
-
-
-
-
-
-
-Safety setting, affecting the safety-blocking behavior.
-
-
-
-Passing a safety setting for a category changes the allowed
-probability that content is blocked.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`category`
-
- |
-
-
-`google.ai.generativelanguage.HarmCategory`
-
-Required. The category for this setting.
-
- |
-
-
-
-`threshold`
-
- |
-
-
-`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
-
-Required. Controls the probability threshold
-at which harm is blocked.
-
- |
-
-
-
-
-
-## Child Classes
-[`class HarmBlockThreshold`](../../../google/generativeai/types/HarmBlockThreshold.md)
-
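A minimal sketch of supplying these settings through the high-level client; the categories and thresholds chosen here are purely illustrative:

```
import google.generativeai as genai
from google.generativeai.types import HarmBlockThreshold, HarmCategory

# Map each HarmCategory to the threshold at which content should be blocked.
model = genai.GenerativeModel(
    "gemini-1.5-flash",  # example model name
    safety_settings={
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    },
)
response = model.generate_content("Tell me a joke about computers.")
print(response.text)
```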
diff --git a/docs/api/google/generativeai/protos/Schema.md b/docs/api/google/generativeai/protos/Schema.md
deleted file mode 100644
index 732e9cd43..000000000
--- a/docs/api/google/generativeai/protos/Schema.md
+++ /dev/null
@@ -1,186 +0,0 @@
-
-# google.generativeai.protos.Schema
-
-
-
-
-
-
-
-The ``Schema`` object allows the definition of input and output data types.
-
-
- These types can be objects, but also primitives and arrays.
-Represents a select subset of an OpenAPI 3.0 schema object.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`type_`
-
- |
-
-
-`google.ai.generativelanguage.Type`
-
-Required. Data type.
-
- |
-
-
-
-`format_`
-
- |
-
-
-`str`
-
-Optional. The format of the data. This is
-used only for primitive datatypes. Supported
-formats:
-
- for NUMBER type: float, double
- for INTEGER type: int32, int64
- for STRING type: enum
-
- |
-
-
-
-`description`
-
- |
-
-
-`str`
-
-Optional. A brief description of the
-parameter. This could contain examples of use.
-Parameter description may be formatted as
-Markdown.
-
- |
-
-
-
-`nullable`
-
- |
-
-
-`bool`
-
-Optional. Indicates if the value may be null.
-
- |
-
-
-
-`enum`
-
- |
-
-
-`MutableSequence[str]`
-
-Optional. Possible values of the element of Type.STRING with
-enum format. For example we can define an Enum Direction as:
-{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH",
-"WEST"]}
-
- |
-
-
-
-`items`
-
- |
-
-
-`google.ai.generativelanguage.Schema`
-
-Optional. Schema of the elements of
-Type.ARRAY.
-
-
- |
-
-
-
-`max_items`
-
- |
-
-
-`int`
-
-Optional. Maximum number of the elements for
-Type.ARRAY.
-
- |
-
-
-
-`min_items`
-
- |
-
-
-`int`
-
-Optional. Minimum number of the elements for
-Type.ARRAY.
-
- |
-
-
-
-`properties`
-
- |
-
-
-`MutableMapping[str, google.ai.generativelanguage.Schema]`
-
-Optional. Properties of Type.OBJECT.
-
- |
-
-
-
-`required`
-
- |
-
-
-`MutableSequence[str]`
-
-Optional. Required properties of Type.OBJECT.
-
- |
-
-
-
-
-
-## Child Classes
-[`class PropertiesEntry`](../../../google/generativeai/protos/Schema/PropertiesEntry.md)
-
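A sketch of assembling a ``Schema`` by hand, for example to describe a structured object. Note the trailing underscore on ``type_``, matching the attribute name listed above; the property names are invented:

```
import google.generativeai as genai

# An OBJECT schema with a required STRING property and an optional INTEGER property.
movie_schema = genai.protos.Schema(
    type_=genai.protos.Type.OBJECT,
    properties={
        "title": genai.protos.Schema(
            type_=genai.protos.Type.STRING,
            description="Movie title",
        ),
        "year": genai.protos.Schema(
            type_=genai.protos.Type.INTEGER,
            description="Release year",
        ),
    },
    required=["title"],
)
```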
diff --git a/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
deleted file mode 100644
index cf7b2299d..000000000
--- a/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# google.generativeai.protos.Schema.PropertiesEntry
-
-
-
-
-
-
-
-The abstract base class for a message.
-
-
-
-
-
-
-
-Args |
-
-
-
-mapping (Union[dict, ~.Message]): A dictionary or message to be
-used to determine the values for this message.
-
- |
-
-
-
-
-`ignore_unknown_fields`
-
- |
-
-
-`Optional[bool]`
-
-If True, do not raise errors for
- unknown fields. Only applied if `mapping` is a mapping type or there
- are keyword parameters.
-
- |
-
-
-
-`kwargs`
-
- |
-
-
-`dict`
-
-Keys and values corresponding to the fields of the
- message.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`key`
-
- |
-
-
-`string key`
-
- |
-
-
-
-`value`
-
- |
-
-
-`Schema value`
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/SearchEntryPoint.md b/docs/api/google/generativeai/protos/SearchEntryPoint.md
deleted file mode 100644
index 4f7799326..000000000
--- a/docs/api/google/generativeai/protos/SearchEntryPoint.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.SearchEntryPoint
-
-
-
-
-
-
-
-Google search entry point.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`rendered_content`
-
- |
-
-
-`str`
-
-Optional. Web content snippet that can be
-embedded in a web page or an app webview.
-
- |
-
-
-
-`sdk_blob`
-
- |
-
-
-`bytes`
-
-Optional. Base64 encoded JSON representing
-array of <search term, search url> tuple.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Segment.md b/docs/api/google/generativeai/protos/Segment.md
deleted file mode 100644
index aaeb2240b..000000000
--- a/docs/api/google/generativeai/protos/Segment.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-# google.generativeai.protos.Segment
-
-
-
-
-
-
-
-Segment of the content.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`part_index`
-
- |
-
-
-`int`
-
-Output only. The index of a Part object
-within its parent Content object.
-
- |
-
-
-
-`start_index`
-
- |
-
-
-`int`
-
-Output only. Start index in the given Part,
-measured in bytes. Offset from the start of the
-Part, inclusive, starting at zero.
-
- |
-
-
-
-`end_index`
-
- |
-
-
-`int`
-
-Output only. End index in the given Part,
-measured in bytes. Offset from the start of the
-Part, exclusive, starting at zero.
-
- |
-
-
-
-`text`
-
- |
-
-
-`str`
-
-Output only. The text corresponding to the
-segment from the response.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
deleted file mode 100644
index d565a17a9..000000000
--- a/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-# google.generativeai.protos.SemanticRetrieverConfig
-
-
-
-
-
-
-
-Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`source`
-
- |
-
-
-`str`
-
-Required. Name of the resource for retrieval. Example:
-``corpora/123`` or ``corpora/123/documents/abc``.
-
- |
-
-
-
-`query`
-
- |
-
-
-`google.ai.generativelanguage.Content`
-
-Required. Query to use for matching ``Chunk``\ s in the
-given resource by similarity.
-
- |
-
-
-
-`metadata_filters`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
-
-Optional. Filters for selecting ``Document``\ s and/or
-``Chunk``\ s from the resource.
-
- |
-
-
-
-`max_chunks_count`
-
- |
-
-
-`int`
-
-Optional. Maximum number of relevant ``Chunk``\ s to
-retrieve.
-
-
- |
-
-
-
-`minimum_relevance_score`
-
- |
-
-
-`float`
-
-Optional. Minimum relevance score for retrieved relevant
-``Chunk``\ s.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/StringList.md b/docs/api/google/generativeai/protos/StringList.md
deleted file mode 100644
index 0a202da5d..000000000
--- a/docs/api/google/generativeai/protos/StringList.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.StringList
-
-
-
-
-
-
-
-User provided string values assigned to a single metadata key.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`values`
-
- |
-
-
-`MutableSequence[str]`
-
-The string values of the metadata to store.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TaskType.md b/docs/api/google/generativeai/protos/TaskType.md
deleted file mode 100644
index d01468a1f..000000000
--- a/docs/api/google/generativeai/protos/TaskType.md
+++ /dev/null
@@ -1,802 +0,0 @@
-
-# google.generativeai.protos.TaskType
-
-
-
-
-
-
-
-Type of task for which the embedding will be used.
-
-
-google.generativeai.protos.TaskType(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`TASK_TYPE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Unset value, which will default to one of the
-other enum values.
-
- |
-
-
-
-`RETRIEVAL_QUERY`
-
- |
-
-
-`1`
-
-Specifies the given text is a query in a
-search/retrieval setting.
-
- |
-
-
-
-`RETRIEVAL_DOCUMENT`
-
- |
-
-
-`2`
-
-Specifies the given text is a document from
-the corpus being searched.
-
- |
-
-
-
-`SEMANTIC_SIMILARITY`
-
- |
-
-
-`3`
-
-Specifies the given text will be used for
-STS.
-
- |
-
-
-
-`CLASSIFICATION`
-
- |
-
-
-`4`
-
-Specifies that the given text will be
-classified.
-
- |
-
-
-
-`CLUSTERING`
-
- |
-
-
-`5`
-
-Specifies that the embeddings will be used
-for clustering.
-
- |
-
-
-
-`QUESTION_ANSWERING`
-
- |
-
-
-`6`
-
-Specifies that the given text will be used
-for question answering.
-
- |
-
-
-
-`FACT_VERIFICATION`
-
- |
-
-
-`7`
-
-Specifies that the given text will be used
-for fact verification.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-CLASSIFICATION
-
- |
-
-
-``
-
- |
-
-
-
-CLUSTERING
-
- |
-
-
-``
-
- |
-
-
-
-FACT_VERIFICATION
-
- |
-
-
-``
-
- |
-
-
-
-QUESTION_ANSWERING
-
- |
-
-
-``
-
- |
-
-
-
-RETRIEVAL_DOCUMENT
-
- |
-
-
-``
-
- |
-
-
-
-RETRIEVAL_QUERY
-
- |
-
-
-``
-
- |
-
-
-
-SEMANTIC_SIMILARITY
-
- |
-
-
-``
-
- |
-
-
-
-TASK_TYPE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
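In the high-level API the task type is normally passed straight to ``genai.embed_content``; a minimal sketch, with an example embedding model name:

```
import google.generativeai as genai

# RETRIEVAL_DOCUMENT marks the text as a corpus document rather than a
# search query; a title may also be attached for this task type.
result = genai.embed_content(
    model="models/text-embedding-004",  # example embedding model name
    content="The quick brown fox jumps over the lazy dog.",
    task_type="RETRIEVAL_DOCUMENT",
    title="Pangrams",
)
print(len(result["embedding"]))
```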
diff --git a/docs/api/google/generativeai/protos/TextCompletion.md b/docs/api/google/generativeai/protos/TextCompletion.md
deleted file mode 100644
index 09251aef6..000000000
--- a/docs/api/google/generativeai/protos/TextCompletion.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-# google.generativeai.protos.TextCompletion
-
-
-
-
-
-
-
-Output text returned from a model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`output`
-
- |
-
-
-`str`
-
-Output only. The generated text returned from
-the model.
-
- |
-
-
-
-`safety_ratings`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.SafetyRating]`
-
-Ratings for the safety of a response.
-
-There is at most one rating per category.
-
- |
-
-
-
-`citation_metadata`
-
- |
-
-
-`google.ai.generativelanguage.CitationMetadata`
-
-Output only. Citation information for model-generated
-``output`` in this ``TextCompletion``.
-
-This field may be populated with attribution information for
-any text included in the ``output``.
-
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TextPrompt.md b/docs/api/google/generativeai/protos/TextPrompt.md
deleted file mode 100644
index d35842d38..000000000
--- a/docs/api/google/generativeai/protos/TextPrompt.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.TextPrompt
-
-
-
-
-
-
-
-Text given to the model as a prompt.
-
-
-
-The Model will use this TextPrompt to Generate a text
-completion.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`text`
-
- |
-
-
-`str`
-
-Required. The prompt text.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Tool.md b/docs/api/google/generativeai/protos/Tool.md
deleted file mode 100644
index f76d4905f..000000000
--- a/docs/api/google/generativeai/protos/Tool.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-# google.generativeai.protos.Tool
-
-
-
-
-
-
-
-Tool details that the model may use to generate response.
-
-
-
-A ``Tool`` is a piece of code that enables the system to interact
-with external systems to perform an action, or set of actions,
-outside of knowledge and scope of the model.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`function_declarations`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.FunctionDeclaration]`
-
-Optional. A list of ``FunctionDeclarations`` available to
-the model that can be used for function calling.
-
-The model or system does not execute the function. Instead
-the defined function may be returned as a
-[FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
-with arguments to the client side for execution. The model
-may decide to call a subset of these functions by populating
-[FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
-in the response. The next conversation turn may contain a
-[FunctionResponse][google.ai.generativelanguage.v1beta.Part.function_response]
-with the
-[Content.role][google.ai.generativelanguage.v1beta.Content.role]
-"function" generation context for the next model turn.
-
- |
-
-
-
-`google_search_retrieval`
-
- |
-
-
-`google.ai.generativelanguage.GoogleSearchRetrieval`
-
-Optional. Retrieval tool that is powered by
-Google search.
-
- |
-
-
-
-`code_execution`
-
- |
-
-
-`google.ai.generativelanguage.CodeExecution`
-
-Optional. Enables the model to execute code
-as part of generation.
-
- |
-
-
-
-
-
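A sketch of building a ``Tool`` with one ``FunctionDeclaration`` and passing it to a model. The function name, description, and parameters are invented, and the trailing underscore on ``type_`` follows the ``Schema`` attribute naming:

```
import google.generativeai as genai

# Declare a single function the model may choose to call.
weather_tool = genai.protos.Tool(
    function_declarations=[
        genai.protos.FunctionDeclaration(
            name="get_current_weather",
            description="Returns the current weather for a city.",
            parameters=genai.protos.Schema(
                type_=genai.protos.Type.OBJECT,
                properties={
                    "city": genai.protos.Schema(type_=genai.protos.Type.STRING),
                },
                required=["city"],
            ),
        )
    ]
)

model = genai.GenerativeModel("gemini-1.5-flash", tools=[weather_tool])
response = model.generate_content("What's the weather in Zurich?")
# If the model decided to call the function, the first part is a FunctionCall.
print(response.candidates[0].content.parts[0].function_call)
```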
diff --git a/docs/api/google/generativeai/protos/ToolConfig.md b/docs/api/google/generativeai/protos/ToolConfig.md
deleted file mode 100644
index a9c3d2b5b..000000000
--- a/docs/api/google/generativeai/protos/ToolConfig.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.ToolConfig
-
-
-
-
-
-
-
-The Tool configuration containing parameters for specifying ``Tool`` use in the request.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`function_calling_config`
-
- |
-
-
-`google.ai.generativelanguage.FunctionCallingConfig`
-
-Optional. Function calling config.
-
- |
-
-
-
-
-
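A sketch of steering function calling through the tool config, using the dict form that the client is assumed to coerce into a ``ToolConfig``; the mode string and function name are illustrative:

```
import google.generativeai as genai


def get_time(timezone: str) -> str:
    """Returns the current time in the given timezone (stub for illustration)."""
    return "12:00"


model = genai.GenerativeModel("gemini-1.5-flash", tools=[get_time])
response = model.generate_content(
    "What time is it in UTC?",
    tool_config={
        "function_calling_config": {
            "mode": "ANY",  # force the model to call one of the allowed functions
            "allowed_function_names": ["get_time"],
        }
    },
)
print(response.candidates[0].content.parts[0].function_call)
```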
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipRequest.md b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
deleted file mode 100644
index 8ebaf7818..000000000
--- a/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# google.generativeai.protos.TransferOwnershipRequest
-
-
-
-
-
-
-
-Request to transfer the ownership of the tuned model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Required. The resource name of the tuned model to transfer
-ownership.
-
-Format: ``tunedModels/my-model-id``
-
- |
-
-
-
-`email_address`
-
- |
-
-
-`str`
-
-Required. The email address of the user to
-whom the tuned model is being transferred.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipResponse.md b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
deleted file mode 100644
index 225a8bef7..000000000
--- a/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.protos.TransferOwnershipResponse
-
-
-
-
-
-
-
-Response from ``TransferOwnership``.
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TunedModel.md b/docs/api/google/generativeai/protos/TunedModel.md
deleted file mode 100644
index 36ca08291..000000000
--- a/docs/api/google/generativeai/protos/TunedModel.md
+++ /dev/null
@@ -1,255 +0,0 @@
-
-# google.generativeai.protos.TunedModel
-
-
-
-
-
-
-
-A fine-tuned model created using ModelService.CreateTunedModel.
-
-
-
-This message has `oneof`_ fields (mutually exclusive fields).
-For each oneof, at most one member field can be set at the same time.
-Setting any member of the oneof automatically clears all other
-members.
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_model_source`
-
- |
-
-
-`google.ai.generativelanguage.TunedModelSource`
-
-Optional. TunedModel to use as the starting
-point for training the new model.
-
-This field is a member of `oneof`_ ``source_model``.
-
- |
-
-
-
-`base_model`
-
- |
-
-
-`str`
-
-Immutable. The name of the ``Model`` to tune. Example:
-``models/gemini-1.5-flash-001``
-
-This field is a member of `oneof`_ ``source_model``.
-
- |
-
-
-
-`name`
-
- |
-
-
-`str`
-
-Output only. The tuned model name. A unique name will be
-generated on create. Example: ``tunedModels/az2mb0bpw6i`` If
-display_name is set on create, the id portion of the name
-will be set by concatenating the words of the display_name
-with hyphens and adding a random portion for uniqueness.
-
-Example:
-
-- display_name = ``Sentence Translator``
-- name = ``tunedModels/sentence-translator-u3b7m``
-
- |
-
-
-
-`display_name`
-
- |
-
-
-`str`
-
-Optional. The name to display for this model
-in user interfaces. The display name must be up
-to 40 characters including spaces.
-
- |
-
-
-
-`description`
-
- |
-
-
-`str`
-
-Optional. A short description of this model.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-`float`
-
-Optional. Controls the randomness of the output.
-
-Values can range over ``[0.0,1.0]``, inclusive. A value
-closer to ``1.0`` will produce responses that are more
-varied, while a value closer to ``0.0`` will typically
-result in less surprising responses from the model.
-
-This value specifies default to be the one used by the base
-model while creating the model.
-
-
- |
-
-
-
-`top_p`
-
- |
-
-
-`float`
-
-Optional. For Nucleus sampling.
-
-Nucleus sampling considers the smallest set of tokens whose
-probability sum is at least ``top_p``.
-
-This value specifies default to be the one used by the base
-model while creating the model.
-
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-Optional. For Top-k sampling.
-
-Top-k sampling considers the set of ``top_k`` most probable
-tokens. This value specifies default to be used by the
-backend while making the call to the model.
-
-This value specifies default to be the one used by the base
-model while creating the model.
-
-
- |
-
-
-
-`state`
-
- |
-
-
-`google.ai.generativelanguage.TunedModel.State`
-
-Output only. The state of the tuned model.
-
- |
-
-
-
-`create_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp when this model
-was created.
-
- |
-
-
-
-`update_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp when this model
-was updated.
-
- |
-
-
-
-`tuning_task`
-
- |
-
-
-`google.ai.generativelanguage.TuningTask`
-
-Required. The tuning task that creates the
-tuned model.
-
- |
-
-
-
-`reader_project_numbers`
-
- |
-
-
-`MutableSequence[int]`
-
-Optional. List of project numbers that have
-read access to the tuned model.
-
- |
-
-
-
-
-
-## Child Classes
-[`class State`](../../../google/generativeai/types/TunedModelState.md)
-
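A rough sketch of creating such a model through the high-level ``genai.create_tuned_model`` helper; the training rows, id, hyperparameters, and tunable base-model name are placeholders:

```
import google.generativeai as genai

# Each training row pairs a text_input with the expected output,
# mirroring protos.TuningExample.
operation = genai.create_tuned_model(
    source_model="models/gemini-1.5-flash-001-tuning",  # example tunable base model
    training_data=[
        {"text_input": "1", "output": "2"},
        {"text_input": "3", "output": "4"},
        {"text_input": "seven", "output": "eight"},
    ],
    id="next-number-generator",  # becomes tunedModels/next-number-generator
    epoch_count=5,
    batch_size=4,
    learning_rate=0.001,
)
tuned = operation.result()  # blocks until the tuning job finishes
print(tuned.name, tuned.state)
```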
diff --git a/docs/api/google/generativeai/protos/TunedModelSource.md b/docs/api/google/generativeai/protos/TunedModelSource.md
deleted file mode 100644
index 7a5a67e79..000000000
--- a/docs/api/google/generativeai/protos/TunedModelSource.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# google.generativeai.protos.TunedModelSource
-
-
-
-
-
-
-
-Tuned model as a source for training a new model.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_model`
-
- |
-
-
-`str`
-
-Immutable. The name of the ``TunedModel`` to use as the
-starting point for training the new model. Example:
-``tunedModels/my-tuned-model``
-
- |
-
-
-
-`base_model`
-
- |
-
-
-`str`
-
-Output only. The name of the base ``Model`` this
-``TunedModel`` was tuned from. Example:
-``models/gemini-1.5-flash-001``
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TuningExample.md b/docs/api/google/generativeai/protos/TuningExample.md
deleted file mode 100644
index ab5ea66cc..000000000
--- a/docs/api/google/generativeai/protos/TuningExample.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.protos.TuningExample
-
-
-
-
-
-
-
-A single example for tuning.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`text_input`
-
- |
-
-
-`str`
-
-Optional. Text model input.
-
-This field is a member of `oneof`_ ``model_input``.
-
- |
-
-
-
-`output`
-
- |
-
-
-`str`
-
-Required. The expected model output.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TuningExamples.md b/docs/api/google/generativeai/protos/TuningExamples.md
deleted file mode 100644
index 7d0f33ed2..000000000
--- a/docs/api/google/generativeai/protos/TuningExamples.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# google.generativeai.protos.TuningExamples
-
-
-
-
-
-
-
-A set of tuning examples. Can be training or validation data.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`examples`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.TuningExample]`
-
-Required. The examples. Example input can be
-for text or discuss, but all examples in a set
-must be of the same type.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TuningSnapshot.md b/docs/api/google/generativeai/protos/TuningSnapshot.md
deleted file mode 100644
index a74aea0fd..000000000
--- a/docs/api/google/generativeai/protos/TuningSnapshot.md
+++ /dev/null
@@ -1,87 +0,0 @@
-
-# google.generativeai.protos.TuningSnapshot
-
-
-
-
-
-
-
-Record for a single tuning step.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`step`
-
- |
-
-
-`int`
-
-Output only. The tuning step.
-
- |
-
-
-
-`epoch`
-
- |
-
-
-`int`
-
-Output only. The epoch this step was part of.
-
- |
-
-
-
-`mean_loss`
-
- |
-
-
-`float`
-
-Output only. The mean loss of the training
-examples for this step.
-
- |
-
-
-
-`compute_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp when this metric
-was computed.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/TuningTask.md b/docs/api/google/generativeai/protos/TuningTask.md
deleted file mode 100644
index aab2ccddd..000000000
--- a/docs/api/google/generativeai/protos/TuningTask.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-# google.generativeai.protos.TuningTask
-
-
-
-
-
-
-
-Tuning tasks that create tuned models.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`start_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp when tuning this
-model started.
-
- |
-
-
-
-`complete_time`
-
- |
-
-
-`google.protobuf.timestamp_pb2.Timestamp`
-
-Output only. The timestamp when tuning this
-model completed.
-
- |
-
-
-
-`snapshots`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
-
-Output only. Metrics collected during tuning.
-
- |
-
-
-
-`training_data`
-
- |
-
-
-`google.ai.generativelanguage.Dataset`
-
-Required. Input only. Immutable. The model
-training data.
-
- |
-
-
-
-`hyperparameters`
-
- |
-
-
-`google.ai.generativelanguage.Hyperparameters`
-
-Immutable. Hyperparameters controlling the
-tuning process. If not provided, default values
-will be used.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/Type.md b/docs/api/google/generativeai/protos/Type.md
deleted file mode 100644
index 6f2263dc5..000000000
--- a/docs/api/google/generativeai/protos/Type.md
+++ /dev/null
@@ -1,770 +0,0 @@
-
-# google.generativeai.protos.Type
-
-
-
-
-
-
-
-Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
-
-
-google.generativeai.protos.Type(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`TYPE_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Not specified, should not be used.
-
- |
-
-
-
-`STRING`
-
- |
-
-
-`1`
-
-String type.
-
- |
-
-
-
-`NUMBER`
-
- |
-
-
-`2`
-
-Number type.
-
- |
-
-
-
-`INTEGER`
-
- |
-
-
-`3`
-
-Integer type.
-
- |
-
-
-
-`BOOLEAN`
-
- |
-
-
-`4`
-
-Boolean type.
-
- |
-
-
-
-`ARRAY`
-
- |
-
-
-`5`
-
-Array type.
-
- |
-
-
-
-`OBJECT`
-
- |
-
-
-`6`
-
-Object type.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-ARRAY
-
- |
-
-
-``
-
- |
-
-
-
-BOOLEAN
-
- |
-
-
-``
-
- |
-
-
-
-INTEGER
-
- |
-
-
-``
-
- |
-
-
-
-NUMBER
-
- |
-
-
-``
-
- |
-
-
-
-OBJECT
-
- |
-
-
-``
-
- |
-
-
-
-STRING
-
- |
-
-
-``
-
- |
-
-
-
-TYPE_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
deleted file mode 100644
index f4cd4e31c..000000000
--- a/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-# google.generativeai.protos.UpdateCachedContentRequest
-
-
-
-
-
-
-
-Request to update CachedContent.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`cached_content`
-
- |
-
-
-`google.ai.generativelanguage.CachedContent`
-
-Required. The content cache entry to update
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-The list of fields to update.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdateChunkRequest.md b/docs/api/google/generativeai/protos/UpdateChunkRequest.md
deleted file mode 100644
index 9dc6d4cbc..000000000
--- a/docs/api/google/generativeai/protos/UpdateChunkRequest.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.UpdateChunkRequest
-
-
-
-
-
-
-
-Request to update a ``Chunk``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`chunk`
-
- |
-
-
-`google.ai.generativelanguage.Chunk`
-
-Required. The ``Chunk`` to update.
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-Required. The list of fields to update. Currently, this only
-supports updating ``custom_metadata`` and ``data``.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdateCorpusRequest.md b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
deleted file mode 100644
index a08ba19e6..000000000
--- a/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.UpdateCorpusRequest
-
-
-
-
-
-
-
-Request to update a ``Corpus``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`corpus`
-
- |
-
-
-`google.ai.generativelanguage.Corpus`
-
-Required. The ``Corpus`` to update.
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-Required. The list of fields to update. Currently, this only
-supports updating ``display_name``.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdateDocumentRequest.md b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
deleted file mode 100644
index 9f52e983b..000000000
--- a/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# google.generativeai.protos.UpdateDocumentRequest
-
-
-
-
-
-
-
-Request to update a ``Document``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`document`
-
- |
-
-
-`google.ai.generativelanguage.Document`
-
-Required. The ``Document`` to update.
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-Required. The list of fields to update. Currently, this only
-supports updating ``display_name`` and ``custom_metadata``.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdatePermissionRequest.md b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
deleted file mode 100644
index e6e64b15f..000000000
--- a/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.protos.UpdatePermissionRequest
-
-
-
-
-
-
-
-Request to update the ``Permission``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`permission`
-
- |
-
-
-`google.ai.generativelanguage.Permission`
-
-Required. The permission to update.
-
-The permission's ``name`` field is used to identify the
-permission to update.
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-Required. The list of fields to update. Accepted ones:
-
-- role (Permission.role field)
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
deleted file mode 100644
index f30c5b7bb..000000000
--- a/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-# google.generativeai.protos.UpdateTunedModelRequest
-
-
-
-
-
-
-
-Request to update a TunedModel.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`tuned_model`
-
- |
-
-
-`google.ai.generativelanguage.TunedModel`
-
-Required. The tuned model to update.
-
- |
-
-
-
-`update_mask`
-
- |
-
-
-`google.protobuf.field_mask_pb2.FieldMask`
-
-Required. The list of fields to update.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/protos/VideoMetadata.md b/docs/api/google/generativeai/protos/VideoMetadata.md
deleted file mode 100644
index 20dfda9cf..000000000
--- a/docs/api/google/generativeai/protos/VideoMetadata.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.protos.VideoMetadata
-
-
-
-
-
-
-
-Metadata for a video ``File``.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`video_duration`
-
- |
-
-
-`google.protobuf.duration_pb2.Duration`
-
-Duration of the video.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types.md b/docs/api/google/generativeai/types.md
deleted file mode 100644
index a9e502516..000000000
--- a/docs/api/google/generativeai/types.md
+++ /dev/null
@@ -1,155 +0,0 @@
-
-# Module: google.generativeai.types
-
-
-
-
-
-
-
-A collection of type definitions used throughout the library.
-
-
-
-## Classes
-
-[`class AsyncGenerateContentResponse`](../../google/generativeai/types/AsyncGenerateContentResponse.md): This is the async version of `genai.GenerateContentResponse`.
-
-[`class BlobDict`](../../google/generativeai/types/BlobDict.md)
-
-[`class BlockedPromptException`](../../google/generativeai/types/BlockedPromptException.md): Common base class for all non-exit exceptions.
-
-[`class BlockedReason`](../../google/generativeai/types/BlockedReason.md): A list of reasons why content may have been blocked.
-
-[`class BrokenResponseError`](../../google/generativeai/types/BrokenResponseError.md): Common base class for all non-exit exceptions.
-
-[`class CallableFunctionDeclaration`](../../google/generativeai/types/CallableFunctionDeclaration.md): An extension of `FunctionDeclaration` that can be built from a python function, and is callable.
-
-[`class CitationMetadataDict`](../../google/generativeai/types/CitationMetadataDict.md): A collection of source attributions for a piece of content.
-
-[`class CitationSourceDict`](../../google/generativeai/types/CitationSourceDict.md): A citation to a source for a portion of a specific response.
-
-[`class ContentDict`](../../google/generativeai/types/ContentDict.md)
-
-[`class ContentFilterDict`](../../google/generativeai/types/ContentFilterDict.md): Content filtering metadata associated with processing a single request.
-
-[`class File`](../../google/generativeai/types/File.md)
-
-[`class FileDataDict`](../../google/generativeai/types/FileDataDict.md)
-
-[`class FunctionDeclaration`](../../google/generativeai/types/FunctionDeclaration.md)
-
-[`class FunctionLibrary`](../../google/generativeai/types/FunctionLibrary.md): A container for a set of `Tool` objects, manages lookup and execution of their functions.
-
-[`class GenerateContentResponse`](../../google/generativeai/types/GenerateContentResponse.md): Instances of this class manage the response of the `generate_content` method.
-
-[`class GenerationConfig`](../../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content.
-
-[`class GenerationConfigDict`](../../google/generativeai/types/GenerationConfigDict.md)
-
-[`class HarmBlockThreshold`](../../google/generativeai/types/HarmBlockThreshold.md): Block at and beyond a specified harm probability.
-
-[`class HarmCategory`](../../google/generativeai/types/HarmCategory.md): Harm Categories supported by the gemini-family model
-
-[`class HarmProbability`](../../google/generativeai/types/HarmProbability.md): The probability that a piece of content is harmful.
-
-[`class IncompleteIterationError`](../../google/generativeai/types/IncompleteIterationError.md): Common base class for all non-exit exceptions.
-
-[`class Model`](../../google/generativeai/types/Model.md): A dataclass representation of a protos.Model.
-
-[`class PartDict`](../../google/generativeai/types/PartDict.md)
-
-[`class Permission`](../../google/generativeai/types/Permission.md): A permission to access a resource.
-
-[`class Permissions`](../../google/generativeai/types/Permissions.md)
-
-[`class RequestOptions`](../../google/generativeai/types/RequestOptions.md): Request options
-
-[`class SafetyFeedbackDict`](../../google/generativeai/types/SafetyFeedbackDict.md): Safety feedback for an entire request.
-
-[`class SafetyRatingDict`](../../google/generativeai/types/SafetyRatingDict.md): Safety rating for a piece of content.
-
-[`class SafetySettingDict`](../../google/generativeai/types/SafetySettingDict.md): Safety setting, affecting the safety-blocking behavior.
-
-[`class Status`](../../google/generativeai/types/Status.md): A ProtocolMessage
-
-[`class StopCandidateException`](../../google/generativeai/types/StopCandidateException.md): Common base class for all non-exit exceptions.
-
-[`class Tool`](../../google/generativeai/types/Tool.md): A wrapper for protos.Tool, containing a collection of related `FunctionDeclaration` objects, a protos.CodeExecution object, and a protos.GoogleSearchRetrieval object.
-
-[`class ToolDict`](../../google/generativeai/types/ToolDict.md)
-
-[`class TunedModel`](../../google/generativeai/types/TunedModel.md): A dataclass representation of a protos.TunedModel.
-
-[`class TunedModelState`](../../google/generativeai/types/TunedModelState.md): The state of the tuned model.
-
-## Functions
-
-[`TypedDict(...)`](../../google/generativeai/types/TypedDict.md): A simple typed namespace. At runtime it is equivalent to a plain dict.
-
-[`get_default_file_client(...)`](../../google/generativeai/types/get_default_file_client.md)
-
-[`to_file_data(...)`](../../google/generativeai/types/to_file_data.md)
-
-## Type Aliases
-
-[`AnyModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
-
-[`BaseModelNameOptions`](../../google/generativeai/types/BaseModelNameOptions.md)
-
-[`BlobType`](../../google/generativeai/types/BlobType.md)
-
-[`ContentType`](../../google/generativeai/types/ContentType.md)
-
-[`ContentsType`](../../google/generativeai/types/ContentsType.md)
-
-[`FileDataType`](../../google/generativeai/types/FileDataType.md)
-
-[`FunctionDeclarationType`](../../google/generativeai/types/FunctionDeclarationType.md)
-
-[`FunctionLibraryType`](../../google/generativeai/types/FunctionLibraryType.md)
-
-[`GenerationConfigType`](../../google/generativeai/types/GenerationConfigType.md)
-
-[`ModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
-
-[`ModelsIterable`](../../google/generativeai/types/ModelsIterable.md)
-
-[`PartType`](../../google/generativeai/types/PartType.md)
-
-[`RequestOptionsType`](../../google/generativeai/types/RequestOptionsType.md)
-
-[`StrictContentType`](../../google/generativeai/types/StrictContentType.md)
-
-[`ToolsType`](../../google/generativeai/types/ToolsType.md)
-
-[`TunedModelNameOptions`](../../google/generativeai/types/TunedModelNameOptions.md)
-
-
-
-
-
-
-Other Members |
-
-
-
-
-annotations
-
- |
-
-
-Instance of `__future__._Feature`
-
- |
-
-
-
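For example, `GenerationConfig` (or an equivalent dict) can be set on the model or overridden per request; a minimal sketch with illustrative values:

```
import google.generativeai as genai
from google.generativeai.types import GenerationConfig

config = GenerationConfig(
    temperature=0.2,        # low randomness
    top_p=0.95,
    max_output_tokens=256,  # cap the reply length
)

model = genai.GenerativeModel("gemini-1.5-flash", generation_config=config)
# The same settings can be overridden for a single call.
response = model.generate_content(
    "Summarize nucleus sampling in one sentence.",
    generation_config={"temperature": 0.0},
)
print(response.text)
```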
diff --git a/docs/api/google/generativeai/types/AnyModelNameOptions.md b/docs/api/google/generativeai/types/AnyModelNameOptions.md
deleted file mode 100644
index e77c9f771..000000000
--- a/docs/api/google/generativeai/types/AnyModelNameOptions.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# google.generativeai.types.AnyModelNameOptions
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-AnyModelNameOptions = Union[
- str,
-    google.generativeai.types.Model,
-    google.generativeai.protos.Model,
-    google.generativeai.types.TunedModel,
-    google.generativeai.protos.TunedModel
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
deleted file mode 100644
index e67806a80..000000000
--- a/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-# google.generativeai.types.AsyncGenerateContentResponse
-
-
-
-
-
-
-
-This is the async version of `genai.GenerateContentResponse`.
-
-
-google.generativeai.types.AsyncGenerateContentResponse(
- done: bool,
-    iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.GenerateContentResponse]),
- result: protos.GenerateContentResponse,
- chunks: (Iterable[protos.GenerateContentResponse] | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-The list of candidate responses.
-
- |
-
-
-
-`parts`
-
- |
-
-
-A quick accessor equivalent to `self.candidates[0].content.parts`
-
- |
-
-
-
-`prompt_feedback`
-
- |
-
-
-
-
- |
-
-
-
-`text`
-
- |
-
-
-A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
-
- |
-
-
-
-`usage_metadata`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-from_aiterator
-
-View source
-
-
-from_aiterator(
- iterator
-)
-
-
-
-
-
-from_response
-
-View source
-
-
-@classmethod
-from_response(
- response: protos.GenerateContentResponse
-)
-
-
-
-
-
-resolve
-
-View source
-
-
-resolve()
-
-
-
-
-
-to_dict
-
-View source
-
-
-to_dict()
-
-
-Returns the result as a JSON-compatible dict.
-
-Note: This doesn't capture the iterator state when streaming, it only captures the accumulated
-`GenerateContentResponse` fields.
-
-```
->>> import json
->>> response = model.generate_content('Hello?')
->>> json.dumps(response.to_dict())
-```
-
-
-
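A minimal sketch of the async path (example model name; needs a running event loop, e.g. via `asyncio.run`):

```
import asyncio

import google.generativeai as genai


async def main() -> None:
    model = genai.GenerativeModel("gemini-1.5-flash")  # example model name
    # generate_content_async returns an AsyncGenerateContentResponse.
    response = await model.generate_content_async("Name three prime numbers.")
    print(response.text)
    print(response.to_dict())  # JSON-compatible dict, as described above


asyncio.run(main())
```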
diff --git a/docs/api/google/generativeai/types/BaseModelNameOptions.md b/docs/api/google/generativeai/types/BaseModelNameOptions.md
deleted file mode 100644
index 0df2d0a32..000000000
--- a/docs/api/google/generativeai/types/BaseModelNameOptions.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.BaseModelNameOptions
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-BaseModelNameOptions = Union[
- str,
-    google.generativeai.types.Model,
-    google.generativeai.protos.Model
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/BlobDict.md b/docs/api/google/generativeai/types/BlobDict.md
deleted file mode 100644
index a2bc987e7..000000000
--- a/docs/api/google/generativeai/types/BlobDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.BlobDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/BlobType.md b/docs/api/google/generativeai/types/BlobType.md
deleted file mode 100644
index 213c89d53..000000000
--- a/docs/api/google/generativeai/types/BlobType.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# google.generativeai.types.BlobType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-BlobType = Union[
-    google.generativeai.protos.Blob,
-    google.generativeai.types.BlobDict,
- PIL.Image.Image,
- IPython.core.display.Image
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/BlockedPromptException.md b/docs/api/google/generativeai/types/BlockedPromptException.md
deleted file mode 100644
index 18c1628d3..000000000
--- a/docs/api/google/generativeai/types/BlockedPromptException.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.BlockedPromptException
-
-
-
-
-
-
-
-Common base class for all non-exit exceptions.
-
-
-
-
diff --git a/docs/api/google/generativeai/types/BlockedReason.md b/docs/api/google/generativeai/types/BlockedReason.md
deleted file mode 100644
index 8c4f2d822..000000000
--- a/docs/api/google/generativeai/types/BlockedReason.md
+++ /dev/null
@@ -1,683 +0,0 @@
-
-# google.generativeai.types.BlockedReason
-
-
-
-
-
-
-
-A list of reasons why content may have been blocked.
-
-
- View aliases
-
-Main aliases
-
-`google.generativeai.protos.ContentFilter.BlockedReason`
-
-
-
-
-google.generativeai.types.BlockedReason(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`BLOCKED_REASON_UNSPECIFIED`
-
- |
-
-
-`0`
-
-A blocked reason was not specified.
-
- |
-
-
-
-`SAFETY`
-
- |
-
-
-`1`
-
-Content was blocked by safety settings.
-
- |
-
-
-
-`OTHER`
-
- |
-
-
-`2`
-
-Content was blocked, but the reason is
-uncategorized.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-BLOCKED_REASON_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-OTHER
-
- |
-
-
-``
-
- |
-
-
-
-SAFETY
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/BrokenResponseError.md b/docs/api/google/generativeai/types/BrokenResponseError.md
deleted file mode 100644
index 2a126a482..000000000
--- a/docs/api/google/generativeai/types/BrokenResponseError.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.BrokenResponseError
-
-
-
-
-
-
-
-Common base class for all non-exit exceptions.
-
-
-
-
diff --git a/docs/api/google/generativeai/types/CallableFunctionDeclaration.md b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
deleted file mode 100644
index 346aad9d5..000000000
--- a/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
-# google.generativeai.types.CallableFunctionDeclaration
-
-
-
-
-
-
-
-An extension of `FunctionDeclaration` that can be built from a Python function and is callable.
-
-Inherits From: [`FunctionDeclaration`](../../../google/generativeai/types/FunctionDeclaration.md)
-
-
-google.generativeai.types.CallableFunctionDeclaration(
- *,
- name: str,
- description: str,
- parameters: (dict[str, Any] | None) = None,
- function: Callable[..., Any]
-)
-
-
-
-
-
-
-Note: The python function must have type annotations.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`description`
-
- |
-
-
-
-
- |
-
-
-
-`name`
-
- |
-
-
-
-
- |
-
-
-
-`parameters`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-from_function
-
-View source
-
-
-@staticmethod
-from_function(
- function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
-)
-
-
-Builds a `CallableFunctionDeclaration` from a python function.
-
-The function should have type annotations.
-
-This method is able to generate the schema for arguments annotated with types:
-
-`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
-
-This method does not yet build a schema for `TypedDict`, which would allow you to specify the dictionary
-contents, but you can build these manually.
-
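-A minimal sketch of wrapping a typed Python function (the `multiply` function and its parameter descriptions are made up for this example; `descriptions` is assumed to map parameter names to descriptions):
-
-```
-from google.generativeai.types import CallableFunctionDeclaration
-
-def multiply(a: float, b: float) -> float:
-    """Returns the product of two numbers."""
-    return a * b
-
-# The schema for `a` and `b` is derived from the type annotations;
-# `descriptions` optionally attaches per-parameter descriptions.
-fn_decl = CallableFunctionDeclaration.from_function(
-    multiply, descriptions={'a': 'first factor', 'b': 'second factor'})
-```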
-from_proto
-
-View source
-
-
-@classmethod
-from_proto(
- proto
-) -> FunctionDeclaration
-
-
-
-
-
-to_proto
-
-View source
-
-
-to_proto() -> protos.FunctionDeclaration
-
-
-
-
-
-__call__
-
-View source
-
-
-__call__(
- fc: protos.FunctionCall
-) -> protos.FunctionResponse
-
-
-Call self as a function.
-
-
-
-
diff --git a/docs/api/google/generativeai/types/CitationMetadataDict.md b/docs/api/google/generativeai/types/CitationMetadataDict.md
deleted file mode 100644
index ea848145d..000000000
--- a/docs/api/google/generativeai/types/CitationMetadataDict.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# google.generativeai.types.CitationMetadataDict
-
-
-
-
-
-
-
-A collection of source attributions for a piece of content.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`citation_sources`
-
- |
-
-
-`MutableSequence[google.ai.generativelanguage.CitationSource]`
-
-Citations to sources for a specific response.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/CitationSourceDict.md b/docs/api/google/generativeai/types/CitationSourceDict.md
deleted file mode 100644
index 71bb9d35d..000000000
--- a/docs/api/google/generativeai/types/CitationSourceDict.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# google.generativeai.types.CitationSourceDict
-
-
-
-
-
-
-
-A citation to a source for a portion of a specific response.
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`start_index`
-
- |
-
-
-`int`
-
-Optional. Start of segment of the response
-that is attributed to this source.
-
-Index indicates the start of the segment,
-measured in bytes.
-
- |
-
-
-
-`end_index`
-
- |
-
-
-`int`
-
-Optional. End of the attributed segment,
-exclusive.
-
- |
-
-
-
-`uri`
-
- |
-
-
-`str`
-
-Optional. URI that is attributed as a source
-for a portion of the text.
-
- |
-
-
-
-`license_`
-
- |
-
-
-`str`
-
-Optional. License for the GitHub project that
-is attributed as a source for segment.
-
-License info is required for code citations.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/ContentDict.md b/docs/api/google/generativeai/types/ContentDict.md
deleted file mode 100644
index 19da67166..000000000
--- a/docs/api/google/generativeai/types/ContentDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.ContentDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/ContentFilterDict.md b/docs/api/google/generativeai/types/ContentFilterDict.md
deleted file mode 100644
index fa1da46bb..000000000
--- a/docs/api/google/generativeai/types/ContentFilterDict.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# google.generativeai.types.ContentFilterDict
-
-
-
-
-
-
-
-Content filtering metadata associated with processing a single request.
-
-
-ContentFilter contains a reason and an optional supporting
-string. The reason may be unspecified.
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`reason`
-
- |
-
-
-`google.ai.generativelanguage.ContentFilter.BlockedReason`
-
-The reason content was blocked during request
-processing.
-
- |
-
-
-
-`message`
-
- |
-
-
-`str`
-
-A string that describes the filtering
-behavior in more detail.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/ContentType.md b/docs/api/google/generativeai/types/ContentType.md
deleted file mode 100644
index a9967a899..000000000
--- a/docs/api/google/generativeai/types/ContentType.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# google.generativeai.types.ContentType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-ContentType = Union[
-    google.generativeai.protos.Content,
-    google.generativeai.types.ContentDict,
-    Iterable[google.generativeai.types.PartType],
-    google.generativeai.protos.Part,
-    google.generativeai.types.PartDict,
-    google.generativeai.protos.Blob,
-    google.generativeai.types.BlobDict,
-    PIL.Image.Image,
-    IPython.core.display.Image,
-    str,
-    google.generativeai.protos.FunctionCall,
-    google.generativeai.protos.FunctionResponse,
-    google.generativeai.types.FileDataDict,
-    google.generativeai.protos.FileData,
-    google.generativeai.protos.File,
-    google.generativeai.types.File
-]
-
-
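-As a rough illustration of the accepted forms (the model name, prompt text, and image path below are placeholders), any of these can be passed as the `contents` of a `generate_content` call:
-
-```
-import google.generativeai as genai
-import PIL.Image
-
-model = genai.GenerativeModel('gemini-1.5-flash')
-
-# A plain string is valid content...
-response = model.generate_content('Describe a sunset.')
-
-# ...and so is an iterable of parts mixing text with an image.
-img = PIL.Image.open('photo.jpg')
-response = model.generate_content(['What is in this picture?', img])
-```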
-
-
-
diff --git a/docs/api/google/generativeai/types/ContentsType.md b/docs/api/google/generativeai/types/ContentsType.md
deleted file mode 100644
index dcfea9aeb..000000000
--- a/docs/api/google/generativeai/types/ContentsType.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# google.generativeai.types.ContentsType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-ContentsType = Union[
-    google.generativeai.protos.Content,
-    google.generativeai.types.ContentDict,
-    Iterable[google.generativeai.types.PartType],
-    google.generativeai.protos.Part,
-    google.generativeai.types.PartDict,
-    google.generativeai.protos.Blob,
-    google.generativeai.types.BlobDict,
-    PIL.Image.Image,
-    IPython.core.display.Image,
-    str,
-    google.generativeai.protos.FunctionCall,
-    google.generativeai.protos.FunctionResponse,
-    google.generativeai.types.FileDataDict,
-    google.generativeai.protos.FileData,
-    google.generativeai.protos.File,
-    google.generativeai.types.File,
-    Iterable[google.generativeai.types.StrictContentType],
-    NoneType
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/File.md b/docs/api/google/generativeai/types/File.md
deleted file mode 100644
index 627659d71..000000000
--- a/docs/api/google/generativeai/types/File.md
+++ /dev/null
@@ -1,210 +0,0 @@
-
-# google.generativeai.types.File
-
-
-
-
-
-
-
-
-
-
-google.generativeai.types.File(
- proto: (protos.File | File | dict)
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`create_time`
-
- |
-
-
-
-
- |
-
-
-
-`display_name`
-
- |
-
-
-
-
- |
-
-
-
-`error`
-
- |
-
-
-
-
- |
-
-
-
-`expiration_time`
-
- |
-
-
-
-
- |
-
-
-
-`mime_type`
-
- |
-
-
-
-
- |
-
-
-
-`name`
-
- |
-
-
-
-
- |
-
-
-
-`sha256_hash`
-
- |
-
-
-
-
- |
-
-
-
-`size_bytes`
-
- |
-
-
-
-
- |
-
-
-
-`state`
-
- |
-
-
-
-
- |
-
-
-
-`update_time`
-
- |
-
-
-
-
- |
-
-
-
-`uri`
-
- |
-
-
-
-
- |
-
-
-
-`video_metadata`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-delete
-
-View source
-
-
-delete()
-
-
-
-
-
-to_dict
-
-View source
-
-
-to_dict() -> dict[str, Any]
-
-
-
-
-
-to_proto
-
-View source
-
-
-to_proto() -> protos.File
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FileDataDict.md b/docs/api/google/generativeai/types/FileDataDict.md
deleted file mode 100644
index 39ba63522..000000000
--- a/docs/api/google/generativeai/types/FileDataDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.FileDataDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FileDataType.md b/docs/api/google/generativeai/types/FileDataType.md
deleted file mode 100644
index 12f0f5d29..000000000
--- a/docs/api/google/generativeai/types/FileDataType.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# google.generativeai.types.FileDataType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-FileDataType = Union[
-    google.generativeai.types.FileDataDict,
-    google.generativeai.protos.FileData,
-    google.generativeai.protos.File,
-    google.generativeai.types.File
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FunctionDeclaration.md b/docs/api/google/generativeai/types/FunctionDeclaration.md
deleted file mode 100644
index 768502bdc..000000000
--- a/docs/api/google/generativeai/types/FunctionDeclaration.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-# google.generativeai.types.FunctionDeclaration
-
-
-
-
-
-
-
-
-
-
-google.generativeai.types.FunctionDeclaration(
- *, name: str, description: str, parameters: (dict[str, Any] | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`description`
-
- |
-
-
-
-
- |
-
-
-
-`name`
-
- |
-
-
-
-
- |
-
-
-
-`parameters`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-from_function
-
-View source
-
-
-@staticmethod
-from_function(
- function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
-)
-
-
-Builds a `CallableFunctionDeclaration` from a python function.
-
-The function should have type annotations.
-
-This method is able to generate the schema for arguments annotated with types:
-
-`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
-
-This method does not yet build a schema for `TypedDict`, which would allow you to specify the dictionary
-contents, but you can build these manually.
-
-from_proto
-
-View source
-
-
-@classmethod
-from_proto(
- proto
-) -> FunctionDeclaration
-
-
-
-
-
-to_proto
-
-View source
-
-
-to_proto() -> protos.FunctionDeclaration
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FunctionDeclarationType.md b/docs/api/google/generativeai/types/FunctionDeclarationType.md
deleted file mode 100644
index 4bce40d90..000000000
--- a/docs/api/google/generativeai/types/FunctionDeclarationType.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# google.generativeai.types.FunctionDeclarationType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-FunctionDeclarationType = Union[
-    google.generativeai.types.FunctionDeclaration,
-    google.generativeai.protos.FunctionDeclaration,
-    dict[str, Any],
-    Callable[..., Any]
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FunctionLibrary.md b/docs/api/google/generativeai/types/FunctionLibrary.md
deleted file mode 100644
index ab6e978a6..000000000
--- a/docs/api/google/generativeai/types/FunctionLibrary.md
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# google.generativeai.types.FunctionLibrary
-
-
-
-
-
-
-
-A container for a set of `Tool` objects that manages lookup and execution of their functions.
-
-
-google.generativeai.types.FunctionLibrary(
- tools: Iterable[ToolType]
-)
-
-
-
-
-
-
-
-## Methods
-
-to_proto
-
-View source
-
-
-to_proto()
-
-
-
-
-
-__call__
-
-View source
-
-
-__call__(
- fc: protos.FunctionCall
-) -> (protos.Part | None)
-
-
-Call self as a function.
-
-
-__getitem__
-
-View source
-
-
-__getitem__(
- name: (str | protos.FunctionCall)
-) -> (FunctionDeclaration | protos.FunctionDeclaration)
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/FunctionLibraryType.md b/docs/api/google/generativeai/types/FunctionLibraryType.md
deleted file mode 100644
index a8b931f95..000000000
--- a/docs/api/google/generativeai/types/FunctionLibraryType.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# google.generativeai.types.FunctionLibraryType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-FunctionLibraryType = Union[
-    google.generativeai.types.FunctionLibrary,
-    Iterable[Union[str, google.generativeai.types.Tool, google.generativeai.protos.Tool, google.generativeai.types.ToolDict, Iterable[google.generativeai.types.FunctionDeclarationType], google.generativeai.types.FunctionDeclaration, google.generativeai.protos.FunctionDeclaration, dict[str, Any], Callable[..., Any]]],
-    str,
-    google.generativeai.types.Tool,
-    google.generativeai.protos.Tool,
-    google.generativeai.types.ToolDict,
-    Iterable[google.generativeai.types.FunctionDeclarationType],
-    google.generativeai.types.FunctionDeclaration,
-    google.generativeai.protos.FunctionDeclaration,
-    dict[str, Any],
-    Callable[..., Any]
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/GenerateContentResponse.md b/docs/api/google/generativeai/types/GenerateContentResponse.md
deleted file mode 100644
index eee32531b..000000000
--- a/docs/api/google/generativeai/types/GenerateContentResponse.md
+++ /dev/null
@@ -1,193 +0,0 @@
-
-# google.generativeai.types.GenerateContentResponse
-
-
-
-
-
-
-
-Instances of this class manage the response of the `generate_content` method.
-
-
-google.generativeai.types.GenerateContentResponse(
- done: bool,
- iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.
- GenerateContentResponse]),
- result: protos.GenerateContentResponse,
- chunks: (Iterable[protos.GenerateContentResponse] | None) = None
-)
-
-
-
-
-
-
-These are returned by `GenerativeModel.generate_content` and `ChatSession.send_message`.
-This object is based on the low level `protos.GenerateContentResponse` class which just has `prompt_feedback`
-and `candidates` attributes. This class adds several quick accessors for common use cases.
-
-The same object type is returned for both `stream=True/False`.
-
-### Streaming
-
-When you pass `stream=True` to `GenerativeModel.generate_content` or `ChatSession.send_message`,
-iterate over this object to receive chunks of the response:
-
-```
-response = model.generate_content(..., stream=True)
-for chunk in response:
- print(chunk.text)
-```
-
-`GenerateContentResponse.prompt_feedback` is available immediately, but
-`GenerateContentResponse.candidates`, and all the attributes derived from them (`.text`, `.parts`),
-are only available after the iteration is complete.
-
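-A short sketch of that ordering (the model name and prompt are illustrative):
-
-```
-import google.generativeai as genai
-
-model = genai.GenerativeModel('gemini-1.5-flash')
-response = model.generate_content('Tell me a story.', stream=True)
-
-print(response.prompt_feedback)        # available before any chunks arrive
-for chunk in response:
-    print(chunk.text)
-print(response.candidates[0].content)  # populated once iteration is complete
-```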
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidates`
-
- |
-
-
-The list of candidate responses.
-
- |
-
-
-
-`parts`
-
- |
-
-
-A quick accessor equivalent to `self.candidates[0].content.parts`
-
- |
-
-
-
-`prompt_feedback`
-
- |
-
-
-
-
- |
-
-
-
-`text`
-
- |
-
-
-A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
-
- |
-
-
-
-`usage_metadata`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-from_iterator
-
-View source
-
-
-@classmethod
-from_iterator(
- iterator: Iterable[protos.GenerateContentResponse]
-)
-
-
-
-
-
-from_response
-
-View source
-
-
-@classmethod
-from_response(
- response: protos.GenerateContentResponse
-)
-
-
-
-
-
-resolve
-
-View source
-
-
-resolve()
-
-
-
-
-
-to_dict
-
-View source
-
-
-to_dict()
-
-
-Returns the result as a JSON-compatible dict.
-
-Note: This doesn't capture the iterator state when streaming; it only captures the accumulated
-`GenerateContentResponse` fields.
-
-```
->>> import json
->>> response = model.generate_content('Hello?')
->>> json.dumps(response.to_dict())
-```
-
-__iter__
-
-View source
-
-
-__iter__()
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/GenerationConfig.md b/docs/api/google/generativeai/types/GenerationConfig.md
deleted file mode 100644
index 429a8a639..000000000
--- a/docs/api/google/generativeai/types/GenerationConfig.md
+++ /dev/null
@@ -1,419 +0,0 @@
-
-# google.generativeai.types.GenerationConfig
-
-
-
-
-
-
-
-A simple dataclass used to configure the generation parameters of `GenerativeModel.generate_content`.
-
-
- View aliases
-
-Main aliases
-
-`google.generativeai.GenerationConfig`
-
-
-
-
-google.generativeai.types.GenerationConfig(
- candidate_count: (int | None) = None,
- stop_sequences: (Iterable[str] | None) = None,
- max_output_tokens: (int | None) = None,
- temperature: (float | None) = None,
- top_p: (float | None) = None,
- top_k: (int | None) = None,
- seed: (int | None) = None,
- response_mime_type: (str | None) = None,
- response_schema: (protos.Schema | Mapping[str, Any] | type | None) = None,
- presence_penalty: (float | None) = None,
- frequency_penalty: (float | None) = None,
- response_logprobs: (bool | None) = None,
- logprobs: (int | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`candidate_count`
-
- |
-
-
- Number of generated responses to return.
-
- |
-
-
-
-`stop_sequences`
-
- |
-
-
- The set of character sequences (up
-to 5) that will stop output generation. If
-specified, the API will stop at the first
-appearance of a stop sequence. The stop sequence
-will not be included as part of the response.
-
- |
-
-
-
-`max_output_tokens`
-
- |
-
-
- The maximum number of tokens to include in a
-candidate.
-
-If unset, this will default to output_token_limit specified
-in the model's specification.
-
- |
-
-
-
-`temperature`
-
- |
-
-
- Controls the randomness of the output. Note: The
-default value varies by model, see the Model.temperature
-attribute of the `Model` returned by the `genai.get_model`
-function.
-
-Values can range from [0.0,1.0], inclusive. A value closer
-to 1.0 will produce responses that are more varied and
-creative, while a value closer to 0.0 will typically result
-in more straightforward responses from the model.
-
- |
-
-
-
-`top_p`
-
- |
-
-
- Optional. The maximum cumulative probability of tokens to
-consider when sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Tokens are sorted based on their assigned probabilities so
-that only the most likely tokens are considered. Top-k
-sampling directly limits the maximum number of tokens to
-consider, while Nucleus sampling limits number of tokens
-based on the cumulative probability.
-
-Note: The default value varies by model, see the
-Model.top_p attribute of the `Model` returned by the
-`genai.get_model` function.
-
- |
-
-
-
-`top_k`
-
- |
-
-
-`int`
-
-Optional. The maximum number of tokens to consider when
-sampling.
-
-The model uses combined Top-k and nucleus sampling.
-
-Top-k sampling considers the set of `top_k` most probable
-tokens. Defaults to 40.
-
-Note: The default value varies by model, see the
-Model.top_k attribute of the `Model` returned by the
-`genai.get_model` function.
-
- |
-
-
-
-`seed`
-
- |
-
-
- Optional. Seed used in decoding. If not set, the request uses a randomly generated seed.
-
- |
-
-
-
-`response_mime_type`
-
- |
-
-
- Optional. Output response mimetype of the generated candidate text.
-
-Supported mimetype:
- `text/plain`: (default) Text output.
- `text/x-enum`: for use with a string-enum in `response_schema`
- `application/json`: JSON response in the candidates.
-
- |
-
-
-
-`response_schema`
-
- |
-
-
- Optional. Specifies the format of the JSON requested if response_mime_type is
-`application/json`.
-
- |
-
-
-
-`presence_penalty`
-
- |
-
-
- Optional.
-
- |
-
-
-
-`frequency_penalty`
-
- |
-
-
- Optional.
-
- |
-
-
-
-`response_logprobs`
-
- |
-
-
- Optional. If true, export the `logprobs` results in response.
-
- |
-
-
-
-`logprobs`
-
- |
-
-
- Optional. Number of candidates of log probabilities to return at each step of decoding.
-
- |
-
-
-
-
-
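-A rough usage sketch (the model name and parameter values below are illustrative, not recommendations):
-
-```
-import google.generativeai as genai
-
-config = genai.GenerationConfig(
-    temperature=0.4,          # lower values give more deterministic output
-    max_output_tokens=256,
-    top_p=0.95,
-    top_k=40,
-    stop_sequences=['DONE'],
-)
-model = genai.GenerativeModel('gemini-1.5-flash')
-response = model.generate_content('Write a haiku about tide pools.',
-                                  generation_config=config)
-```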
-## Methods
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-candidate_count
-
- |
-
-
-`None`
-
- |
-
-
-
-frequency_penalty
-
- |
-
-
-`None`
-
- |
-
-
-
-logprobs
-
- |
-
-
-`None`
-
- |
-
-
-
-max_output_tokens
-
- |
-
-
-`None`
-
- |
-
-
-
-presence_penalty
-
- |
-
-
-`None`
-
- |
-
-
-
-response_logprobs
-
- |
-
-
-`None`
-
- |
-
-
-
-response_mime_type
-
- |
-
-
-`None`
-
- |
-
-
-
-response_schema
-
- |
-
-
-`None`
-
- |
-
-
-
-seed
-
- |
-
-
-`None`
-
- |
-
-
-
-stop_sequences
-
- |
-
-
-`None`
-
- |
-
-
-
-temperature
-
- |
-
-
-`None`
-
- |
-
-
-
-top_k
-
- |
-
-
-`None`
-
- |
-
-
-
-top_p
-
- |
-
-
-`None`
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/GenerationConfigDict.md b/docs/api/google/generativeai/types/GenerationConfigDict.md
deleted file mode 100644
index f62f4b12d..000000000
--- a/docs/api/google/generativeai/types/GenerationConfigDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.GenerationConfigDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/GenerationConfigType.md b/docs/api/google/generativeai/types/GenerationConfigType.md
deleted file mode 100644
index a182edb70..000000000
--- a/docs/api/google/generativeai/types/GenerationConfigType.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.GenerationConfigType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-GenerationConfigType = Union[
-    google.generativeai.protos.GenerationConfig,
-    google.generativeai.types.GenerationConfigDict,
-    google.generativeai.types.GenerationConfig
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/HarmBlockThreshold.md b/docs/api/google/generativeai/types/HarmBlockThreshold.md
deleted file mode 100644
index 05992883d..000000000
--- a/docs/api/google/generativeai/types/HarmBlockThreshold.md
+++ /dev/null
@@ -1,756 +0,0 @@
-
-# google.generativeai.types.HarmBlockThreshold
-
-
-
-
-
-
-
-Block at and beyond a specified harm probability.
-
-
- View aliases
-
-Main aliases
-
-`google.generativeai.protos.SafetySetting.HarmBlockThreshold`
-
-
-
-
-google.generativeai.types.HarmBlockThreshold(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-Values |
-
-
-
-
-`HARM_BLOCK_THRESHOLD_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Threshold is unspecified.
-
- |
-
-
-
-`BLOCK_LOW_AND_ABOVE`
-
- |
-
-
-`1`
-
-Content with NEGLIGIBLE will be allowed.
-
- |
-
-
-
-`BLOCK_MEDIUM_AND_ABOVE`
-
- |
-
-
-`2`
-
-Content with NEGLIGIBLE and LOW will be
-allowed.
-
- |
-
-
-
-`BLOCK_ONLY_HIGH`
-
- |
-
-
-`3`
-
-Content with NEGLIGIBLE, LOW, and MEDIUM will
-be allowed.
-
- |
-
-
-
-`BLOCK_NONE`
-
- |
-
-
-`4`
-
-All content will be allowed.
-
- |
-
-
-
-`OFF`
-
- |
-
-
-`5`
-
-Turn off the safety filter.
-
- |
-
-
-
-
-
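-For illustration, a hedged sketch of how these thresholds are commonly passed as safety settings (the category/threshold choices and model name are placeholders):
-
-```
-import google.generativeai as genai
-from google.generativeai.types import HarmBlockThreshold, HarmCategory
-
-model = genai.GenerativeModel('gemini-1.5-flash')
-response = model.generate_content(
-    'Summarize this news article for me.',
-    safety_settings={
-        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
-        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-    },
-)
-```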
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-BLOCK_LOW_AND_ABOVE
-
- |
-
-
-``
-
- |
-
-
-
-BLOCK_MEDIUM_AND_ABOVE
-
- |
-
-
-``
-
- |
-
-
-
-BLOCK_NONE
-
- |
-
-
-``
-
- |
-
-
-
-BLOCK_ONLY_HIGH
-
- |
-
-
-``
-
- |
-
-
-
-HARM_BLOCK_THRESHOLD_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-OFF
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/HarmCategory.md b/docs/api/google/generativeai/types/HarmCategory.md
deleted file mode 100644
index bea90dd90..000000000
--- a/docs/api/google/generativeai/types/HarmCategory.md
+++ /dev/null
@@ -1,647 +0,0 @@
-
-# google.generativeai.types.HarmCategory
-
-
-
-
-
-
-
-Harm categories supported by the Gemini family of models.
-
-
-google.generativeai.types.HarmCategory(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-HARM_CATEGORY_DANGEROUS_CONTENT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_HARASSMENT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_HATE_SPEECH
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_SEXUALLY_EXPLICIT
-
- |
-
-
-``
-
- |
-
-
-
-HARM_CATEGORY_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/HarmProbability.md b/docs/api/google/generativeai/types/HarmProbability.md
deleted file mode 100644
index 1d7e2f6d2..000000000
--- a/docs/api/google/generativeai/types/HarmProbability.md
+++ /dev/null
@@ -1,734 +0,0 @@
-
-# google.generativeai.types.HarmProbability
-
-
-
-
-
-
-
-The probability that a piece of content is harmful.
-
-
- View aliases
-
-Main aliases
-
-`google.generativeai.protos.SafetyRating.HarmProbability`
-
-
-
-
-google.generativeai.types.HarmProbability(
- *args, **kwds
-)
-
-
-
-
-
-
-The classification system gives the probability of the content
-being unsafe. This does not indicate the severity of harm for a
-piece of content.
-
-
-
-
-Values |
-
-
-
-
-`HARM_PROBABILITY_UNSPECIFIED`
-
- |
-
-
-`0`
-
-Probability is unspecified.
-
- |
-
-
-
-`NEGLIGIBLE`
-
- |
-
-
-`1`
-
-Content has a negligible chance of being
-unsafe.
-
- |
-
-
-
-`LOW`
-
- |
-
-
-`2`
-
-Content has a low chance of being unsafe.
-
- |
-
-
-
-`MEDIUM`
-
- |
-
-
-`3`
-
-Content has a medium chance of being unsafe.
-
- |
-
-
-
-`HIGH`
-
- |
-
-
-`4`
-
-Content has a high chance of being unsafe.
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`denominator`
-
- |
-
-
-the denominator of a rational number in lowest terms
-
- |
-
-
-
-`imag`
-
- |
-
-
-the imaginary part of a complex number
-
- |
-
-
-
-`numerator`
-
- |
-
-
-the numerator of a rational number in lowest terms
-
- |
-
-
-
-`real`
-
- |
-
-
-the real part of a complex number
-
- |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-HARM_PROBABILITY_UNSPECIFIED
-
- |
-
-
-``
-
- |
-
-
-
-HIGH
-
- |
-
-
-``
-
- |
-
-
-
-LOW
-
- |
-
-
-``
-
- |
-
-
-
-MEDIUM
-
- |
-
-
-``
-
- |
-
-
-
-NEGLIGIBLE
-
- |
-
-
-``
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/IncompleteIterationError.md b/docs/api/google/generativeai/types/IncompleteIterationError.md
deleted file mode 100644
index ee2aa8cda..000000000
--- a/docs/api/google/generativeai/types/IncompleteIterationError.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.IncompleteIterationError
-
-
-
-
-
-
-
-Common base class for all non-exit exceptions.
-
-
-
-
diff --git a/docs/api/google/generativeai/types/Model.md b/docs/api/google/generativeai/types/Model.md
deleted file mode 100644
index 5331cfa1d..000000000
--- a/docs/api/google/generativeai/types/Model.md
+++ /dev/null
@@ -1,257 +0,0 @@
-
-# google.generativeai.types.Model
-
-
-
-
-
-
-
-A dataclass representation of a `protos.Model`.
-
-
-google.generativeai.types.Model(
- name: str,
- base_model_id: str,
- version: str,
- display_name: str,
- description: str,
- input_token_limit: int,
- output_token_limit: int,
- supported_generation_methods: list[str],
- temperature: (float | None) = None,
- max_temperature: (float | None) = None,
- top_p: (float | None) = None,
- top_k: (int | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming
-convention of: "{base_model_id}-{version}". For example: `models/chat-bison-001`.
-
- |
-
-
-
-`base_model_id`
-
- |
-
-
-The base name of the model. For example: `chat-bison`.
-
- |
-
-
-
-`version`
-
- |
-
-
- The major version number of the model. For example: `001`.
-
- |
-
-
-
-`display_name`
-
- |
-
-
-The human-readable name of the model. E.g. `"Chat Bison"`. The name can be up
-to 128 characters long and can consist of any UTF-8 characters.
-
- |
-
-
-
-`description`
-
- |
-
-
-A short description of the model.
-
- |
-
-
-
-`input_token_limit`
-
- |
-
-
-Maximum number of input tokens allowed for this model.
-
- |
-
-
-
-`output_token_limit`
-
- |
-
-
-Maximum number of output tokens available for this model.
-
- |
-
-
-
-`supported_generation_methods`
-
- |
-
-
-Lists which methods are supported by the model. The method
-names are defined as camel-case strings, such as `generateMessage`, which correspond to
-API methods.
-
- |
-
-
-
-`temperature`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`max_temperature`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`top_p`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`top_k`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-
-
-## Methods
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-max_temperature
-
- |
-
-
-`None`
-
- |
-
-
-
-temperature
-
- |
-
-
-`None`
-
- |
-
-
-
-top_k
-
- |
-
-
-`None`
-
- |
-
-
-
-top_p
-
- |
-
-
-`None`
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/ModelsIterable.md b/docs/api/google/generativeai/types/ModelsIterable.md
deleted file mode 100644
index 0039b3154..000000000
--- a/docs/api/google/generativeai/types/ModelsIterable.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# google.generativeai.types.ModelsIterable
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-ModelsIterable = Iterable[
- google.generativeai.types.Model
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/PartDict.md b/docs/api/google/generativeai/types/PartDict.md
deleted file mode 100644
index d0df202ae..000000000
--- a/docs/api/google/generativeai/types/PartDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.PartDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/PartType.md b/docs/api/google/generativeai/types/PartType.md
deleted file mode 100644
index 73a60027f..000000000
--- a/docs/api/google/generativeai/types/PartType.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-# google.generativeai.types.PartType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-PartType = Union[
-    google.generativeai.protos.Part,
-    google.generativeai.types.PartDict,
-    google.generativeai.protos.Blob,
-    google.generativeai.types.BlobDict,
-    PIL.Image.Image,
-    IPython.core.display.Image,
-    str,
-    google.generativeai.protos.FunctionCall,
-    google.generativeai.protos.FunctionResponse,
-    google.generativeai.types.FileDataDict,
-    google.generativeai.protos.FileData,
-    google.generativeai.protos.File,
-    google.generativeai.types.File
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/Permission.md b/docs/api/google/generativeai/types/Permission.md
deleted file mode 100644
index 4c5bde2f4..000000000
--- a/docs/api/google/generativeai/types/Permission.md
+++ /dev/null
@@ -1,290 +0,0 @@
-
-# google.generativeai.types.Permission
-
-
-
-
-
-
-
-A permission to access a resource.
-
-
-google.generativeai.types.Permission(
- name: str,
- role: RoleOptions,
- grantee_type: Optional[GranteeTypeOptions] = None,
- email_address: Optional[str] = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`name`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`role`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`grantee_type`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`email_address`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-
-
-## Methods
-
-delete
-
-View source
-
-
-delete(
- client: (glm.PermissionServiceClient | None) = None
-) -> None
-
-
-Delete permission (self).
-
-
-delete_async
-
-View source
-
-
-delete_async(
- client=None
-)
-
-
-This is the async version of `Permission.delete`.
-
-
-get
-
-View source
-
-
-@classmethod
-get(
- name: str, client: (glm.PermissionServiceClient | None) = None
-) -> Permission
-
-
-Get information about a specific permission.
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the permission to get.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-Requested permission as an instance of `Permission`.
-
- |
-
-
-
-
-
-
-get_async
-
-View source
-
-
-get_async(
- name, client=None
-)
-
-
-This is the async version of `Permission.get`.
-
-
-to_dict
-
-View source
-
-
-to_dict() -> dict[str, Any]
-
-
-
-
-
-update
-
-View source
-
-
-update(
- updates: dict[str, Any],
- client: (glm.PermissionServiceClient | None) = None
-) -> Permission
-
-
-Update a list of fields for a specified permission.
-
-
-
-
-
-Args |
-
-
-
-
-`updates`
-
- |
-
-
-The list of fields to update.
-Currently only `role` is supported as an update path.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-`Permission` object with specified updates.
-
- |
-
-
-
-
-
-
-update_async
-
-View source
-
-
-update_async(
- updates, client=None
-)
-
-
-This is the async version of `Permission.update`.
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-
-
-
-
-
-
-
-Class Variables |
-
-
-
-
-email_address
-
- |
-
-
-`None`
-
- |
-
-
-
diff --git a/docs/api/google/generativeai/types/Permissions.md b/docs/api/google/generativeai/types/Permissions.md
deleted file mode 100644
index bb531c4e6..000000000
--- a/docs/api/google/generativeai/types/Permissions.md
+++ /dev/null
@@ -1,432 +0,0 @@
-
-# google.generativeai.types.Permissions
-
-
-
-
-
-
-
-
-
-
-google.generativeai.types.Permissions(
- parent
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`parent`
-
- |
-
-
-
-
- |
-
-
-
-
-
-## Methods
-
-create
-
-View source
-
-
-create(
- role: RoleOptions,
- grantee_type: Optional[GranteeTypeOptions] = None,
- email_address: Optional[str] = None,
- client: (glm.PermissionServiceClient | None) = None
-) -> Permission
-
-
-Create a new permission on a resource (self).
-
-
-
-
-
-Args |
-
-
-
-
-`parent`
-
- |
-
-
-The resource name of the parent resource in which the permission will be listed.
-
- |
-
-
-
-`role`
-
- |
-
-
-role that will be granted by the permission.
-
- |
-
-
-
-`grantee_type`
-
- |
-
-
-The type of the grantee for the permission.
-
- |
-
-
-
-`email_address`
-
- |
-
-
-The email address of the grantee.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-`Permission` object with specified parent, role, grantee type, and email address.
-
- |
-
-
-
-
-
-
-
-
-
-Raises |
-
-
-
-
-`ValueError`
-
- |
-
-
-When email_address is specified and grantee_type is set to EVERYONE.
-
- |
-
-
-
-`ValueError`
-
- |
-
-
-When email_address is not specified and grantee_type is not set to EVERYONE.
-
- |
-
-
-
-
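-A hedged sketch of creating permissions (the tuned-model name and email address are placeholders, and the assumption that a tuned model exposes a `permissions` attribute may not match how you obtained the `Permissions` instance):
-
-```
-import google.generativeai as genai
-
-tuned_model = genai.get_tuned_model('tunedModels/my-model')
-
-# Grant read access to one user (email given, grantee_type left to default).
-tuned_model.permissions.create(role='READER', email_address='user@example.com')
-
-# Grant read access to everyone; email_address must be omitted here or a
-# ValueError is raised (see Raises above).
-tuned_model.permissions.create(role='READER', grantee_type='EVERYONE')
-```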
-
-create_async
-
-View source
-
-
-create_async(
- role, grantee_type=None, email_address=None, client=None
-)
-
-
-This is the async version of `PermissionAdapter.create_permission`.
-
-
-get
-
-View source
-
-
-@classmethod
-get(
- name: str
-) -> Permission
-
-
-Get information about a specific permission.
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the permission to get.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-Requested permission as an instance of `Permission`.
-
- |
-
-
-
-
-
-
-get_async
-
-View source
-
-
-get_async(
- name
-)
-
-
-Get information about a specific permission.
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-The name of the permission to get.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-Requested permission as an instance of `Permission`.
-
- |
-
-
-
-
-
-
-list
-
-View source
-
-
-list(
- page_size: Optional[int] = None,
- client: (glm.PermissionServiceClient | None) = None
-) -> Iterable[Permission]
-
-
-List `Permission`s enforced on a resource (self).
-
-
-
-
-
-Args |
-
-
-
-
-`parent`
-
- |
-
-
-The resource name of the parent resource in which the permission will be listed.
-
- |
-
-
-
-`page_size`
-
- |
-
-
-The maximum number of permissions to return (per page). The service may return fewer permissions.
-
- |
-
-
-
-
-
-
-
-
-Returns |
-
-
-
-Paginated list of `Permission` objects.
-
- |
-
-
-
-
-
-
-list_async
-
-View source
-
-
-list_async(
- page_size=None, client=None
-)
-
-
-This is the async version of `PermissionAdapter.list_permissions`.
-
-
-transfer_ownership
-
-View source
-
-
-transfer_ownership(
- email_address: str, client: (glm.PermissionServiceClient | None) = None
-) -> None
-
-
-Transfer ownership of a resource (self) to a new owner.
-
-
-
-
-
-Args |
-
-
-
-
-`name`
-
- |
-
-
-Name of the resource to transfer ownership.
-
- |
-
-
-
-`email_address`
-
- |
-
-
-Email address of the new owner.
-
- |
-
-
-
-
-
-transfer_ownership_async
-
-View source
-
-
-transfer_ownership_async(
- email_address, client=None
-)
-
-
-This is the async version of `PermissionAdapter.transfer_ownership`.
-
-
-__iter__
-
-View source
-
-
-__iter__()
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/RequestOptions.md b/docs/api/google/generativeai/types/RequestOptions.md
deleted file mode 100644
index bc23ea02e..000000000
--- a/docs/api/google/generativeai/types/RequestOptions.md
+++ /dev/null
@@ -1,209 +0,0 @@
-
-# google.generativeai.types.RequestOptions
-
-
-
-
-
-
-
-Request options
-
-
-google.generativeai.types.RequestOptions(
- *,
- retry: (google.api_core.retry.Retry | None) = None,
- timeout: (int | float | google.api_core.timeout.TimeToDeadlineTimeout | None) = None
-)
-
-
-
-
-
-
-
-```
->>> import google.generativeai as genai
->>> from google.generativeai.types import RequestOptions
->>> from google.api_core import retry
->>>
->>> model = genai.GenerativeModel()
->>> response = model.generate_content('Hello',
-... request_options=RequestOptions(
-... retry=retry.Retry(initial=10, multiplier=2, maximum=60, timeout=300)))
->>> response = model.generate_content('Hello',
-...     request_options=RequestOptions(timeout=600))
-```
-
-
-
-
-Args |
-
-
-
-
-`retry`
-
- |
-
-
-Refer to [retry docs](https://googleapis.dev/python/google-api-core/latest/retry.html) for details.
-
- |
-
-
-
-`timeout`
-
- |
-
-
-In seconds (or provide a [TimeToDeadlineTimeout](https://googleapis.dev/python/google-api-core/latest/timeout.html) object).
-
- |
-
-
-
-
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`retry`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-`timeout`
-
- |
-
-
-Dataclass field
-
- |
-
-
-
-
-
-## Methods
-
-get
-
-
-get(
- key, default=None
-)
-
-
-D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
-
-
-items
-
-
-items()
-
-
-D.items() -> a set-like object providing a view on D's items
-
-
-keys
-
-
-keys()
-
-
-D.keys() -> a set-like object providing a view on D's keys
-
-
-values
-
-
-values()
-
-
-D.values() -> an object providing a view on D's values
-
-
-__contains__
-
-
-__contains__(
- key
-)
-
-
-
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__getitem__
-
-View source
-
-
-__getitem__(
- item
-)
-
-
-
-
-
-__iter__
-
-View source
-
-
-__iter__()
-
-
-
-
-
-__len__
-
-View source
-
-
-__len__()
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/RequestOptionsType.md b/docs/api/google/generativeai/types/RequestOptionsType.md
deleted file mode 100644
index 8466d0e31..000000000
--- a/docs/api/google/generativeai/types/RequestOptionsType.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# google.generativeai.types.RequestOptionsType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-RequestOptionsType = Union[
-    google.generativeai.types.RequestOptions,
-    google.generativeai.types.helper_types.RequestOptionsDict
-]
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/SafetyFeedbackDict.md b/docs/api/google/generativeai/types/SafetyFeedbackDict.md
deleted file mode 100644
index 9cd289074..000000000
--- a/docs/api/google/generativeai/types/SafetyFeedbackDict.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-# google.generativeai.types.SafetyFeedbackDict
-
-
-
-
-
-
-
-Safety feedback for an entire request.
-
-
-
-This field is populated if content in the input and/or response
-is blocked due to safety settings. SafetyFeedback may not exist
-for every HarmCategory. Each SafetyFeedback will return the
-safety settings used by the request as well as the lowest
-HarmProbability that should be allowed in order to return a
-result.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`rating`
-
- |
-
-
-`google.ai.generativelanguage.SafetyRating`
-
-Safety rating evaluated from content.
-
- |
-
-
-
-`setting`
-
- |
-
-
-`google.ai.generativelanguage.SafetySetting`
-
-Safety settings applied to the request.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/SafetyRatingDict.md b/docs/api/google/generativeai/types/SafetyRatingDict.md
deleted file mode 100644
index 69fce139a..000000000
--- a/docs/api/google/generativeai/types/SafetyRatingDict.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-# google.generativeai.types.SafetyRatingDict
-
-
-
-
-
-
-
-Safety rating for a piece of content.
-
-
-
-The safety rating contains the category of harm and the harm
-probability level in that category for a piece of content.
-Content is classified for safety across a number of harm
-categories and the probability of the harm classification is
-included here.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`category`
-
- |
-
-
-`google.ai.generativelanguage.HarmCategory`
-
-Required. The category for this rating.
-
- |
-
-
-
-`probability`
-
- |
-
-
-`google.ai.generativelanguage.SafetyRating.HarmProbability`
-
-Required. The probability of harm for this
-content.
-
- |
-
-
-
-`blocked`
-
- |
-
-
-`bool`
-
-Was this content blocked because of this
-rating?
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/SafetySettingDict.md b/docs/api/google/generativeai/types/SafetySettingDict.md
deleted file mode 100644
index cd8ec5731..000000000
--- a/docs/api/google/generativeai/types/SafetySettingDict.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# google.generativeai.types.SafetySettingDict
-
-
-
-
-
-
-
-Safety setting, affecting the safety-blocking behavior.
-
-
-
-Passing a safety setting for a category changes the allowed
-probability that content is blocked.
-
-
-
-
-
-
-Attributes |
-
-
-
-
-`category`
-
- |
-
-
-`google.ai.generativelanguage.HarmCategory`
-
-Required. The category for this setting.
-
- |
-
-
-
-`threshold`
-
- |
-
-
-`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
-
-Required. Controls the probability threshold
-at which harm is blocked.
-
- |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/Status.md b/docs/api/google/generativeai/types/Status.md
deleted file mode 100644
index f86d6e31c..000000000
--- a/docs/api/google/generativeai/types/Status.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# google.generativeai.types.Status
-
-
-
-
-
-
-
-A ProtocolMessage
-
-
-
-
-
-
-
-
-
-| Attributes | Proto field |
-| --- | --- |
-| `code` | `int32 code` |
-| `details` | `repeated Any details` |
-| `message` | `string message` |
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/StopCandidateException.md b/docs/api/google/generativeai/types/StopCandidateException.md
deleted file mode 100644
index c9e71b357..000000000
--- a/docs/api/google/generativeai/types/StopCandidateException.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.StopCandidateException
-
-
-
-
-
-
-
-Common base class for all non-exit exceptions.
-
-
-
-
diff --git a/docs/api/google/generativeai/types/StrictContentType.md b/docs/api/google/generativeai/types/StrictContentType.md
deleted file mode 100644
index 008b7b257..000000000
--- a/docs/api/google/generativeai/types/StrictContentType.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# google.generativeai.types.StrictContentType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-StrictContentType = Union[
- google.generativeai.protos.Content,
- google.generativeai.types.ContentDict
-]
-
-
-
-
-
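-
-#### Example
-
-A small sketch of the two forms this alias accepts; the text values are illustrative:
-
-```
-import google.generativeai as genai
-
-# ContentDict form: a plain dict with "role" and "parts".
-content_dict = {"role": "user", "parts": ["Hello there."]}
-
-# protos.Content form: the underlying protocol buffer message.
-content_proto = genai.protos.Content(
-    role="user",
-    parts=[genai.protos.Part(text="Hello there.")],
-)
-```
-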
diff --git a/docs/api/google/generativeai/types/Tool.md b/docs/api/google/generativeai/types/Tool.md
deleted file mode 100644
index 68c3dfb50..000000000
--- a/docs/api/google/generativeai/types/Tool.md
+++ /dev/null
@@ -1,118 +0,0 @@
-
-# google.generativeai.types.Tool
-
-
-
-
-
-
-
-A wrapper for protos.Tool, containing a collection of related `FunctionDeclaration` objects, a protos.CodeExecution object, and a protos.GoogleSearchRetrieval object.
-
-
-google.generativeai.types.Tool(
- *,
- function_declarations: (Iterable[FunctionDeclarationType] | None) = None,
- google_search_retrieval: (GoogleSearchRetrievalType | None) = None,
- code_execution: (protos.CodeExecution | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes: `code_execution`, `function_declarations`, `google_search_retrieval`.
-
-
-
-
-
-## Methods
-
-to_proto
-
-View source
-
-
-to_proto()
-
-
-
-
-
-__call__
-
-View source
-
-
-__call__(
- fc: protos.FunctionCall
-) -> (protos.FunctionResponse | None)
-
-
-Call self as a function.
-
-
-__getitem__
-
-View source
-
-
-__getitem__(
- name: (str | protos.FunctionCall)
-) -> (FunctionDeclaration | protos.FunctionDeclaration)
-
-
-
-
-
-
-
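-
-#### Example
-
-A minimal construction sketch using the methods documented above. The declaration name and description are hypothetical, and the parameter schema is omitted for brevity:
-
-```
-from google.generativeai.types import FunctionDeclaration, Tool
-
-get_weather = FunctionDeclaration(
-    name="get_weather",  # hypothetical function
-    description="Look up the current weather for a city.",
-)
-
-tool = Tool(function_declarations=[get_weather])
-
-declaration = tool["get_weather"]  # __getitem__ lookup by name
-proto_tool = tool.to_proto()       # convert to protos.Tool
-```
-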
diff --git a/docs/api/google/generativeai/types/ToolDict.md b/docs/api/google/generativeai/types/ToolDict.md
deleted file mode 100644
index 7c58c9de2..000000000
--- a/docs/api/google/generativeai/types/ToolDict.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.ToolDict
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/ToolsType.md b/docs/api/google/generativeai/types/ToolsType.md
deleted file mode 100644
index 9cf0a768f..000000000
--- a/docs/api/google/generativeai/types/ToolsType.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.types.ToolsType
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-ToolsType = Union[
- Iterable[Union[str, google.generativeai.types.Tool, google.generativeai.protos.Tool, google.generativeai.types.ToolDict, Iterable[google.generativeai.types.FunctionDeclarationType], google.generativeai.types.FunctionDeclaration, google.generativeai.protos.FunctionDeclaration, dict[str, Any], Callable[..., Any]]],
- str,
- google.generativeai.types.Tool,
- google.generativeai.protos.Tool,
- google.generativeai.types.ToolDict,
- Iterable[google.generativeai.types.FunctionDeclarationType],
- google.generativeai.types.FunctionDeclaration,
- google.generativeai.protos.FunctionDeclaration,
- dict[str, Any],
- Callable[..., Any]
-]
-
-
-
-
-
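-
-#### Example
-
-Because plain callables are members of this union, a Python function can be passed directly as a tool. A short sketch; the model name is an illustrative assumption:
-
-```
-import google.generativeai as genai
-
-def add(a: float, b: float) -> float:
-    """Returns the sum of a and b."""
-    return a + b
-
-# Assumes genai.configure(api_key=...) has already been called.
-model = genai.GenerativeModel("gemini-1.5-flash", tools=[add])  # hypothetical model name
-```
-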
diff --git a/docs/api/google/generativeai/types/TunedModel.md b/docs/api/google/generativeai/types/TunedModel.md
deleted file mode 100644
index 6316be660..000000000
--- a/docs/api/google/generativeai/types/TunedModel.md
+++ /dev/null
@@ -1,375 +0,0 @@
-
-# google.generativeai.types.TunedModel
-
-
-
-
-
-
-
-A dataclass representation of a protos.TunedModel.
-
-
-google.generativeai.types.TunedModel(
- name: (str | None) = None,
- source_model: (str | None) = None,
- base_model: (str | None) = None,
- display_name: str = '',
- description: str = '',
- temperature: (float | None) = None,
- top_p: (float | None) = None,
- top_k: (float | None) = None,
- state: TunedModelState = TunedModelState.STATE_UNSPECIFIED,
- create_time: (datetime.datetime | None) = None,
- update_time: (datetime.datetime | None) = None,
- tuning_task: (TuningTask | None) = None,
- reader_project_numbers: (list[int] | None) = None
-)
-
-
-
-
-
-
-
-
-
-
-
-
-Attributes: `permissions`, plus the dataclass fields `name`, `source_model`, `base_model`, `display_name`, `description`, `temperature`, `top_p`, `top_k`, `state`, `create_time`, `update_time`, `tuning_task`, and `reader_project_numbers`.
-
-
-
-
-
-## Methods
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-
-
-
-
-
-
-
-| Class Variables | Value |
-| --- | --- |
-| `base_model` | `None` |
-| `create_time` | `None` |
-| `description` | `''` |
-| `display_name` | `''` |
-| `name` | `None` |
-| `reader_project_numbers` | `None` |
-| `source_model` | `None` |
-| `state` | `` |
-| `temperature` | `None` |
-| `top_k` | `None` |
-| `top_p` | `None` |
-| `tuning_task` | `None` |
-| `update_time` | `None` |
-
-
-
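-
-#### Example
-
-A short sketch of reading these dataclass fields from the tuned models available to the caller:
-
-```
-import google.generativeai as genai
-
-# Assumes genai.configure(api_key=...) has already been called.
-for tuned in genai.list_tuned_models():
-    print(tuned.name, tuned.state, tuned.create_time)
-```
-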
diff --git a/docs/api/google/generativeai/types/TunedModelNameOptions.md b/docs/api/google/generativeai/types/TunedModelNameOptions.md
deleted file mode 100644
index 0ab3d7879..000000000
--- a/docs/api/google/generativeai/types/TunedModelNameOptions.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# google.generativeai.types.TunedModelNameOptions
-
-
-This symbol is a **type alias**.
-
-
-
-#### Source:
-
-
-TunedModelNameOptions = Union[
- str,
- google.generativeai.types.TunedModel,
- google.generativeai.protos.TunedModel
-]
-
-
-
-
-
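-
-#### Example
-
-A small sketch of the string form; the tuned model name is hypothetical:
-
-```
-import google.generativeai as genai
-
-# Assumes genai.configure(api_key=...) has already been called.
-tuned = genai.get_tuned_model("tunedModels/my-sentiment-model")  # hypothetical name
-
-# `tuned` (a types.TunedModel) and its protos.TunedModel counterpart are the
-# other two members of this union, for APIs that accept any of the three forms.
-```
-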
diff --git a/docs/api/google/generativeai/types/TunedModelState.md b/docs/api/google/generativeai/types/TunedModelState.md
deleted file mode 100644
index bf55c2738..000000000
--- a/docs/api/google/generativeai/types/TunedModelState.md
+++ /dev/null
@@ -1,706 +0,0 @@
-
-# google.generativeai.types.TunedModelState
-
-
-
-
-
-
-
-The state of the tuned model.
-
-
- View aliases
-
-Main aliases
-
-`google.generativeai.protos.TunedModel.State`
-
-
-
-
-google.generativeai.types.TunedModelState(
- *args, **kwds
-)
-
-
-
-
-
-
-
-
-
-
-| Values | Number | Description |
-| --- | --- | --- |
-| `STATE_UNSPECIFIED` | `0` | The default value. This value is unused. |
-| `CREATING` | `1` | The model is being created. |
-| `ACTIVE` | `2` | The model is ready to be used. |
-| `FAILED` | `3` | The model failed to be created. |
-
-
-
-
-
-
-
-
-
-
-| Attributes | Description |
-| --- | --- |
-| `denominator` | the denominator of a rational number in lowest terms |
-| `imag` | the imaginary part of a complex number |
-| `numerator` | the numerator of a rational number in lowest terms |
-| `real` | the real part of a complex number |
-
-
-
-
-
-## Methods
-
-as_integer_ratio
-
-
-as_integer_ratio()
-
-
-Return a pair of integers, whose ratio is equal to the original int.
-
-The ratio is in lowest terms and has a positive denominator.
-
-```
->>> (10).as_integer_ratio()
-(10, 1)
->>> (-10).as_integer_ratio()
-(-10, 1)
->>> (0).as_integer_ratio()
-(0, 1)
-```
-
-bit_count
-
-
-bit_count()
-
-
-Number of ones in the binary representation of the absolute value of self.
-
-Also known as the population count.
-
-```
->>> bin(13)
-'0b1101'
->>> (13).bit_count()
-3
-```
-
-bit_length
-
-
-bit_length()
-
-
-Number of bits necessary to represent self in binary.
-
-```
->>> bin(37)
-'0b100101'
->>> (37).bit_length()
-6
-```
-
-conjugate
-
-
-conjugate()
-
-
-Returns self, the complex conjugate of any int.
-
-
-from_bytes
-
-
-from_bytes(
- byteorder='big', *, signed=False
-)
-
-
-Return the integer represented by the given array of bytes.
-
-bytes
- Holds the array of bytes to convert. The argument must either
- support the buffer protocol or be an iterable object producing bytes.
- Bytes and bytearray are examples of built-in objects that support the
- buffer protocol.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Indicates whether two's complement is used to represent the integer.
-
-is_integer
-
-
-is_integer()
-
-
-Returns True. Exists for duck type compatibility with float.is_integer.
-
-
-to_bytes
-
-
-to_bytes(
- length=1, byteorder='big', *, signed=False
-)
-
-
-Return an array of bytes representing an integer.
-
-length
- Length of bytes object to use. An OverflowError is raised if the
- integer is not representable with the given number of bytes. Default
- is length 1.
-byteorder
- The byte order used to represent the integer. If byteorder is 'big',
- the most significant byte is at the beginning of the byte array. If
- byteorder is 'little', the most significant byte is at the end of the
- byte array. To request the native byte order of the host system, use
- `sys.byteorder' as the byte order value. Default is to use 'big'.
-signed
- Determines whether two's complement is used to represent the integer.
- If signed is False and a negative integer is given, an OverflowError
- is raised.
-
-__abs__
-
-
-__abs__()
-
-
-abs(self)
-
-
-__add__
-
-
-__add__(
- value, /
-)
-
-
-Return self+value.
-
-
-__and__
-
-
-__and__(
- value, /
-)
-
-
-Return self&value.
-
-
-__bool__
-
-
-__bool__()
-
-
-True if self else False
-
-
-__eq__
-
-
-__eq__(
- other
-)
-
-
-Return self==value.
-
-
-__floordiv__
-
-
-__floordiv__(
- value, /
-)
-
-
-Return self//value.
-
-
-__ge__
-
-
-__ge__(
- other
-)
-
-
-Return self>=value.
-
-
-__gt__
-
-
-__gt__(
- other
-)
-
-
-Return self>value.
-
-
-__invert__
-
-
-__invert__()
-
-
-~self
-
-
-__le__
-
-
-__le__(
- other
-)
-
-
-Return self<=value.
-
-
-__lshift__
-
-
-__lshift__(
- value, /
-)
-
-
-Return self<<value.
-
-
-__lt__
-
-
-__lt__(
- other
-)
-
-
-Return self<value.
-
-
-__mod__
-
-
-__mod__(
- value, /
-)
-
-
-Return self%value.
-
-
-__mul__
-
-
-__mul__(
- value, /
-)
-
-
-Return self*value.
-
-
-__ne__
-
-
-__ne__(
- other
-)
-
-
-Return self!=value.
-
-
-__neg__
-
-
-__neg__()
-
-
--self
-
-
-__or__
-
-
-__or__(
- value, /
-)
-
-
-Return self|value.
-
-
-__pos__
-
-
-__pos__()
-
-
-+self
-
-
-__pow__
-
-
-__pow__(
- value, mod, /
-)
-
-
-Return pow(self, value, mod).
-
-
-__radd__
-
-
-__radd__(
- value, /
-)
-
-
-Return value+self.
-
-
-__rand__
-
-
-__rand__(
- value, /
-)
-
-
-Return value&self.
-
-
-__rfloordiv__
-
-
-__rfloordiv__(
- value, /
-)
-
-
-Return value//self.
-
-
-__rlshift__
-
-
-__rlshift__(
- value, /
-)
-
-
-Return value<<self.
-
-
-__rmod__
-
-
-__rmod__(
- value, /
-)
-
-
-Return value%self.
-
-
-__rmul__
-
-
-__rmul__(
- value, /
-)
-
-
-Return value*self.
-
-
-__ror__
-
-
-__ror__(
- value, /
-)
-
-
-Return value|self.
-
-
-__rpow__
-
-
-__rpow__(
- value, mod, /
-)
-
-
-Return pow(value, self, mod).
-
-
-__rrshift__
-
-
-__rrshift__(
- value, /
-)
-
-
-Return value>>self.
-
-
-__rshift__
-
-
-__rshift__(
- value, /
-)
-
-
-Return self>>value.
-
-
-__rsub__
-
-
-__rsub__(
- value, /
-)
-
-
-Return value-self.
-
-
-__rtruediv__
-
-
-__rtruediv__(
- value, /
-)
-
-
-Return value/self.
-
-
-__rxor__
-
-
-__rxor__(
- value, /
-)
-
-
-Return value^self.
-
-
-__sub__
-
-
-__sub__(
- value, /
-)
-
-
-Return self-value.
-
-
-__truediv__
-
-
-__truediv__(
- value, /
-)
-
-
-Return self/value.
-
-
-__xor__
-
-
-__xor__(
- value, /
-)
-
-
-Return self^value.
-
-
-
-
-
-
-
-
-
-Class Variables: `ACTIVE`, `CREATING`, `FAILED`, `STATE_UNSPECIFIED`.
-
-
-
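-
-#### Example
-
-A polling sketch built on the states listed above; the tuned model name and sleep interval are illustrative assumptions:
-
-```
-import time
-
-import google.generativeai as genai
-from google.generativeai.types import TunedModelState
-
-# Assumes genai.configure(api_key=...) has already been called.
-name = "tunedModels/my-sentiment-model"  # hypothetical name
-
-tuned = genai.get_tuned_model(name)
-while tuned.state == TunedModelState.CREATING:
-    time.sleep(30)
-    tuned = genai.get_tuned_model(name)
-
-if tuned.state == TunedModelState.ACTIVE:
-    print("Ready to use.")
-elif tuned.state == TunedModelState.FAILED:
-    print("Tuning failed.")
-```
-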
diff --git a/docs/api/google/generativeai/types/TypedDict.md b/docs/api/google/generativeai/types/TypedDict.md
deleted file mode 100644
index 4e7aeb78d..000000000
--- a/docs/api/google/generativeai/types/TypedDict.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# google.generativeai.types.TypedDict
-
-
-
-
-
-
-
-A simple typed namespace. At runtime it is equivalent to a plain dict.
-
-
-
-google.generativeai.types.TypedDict(
- typename, fields, /, *, total=True, closed=False, **kwargs
-)
-
-
-
-
-
-
-TypedDict creates a dictionary type such that a type checker will expect all
-instances to have a certain set of keys, where each key is
-associated with a value of a consistent type. This expectation
-is not checked at runtime.
-
-Usage::
-
- class Point2D(TypedDict):
- x: int
- y: int
- label: str
-
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
-
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
-
-The type info can be accessed via the Point2D.__annotations__ dict, and
-the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
-TypedDict supports an additional equivalent form::
-
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
-
-By default, all keys must be present in a TypedDict. It is possible
-to override this by specifying totality::
-
- class Point2D(TypedDict, total=False):
- x: int
- y: int
-
-This means that a Point2D TypedDict can have any of the keys omitted. A type
-checker is only expected to support a literal False or True as the value of
-the total argument. True is the default, and makes all items defined in the
-class body be required.
-
-The Required and NotRequired special forms can also be used to mark
-individual keys as being required or not required::
-
- class Point2D(TypedDict):
- x: int # the "x" key must always be present (Required is the default)
- y: NotRequired[int] # the "y" key can be omitted
-
-See PEP 655 for more details on Required and NotRequired.
\ No newline at end of file
diff --git a/docs/api/google/generativeai/types/get_default_file_client.md b/docs/api/google/generativeai/types/get_default_file_client.md
deleted file mode 100644
index 9d575aa98..000000000
--- a/docs/api/google/generativeai/types/get_default_file_client.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# google.generativeai.types.get_default_file_client
-
-
-
-
-
-
-
-
-
-
-
-google.generativeai.types.get_default_file_client() -> glm.FilesServiceClient
-
-
-
-
-
diff --git a/docs/api/google/generativeai/types/to_file_data.md b/docs/api/google/generativeai/types/to_file_data.md
deleted file mode 100644
index 2b0f5a819..000000000
--- a/docs/api/google/generativeai/types/to_file_data.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# google.generativeai.types.to_file_data
-
-
-
-
-
-
-
-
-
-
-
-google.generativeai.types.to_file_data(
- file_data: FileDataType
-)
-
-
-
-
-
diff --git a/docs/api/google/generativeai/update_tuned_model.md b/docs/api/google/generativeai/update_tuned_model.md
deleted file mode 100644
index 469b3f6e6..000000000
--- a/docs/api/google/generativeai/update_tuned_model.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# google.generativeai.update_tuned_model
-
-
-
-
-
-
-
-Calls the API to push updates to a specified tuned model; only certain attributes are updatable.
-
-
-
-google.generativeai.update_tuned_model(
- tuned_model: (str | protos.TunedModel),
- updates: (dict[str, Any] | None) = None,
- *,
- client: (glm.ModelServiceClient | None) = None,
- request_options: (helper_types.RequestOptionsType | None) = None
-) -> model_types.TunedModel
-
-
-
-
-
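-
-#### Example
-
-A minimal sketch; the tuned model name is hypothetical, and which fields may be updated is not spelled out on this page:
-
-```
-import google.generativeai as genai
-
-# Assumes genai.configure(api_key=...) has already been called.
-updated = genai.update_tuned_model(
-    "tunedModels/my-sentiment-model",               # hypothetical name
-    {"description": "Retrained on the May data"},   # assumed to be an updatable field
-)
-print(updated.description)
-```
-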
diff --git a/docs/api/google/generativeai/upload_file.md b/docs/api/google/generativeai/upload_file.md
deleted file mode 100644
index 3dda9fab8..000000000
--- a/docs/api/google/generativeai/upload_file.md
+++ /dev/null
@@ -1,123 +0,0 @@
-
-# google.generativeai.upload_file
-
-
-
-
-
-
-
-Calls the API to upload a file using a supported file service.
-
-
-
-google.generativeai.upload_file(
- path: (str | pathlib.Path | os.PathLike | IOBase),
- *,
- mime_type: (str | None) = None,
- name: (str | None) = None,
- display_name: (str | None) = None,
- resumable: bool = True
-) -> file_types.File
-
-
-
-
-
-
-
-
-
-
-| Args | Description |
-| --- | --- |
-| `path` | The path to the file or a file-like object (e.g., BytesIO) to be uploaded. |
-| `mime_type` | The MIME type of the file. If not provided, it will be inferred from the file extension. |
-| `name` | The name of the file in the destination (e.g., 'files/sample-image'). If not provided, a system generated ID will be created. |
-| `display_name` | Optional display name of the file. |
-| `resumable` | Whether to use the resumable upload protocol. By default, this is enabled. See details at https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.MediaFileUpload-class.html#resumable |
-
-
-
-
-
-
-
-
-| Returns | |
-| --- | --- |
-| `file_types.File` | The `File` response for the uploaded file. |
-
-
-
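-
-#### Example
-
-A short usage sketch; the file path and model name are illustrative assumptions:
-
-```
-import google.generativeai as genai
-
-# Assumes genai.configure(api_key=...) has already been called.
-sample = genai.upload_file("sample.png", display_name="Sample image")  # hypothetical file
-print(sample.name, sample.uri)
-
-model = genai.GenerativeModel("gemini-1.5-flash")  # hypothetical model name
-response = model.generate_content([sample, "Describe this image."])
-print(response.text)
-```
-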