From 8a99fde0f46eaaa6068a02c4dc963628b7584668 Mon Sep 17 00:00:00 2001
From: Zachary Kent
Date: Wed, 23 Oct 2024 18:13:26 -0400
Subject: [PATCH] drop anyscale

---
 .vscode/launch.json     | 17 -----------------
 README.md               |  9 ---------
 cmd/moki/moki.go        |  2 +-
 internal/tools/tools.go |  6 +-----
 4 files changed, 2 insertions(+), 32 deletions(-)

diff --git a/.vscode/launch.json b/.vscode/launch.json
index d3a59b2..a6b60bc 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -17,14 +17,6 @@
             "args": ["-llm=replicate", "give me some python code to classify images"],
             "envFile": "${workspaceFolder}/.env",
         },
-        {
-            "name": "🖥️ start anyscale 🖥️",
-            "type": "go",
-            "request": "launch",
-            "program": "${workspaceFolder}/cmd/moki/moki.go",
-            "args": ["-llm=anyscale", "give me some python code to classify images"],
-            "envFile": "${workspaceFolder}/.env",
-        },
         {
             "name": "🖥️ conversation 🖥️",
             "type": "go",
@@ -43,15 +35,6 @@
             "envFile": "${workspaceFolder}/.env",
             "console": "integratedTerminal",
         },
-        {
-            "name": "🖥️ anyscale conversation 🖥️",
-            "type": "go",
-            "request": "launch",
-            "program": "${workspaceFolder}/cmd/moki/moki.go",
-            "args": ["-c", "-llm=anyscale"],
-            "envFile": "${workspaceFolder}/.env",
-            "console": "integratedTerminal",
-        },
         {
             "name": "🖥️ run moki with stdin 🖥️",
             "type": "go",
diff --git a/README.md b/README.md
index cb4a6ed..2313ad4 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,6 @@ Conversation mode can explain code snippets, generate unit tests, and scaffold n
 - Set your API key as an environment variable:
   ```bash
   export OPENAI_API_KEY=
-  export ANYSCALE_API_KEY=
   export REPLICATE_API_TOKEN=
   ```
 
@@ -39,7 +38,6 @@ Conversation mode can explain code snippets, generate unit tests, and scaffold n
 ## Configuration
 - There are a few options for the API provider:
   - OpenAI (https://platform.openai.com/docs/overview)
-  - Anyscale (https://www.anyscale.com/endpoints)
   - Replicate (https://replicate.com/docs)
 ```
 Flags:
@@ -55,12 +53,6 @@ Model Options:
    - [Default] gpt-3.5-turbo, aka: turbo35
    - gpt-4-turbo, aka: turbo
    - gpt-4o, aka: gpt4o
-  - Anyscale:
-    - [Default] meta-llama-3-8b, aka: l3-8b
-    - meta-llama-3-70b, aka: l3-70b
-    - mistralai/Mixtral-8x7B-Instruct-v0.1, aka: m8x7b
-    - mistralai/Mistral-7B-Instruct-v0.1, aka: m7b
-    - codellama/CodeLlama-70b-Instruct-hf, aka: cl70b
   - Replicate:
    - [Default] meta-llama-3-8b, aka: l3-8b
    - meta-llama-3-8b-instruct, aka: l3-8b-instruct
@@ -79,7 +71,6 @@ moki -c
 By default the assistant will use OpenAI. To use another, run the assistant with a flag.
 ```bash
 moki -llm=openai
-moki -llm=anyscale
 moki -llm=replicate
 ```
 
diff --git a/cmd/moki/moki.go b/cmd/moki/moki.go
index 7ff9e21..bfc7192 100644
--- a/cmd/moki/moki.go
+++ b/cmd/moki/moki.go
@@ -42,7 +42,7 @@ func main() {
     // Define the flags
     helpFlag := flag.Bool("h", false, "Show this message")
     convFlag := flag.Bool("c", false, "Start a conversation with Moki")
-    aiFlag := flag.String("llm", string(aiutil.Anyscale), "Selct the LLM provider, either OpenAI, Anyscale, or Replicate")
+    aiFlag := flag.String("llm", string(aiutil.OpenAI), "Select the LLM provider, either OpenAI or Replicate")
     modelFlag := flag.String("m", "", "Set the model to use for the LLM response")
     temperatureFlag := flag.Float64("t", aiutil.DefaultTemp, "Set the temperature for the LLM response")
     maxTokensFlag := flag.Int("max-tokens", aiutil.DefaultMaxTokens, "Set the maximum number of tokens to generate per response")
diff --git a/internal/tools/tools.go b/internal/tools/tools.go
index a6c444a..b2420ff 100644
--- a/internal/tools/tools.go
+++ b/internal/tools/tools.go
@@ -44,7 +44,6 @@ Flags:
 
 API Keys:
  - export OPENAI_API_KEY=
- - export ANYSCALE_API_KEY=
  - export REPLICATE_API_TOKEN=
 
 Model Options:
@@ -57,8 +56,5 @@ Model Options:
    - meta-llama-3-8b-instruct, aka: l3-8b-instruct
    - meta-llama-3-70b, aka: l3-70b
    - meta-llama-3-70b-instruct, aka: l3-70b-instruct
-  - Anyscale:
-    - [Default] mistralai/Mixtral-8x7B-Instruct-v0.1, aka: m8x7b (default)
-    - mistralai/Mistral-7B-Instruct-v0.1, aka: m7b
-    - codellama/CodeLlama-70b-Instruct-hf, aka: cl70b
+
 `
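
For reviewers: after this patch the -llm flag defaults to OpenAI and only two providers remain. Below is a minimal, self-contained sketch of that surviving selection logic, not the repository's code; the provider string values are assumptions standing in for aiutil.OpenAI and aiutil.Replicate, which this diff does not show.

```go
// Hypothetical sketch of the -llm flag behavior left in cmd/moki/moki.go
// after this patch. The provider constants are assumed values; the real
// code takes them from the aiutil package.
package main

import (
	"flag"
	"fmt"
	"os"
	"strings"
)

const (
	providerOpenAI    = "openai"    // assumed value of aiutil.OpenAI
	providerReplicate = "replicate" // assumed value of aiutil.Replicate
)

func main() {
	// OpenAI is the new default now that Anyscale is gone.
	aiFlag := flag.String("llm", providerOpenAI, "Select the LLM provider, either OpenAI or Replicate")
	flag.Parse()

	switch strings.ToLower(*aiFlag) {
	case providerOpenAI, providerReplicate:
		fmt.Printf("using provider %q\n", *aiFlag)
	default:
		// e.g. -llm=anyscale is no longer a supported value.
		fmt.Fprintf(os.Stderr, "unsupported provider %q\n", *aiFlag)
		os.Exit(1)
	}
}
```

Whether moki rejects a stale -llm=anyscale at parse time or fails later when building the client is not visible in this diff; the sketch simply makes the narrowed choice explicit.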