diff --git a/openai/LICENSE.txt b/openai/LICENSE.txt
new file mode 100644
index 0000000..cdb0138
--- /dev/null
+++ b/openai/LICENSE.txt
@@ -0,0 +1,14 @@
+Copyright (c) 2017+ LocalStack contributors
+Copyright (c) 2016 Atlassian Pty Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/openai/Makefile b/openai/Makefile
new file mode 100644
index 0000000..d63477e
--- /dev/null
+++ b/openai/Makefile
@@ -0,0 +1,39 @@
+VENV_BIN = python3 -m venv
+VENV_DIR ?= .venv
+VENV_ACTIVATE = $(VENV_DIR)/bin/activate
+VENV_RUN = . $(VENV_ACTIVATE)
+
+venv: $(VENV_ACTIVATE)
+
+$(VENV_ACTIVATE): setup.py setup.cfg
+	test -d $(VENV_DIR) || $(VENV_BIN) $(VENV_DIR)
+	$(VENV_RUN); pip install --upgrade pip setuptools plux wheel
+	$(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
+	$(VENV_RUN); pip install -e .
+	touch $(VENV_ACTIVATE)
+
+clean:
+	rm -rf $(VENV_DIR)/
+	rm -rf build/
+	rm -rf .eggs/
+	rm -rf *.egg-info/
+
+lint: ## Run code linter to check code style
+	($(VENV_RUN); python -m pflake8 --show-source)
+
+format: ## Run black and isort code formatter
+	$(VENV_RUN); python -m isort .; python -m black .
+
+install: venv
+	$(VENV_RUN); python setup.py develop
+
+dist: venv
+	$(VENV_RUN); python setup.py sdist bdist_wheel
+
+publish: clean-dist venv dist
+	$(VENV_RUN); pip install --upgrade twine; twine upload dist/*
+
+clean-dist: clean
+	rm -rf dist/
+
+.PHONY: clean clean-dist dist format install lint publish
diff --git a/openai/README.md b/openai/README.md
new file mode 100644
index 0000000..83d68cf
--- /dev/null
+++ b/openai/README.md
@@ -0,0 +1,61 @@
+# LocalStack OpenAI Extension
+
+![GitHub license](https://img.shields.io/badge/license-Apache%202.0-blue.svg)
+![Python version](https://img.shields.io/badge/python-3.11%2B-blue)
+[![Build Status](https://travis-ci.com/yourusername/localstack-openai-mock.svg?branch=master)](https://travis-ci.com/yourusername/localstack-openai-mock)
+
+This LocalStack extension mocks the OpenAI API for testing and development purposes. It provides a convenient way to interact with a fake OpenAI service running locally inside LocalStack.
+
+## Installation
+
+You can install this extension directly using the LocalStack extension manager:
+
+```bash
+localstack extensions install localstack-extension-openai
+```
+
+## Usage
+
+Once installed, you can access the OpenAI mock API through `localhost:4566/_extension/openai/v1`.
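+
+For a quick smoke test without the `openai` client, you can hit the endpoint with just the standard library (a minimal sketch, assuming LocalStack's default gateway port 4566 and the path route above):
+
+```python
+import json
+import urllib.request
+
+payload = {
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Hello!"}],
+}
+req = urllib.request.Request(
+    "http://localhost:4566/_extension/openai/v1/chat/completions",
+    data=json.dumps(payload).encode(),
+    headers={"Content-Type": "application/json"},
+)
+with urllib.request.urlopen(req) as resp:
+    print(json.load(resp))
+```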
+
+### Example
+
+```python
+import openai
+
+openai.organization = "org-test"
+openai.api_key = "test"
+openai.api_base = "http://localhost:4566/_extension/openai/v1"
+
+completion = openai.ChatCompletion.create(
+    model="gpt-3.5-turbo",
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Hello!"},
+    ],
+)
+print(completion.choices)
+```
+
+## Coverage
+
+- [x] Chat completion
+- [x] Engines listing
+- [x] Transcribe
+- [x] Translate
+- [x] Generate image URL
+- [ ] Generate image Base64
+- [ ] Embeddings
+- [ ] Fine tuning
+- [ ] Files
+- [ ] Moderations
+
+## Authors
+
+**Cristopher Pinzon** cristopher.pinzon@localstack.cloud
+
+## Licensing
+
+* The extension code is licensed under the Apache 2.0 License
+
+### Thank you for using the LocalStack OpenAI Extension!
diff --git a/openai/localstack_openai/__init__.py b/openai/localstack_openai/__init__.py
new file mode 100644
index 0000000..3dc1f76
--- /dev/null
+++ b/openai/localstack_openai/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/openai/localstack_openai/extension.py b/openai/localstack_openai/extension.py
new file mode 100644
index 0000000..eea5e87
--- /dev/null
+++ b/openai/localstack_openai/extension.py
@@ -0,0 +1,38 @@
+import logging
+
+from localstack import config
+from localstack.extensions.api import Extension, http
+from rolo.router import RuleAdapter, WithHost
+from werkzeug.routing import Submount
+
+LOG = logging.getLogger(__name__)
+
+
+class LocalstackOpenAIExtension(Extension):
+    name = "openai"
+
+    submount = "/_extension/openai"
+    subdomain = "openai"
+
+    def on_extension_load(self):
+        logging.getLogger("localstack_openai").setLevel(
+            logging.DEBUG if config.DEBUG else logging.INFO
+        )
+
+    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
+        from localstack_openai.mock_openai import Api
+
+        api = RuleAdapter(Api())
+
+        # expose the API under a path route (localhost:4566/_extension/openai/v1/...)
+        # and under a subdomain route (openai.<host>/v1/...)
+        router.add(
+            [
+                Submount(self.submount, [api]),
+                WithHost(f"{self.subdomain}.{config.LOCALSTACK_HOST.host}<__host__>", [api]),
+            ]
+        )
+
+        LOG.info(
+            "OpenAI mock available at %s%s", str(config.LOCALSTACK_HOST).rstrip("/"), self.submount
+        )
+        LOG.info("OpenAI mock available at %s", f"{self.subdomain}.{config.LOCALSTACK_HOST}")
diff --git a/openai/localstack_openai/mock_openai.py b/openai/localstack_openai/mock_openai.py
new file mode 100644
index 0000000..8b2cbc3
--- /dev/null
+++ b/openai/localstack_openai/mock_openai.py
@@ -0,0 +1,239 @@
+import json
+import time
+
+from faker import Faker
+from rolo import Request, Response, route
+
+faker = Faker()
+
+# number of fake words returned per chat completion
+res_len = 20
+
+
+class ChunkReader:
+    """State holder for incrementally emitting SSE chat completion chunks."""
+
+    def __init__(self, chunks, delay):
+        self.ID = ""
+        self.Created = 0
+        self.Chunks = chunks
+        self.SentFinished = False
+        self.SentDone = False
+        self.Delay = delay
+
+
+def new_chunk_reader(cs, d):
+    return ChunkReader(cs, d)
+
+
+def done(r):
+    # the stream is exhausted once both the finish chunk and [DONE] were sent
+    return r.SentFinished and r.SentDone
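+
+
+# Illustrative use of the reader protocol above (an editorial sketch, not
+# called anywhere in this module; the input words are fabricated):
+#
+#   r = new_chunk_reader(["hello", " world"], 0.1)
+#   while not done(r):
+#       frame, _ = next_chunk(r)  # bytes like b'data: {"id": ..., "choices": ...}\n\n'
+#
+# The last two frames are the empty-delta finish chunk and the terminating
+# b"data: [DONE]\n\n" sentinel.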
"gpt-3.5-turbo", + "choices": [ + { + "index": 0, + "delta": { + "content": c, + }, + "finish_reason": None, + } + ], + } + b = json.dumps(d).encode() + r.Chunks = r.Chunks[1:] + b = b"data: " + b + b"\n\n" + return b, None + + +def read(r, p): + if done(r): + return 0, None + + if r.SentFinished: + b = b"data: [DONE]\n\n" + n = min(len(b), len(p)) + p[:n] = b[:n] + r.SentDone = True + return n, None + + if len(r.Chunks) == 0: + d = { + "id": r.ID, + "object": "chat.completion.chunk", + "created": r.Created, + "model": "gpt-3.5-turbo", + "choices": [ + { + "index": 0, + "delta": {}, + "finish_reason": "stop", + } + ], + } + b = json.dumps(d).encode() + b = b"data: " + b + b"\n\n" + n = min(len(b), len(p)) + p[:n] = b[:n] + r.SentFinished = True + return n, None + + c = r.Chunks[0] + " " + d = { + "id": r.ID, + "object": "chat.completion.chunk", + "created": r.Created, + "model": "gpt-3.5-turbo", + "choices": [ + { + "index": 0, + "delta": { + "content": c, + }, + "finish_reason": None, + } + ], + } + b = json.dumps(d).encode() + b = b"data: " + b + b"\n\n" + n = min(len(b), len(p)) + p[:n] = b[:n] + r.Chunks = r.Chunks[1:] + time.sleep(r.Delay) + return n, None + + +class Api: + + @route("/v1/chat/completions", methods=["POST"]) + def chat_completions(self, request: Request): + data = request.get_data() + req = json.loads(data) + + ws = [faker.word() for _ in range(res_len)] + ws = [" " + w if i > 0 else w for i, w in enumerate(ws)] + + if not req.get("stream"): + m = "".join(ws) + return { + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": m, + }, + } + ] + } + + id = faker.uuid4() + ct = int(time.time()) + sd = 0.5 + + def generate(): + for w in ws: + b, _ = next_chunk(chunk_reader) + if b is not None: + yield b + time.sleep(sd) + + b, _ = next_chunk(chunk_reader) + if b is not None: + yield b + + yield b"[done]\n" + + chunk_reader = new_chunk_reader(ws, sd) + return Response(generate(), content_type="text/event-stream") + + @route("/v1/audio/transcriptions", methods=["POST"]) + def transcribe(self, request: Request): + return { + "text": faker.sentence(), + } + + @route("/v1/audio/translations", methods=["POST"]) + def translate(self, request: Request): + return { + "text": faker.sentence(), + } + + @route("/v1/images/generations", methods=["POST"]) + def generate_image(self, request: Request): + return {"created": int(time.time()), "data": [{"url": faker.image_url()}]} + + @route("/v1/engines", methods=["GET"]) + def list_engines(self, request: Request): + return { + "object": "list", + "data": [ + { + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai", + }, + ], + } + + +def run(port=1323): + from rolo import Router + from rolo.dispatcher import handler_dispatcher + from werkzeug import Request, run_simple + + r = Router(dispatcher=handler_dispatcher()) + r.add(Api()) + + app = Request.application(r.dispatch) + + run_simple("0.0.0.0", port, app) + + +if __name__ == "__main__": + run() diff --git a/openai/pyproject.toml b/openai/pyproject.toml new file mode 100644 index 0000000..afc78f6 --- /dev/null +++ b/openai/pyproject.toml @@ -0,0 +1,12 @@ +[tool.black] +line_length = 100 +include = '(localstack_openai|tests)/.*\.py$' + +[tool.isort] +profile = 'black' +line_length = 100 + +[tool.flake8] 
diff --git a/openai/pyproject.toml b/openai/pyproject.toml
new file mode 100644
index 0000000..afc78f6
--- /dev/null
+++ b/openai/pyproject.toml
@@ -0,0 +1,12 @@
+[tool.black]
+line_length = 100
+include = '(localstack_openai|tests)/.*\.py$'
+
+[tool.isort]
+profile = 'black'
+line_length = 100
+
+[tool.flake8]
+max-line-length = 100
+ignore = 'E501'
+exclude = './setup.py,.venv*,dist,build'
\ No newline at end of file
diff --git a/openai/setup.cfg b/openai/setup.cfg
new file mode 100644
index 0000000..643dc48
--- /dev/null
+++ b/openai/setup.cfg
@@ -0,0 +1,50 @@
+[metadata]
+name = localstack-extension-openai
+version = attr: localstack_openai.__version__
+url = https://github.com/localstack/localstack-extensions/tree/main/openai
+author = Cristopher Pinzon
+author_email = cristopher.pinzon@localstack.cloud
+summary = LocalStack Extension: OpenAI
+description = OpenAI extension for LocalStack
+long_description = file: README.md
+long_description_content_type = text/markdown; charset=UTF-8
+license = Apache License 2.0
+classifiers =
+    Development Status :: 5 - Production/Stable
+    License :: OSI Approved :: Apache Software License
+    Operating System :: OS Independent
+    Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
+    Topic :: Software Development :: Libraries
+    Topic :: Utilities
+
+[options]
+zip_safe = False
+packages = find:
+install_requires =
+    faker>=8.12.1
+    localstack>=3.1
+    plux>=1.3
+    rolo>=0.3
+tests_require =
+    openai>=0.10.2,<1.0
+    pytest>=6.2.4
+
+[options.extras_require]
+dev =
+    openai>=0.10.2,<1.0
+    pytest>=6.2.4
+    black==22.3.0
+    isort==5.10.1
+
+[options.packages.find]
+exclude =
+    tests*
+
+[options.package_data]
+* = *.md
+
+[options.entry_points]
+localstack.extensions =
+    localstack_openai = localstack_openai.extension:LocalstackOpenAIExtension
\ No newline at end of file
diff --git a/openai/setup.py b/openai/setup.py
new file mode 100644
index 0000000..c63b511
--- /dev/null
+++ b/openai/setup.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+from setuptools import setup
+
+entry_points = {
+    "localstack.extensions": [
+        "localstack_openai=localstack_openai.extension:LocalstackOpenAIExtension"
+    ],
+}
+
+setup(entry_points=entry_points)
\ No newline at end of file
diff --git a/openai/tests/sample.wav b/openai/tests/sample.wav
new file mode 100644
index 0000000..794b832
Binary files /dev/null and b/openai/tests/sample.wav differ
diff --git a/openai/tests/test_api.py b/openai/tests/test_api.py
new file mode 100644
index 0000000..8dbe8cd
--- /dev/null
+++ b/openai/tests/test_api.py
@@ -0,0 +1,37 @@
+import os
+
+import openai
+
+openai.organization = "org-test"
+openai.api_key = "test"
+# openai.api_base = "http://localhost:1323/v1"  # standalone dev server
+openai.api_base = "http://localhost:4566/_extension/openai/v1"
+
+# resolve the fixture relative to this file so the tests work from any cwd
+SAMPLE_WAV = os.path.join(os.path.dirname(__file__), "sample.wav")
+
+
+def test_list_models():
+    models = openai.Engine.list()
+    assert len(models.data) > 0
+
+
+def test_chat_completion():
+    completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Hello!"},
+        ],
+    )
+    assert len(completion.choices) > 0
+
+
+def test_transcribe():
+    with open(SAMPLE_WAV, "rb") as f:
+        transcript = openai.Audio.transcribe("whisper-1", f)
+    assert len(transcript.text) > 0
+
+
+def test_translate():
+    with open(SAMPLE_WAV, "rb") as f:
+        translate = openai.Audio.translate("whisper-1", f)
+    assert len(translate.text) > 0
+
+
+def test_generate_image():
+    response = openai.Image.create(prompt="a white siamese cat", n=1, size="1024x1024")
+    assert response["data"][0]["url"]
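A natural follow-up would be a test for the streaming path. The sketch below is not part of this diff; it assumes the pre-1.0 `openai` client pinned in `setup.cfg`, which parses the `data: ...` / `data: [DONE]` SSE frames emitted by `mock_openai.py`:

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4566/_extension/openai/v1"


def test_chat_completion_stream():
    chunks = list(
        openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello!"}],
            stream=True,
        )
    )
    # content deltas plus the final empty-delta finish chunk
    assert len(chunks) > 1
    assert chunks[-1].choices[0].finish_reason == "stop"
```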