new extension openai extension #36

Merged
merged 4 commits into from Jan 30, 2024

14 changes: 14 additions & 0 deletions openai/LICENSE.txt
@@ -0,0 +1,14 @@
Copyright (c) 2017+ LocalStack contributors
Copyright (c) 2016 Atlassian Pty Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
39 changes: 39 additions & 0 deletions openai/Makefile
@@ -0,0 +1,39 @@
VENV_BIN = python3 -m venv
VENV_DIR ?= .venv
VENV_ACTIVATE = $(VENV_DIR)/bin/activate
VENV_RUN = . $(VENV_ACTIVATE)

venv: $(VENV_ACTIVATE)

$(VENV_ACTIVATE): setup.py setup.cfg
	test -d .venv || $(VENV_BIN) .venv
	$(VENV_RUN); pip install --upgrade pip setuptools plux wheel
	$(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
	$(VENV_RUN); pip install -e .
	touch $(VENV_DIR)/bin/activate

clean:
	rm -rf .venv/
	rm -rf build/
	rm -rf .eggs/
	rm -rf *.egg-info/

lint: ## Run code linter to check code style
	($(VENV_RUN); python -m pflake8 --show-source)

format: ## Run black and isort code formatters
	$(VENV_RUN); python -m isort .; python -m black .

install: venv
	$(VENV_RUN); python setup.py develop

dist: venv
	$(VENV_RUN); python setup.py sdist bdist_wheel

publish: clean-dist venv dist
	$(VENV_RUN); pip install --upgrade twine; twine upload dist/*

clean-dist: clean
	rm -rf dist/

.PHONY: venv clean lint format install dist publish clean-dist
61 changes: 61 additions & 0 deletions openai/README.md
@@ -0,0 +1,61 @@
# LocalStack OpenAI Extension

![GitHub license](https://img.shields.io/badge/license-Apache%202.0-blue.svg)
![Python version](https://img.shields.io/badge/python-3.11%2B-blue)
[![Build Status](https://travis-ci.com/yourusername/localstack-openai-mock.svg?branch=master)](https://travis-ci.com/yourusername/localstack-openai-mock)

This LocalStack extension mocks the OpenAI API for testing and development purposes, letting your applications talk to a local stand-in instead of the real service.

## Installation

You can install this extension directly using the LocalStack extension manager:

```bash
localstack extensions install localstack-extension-openai
```

## Usage

Once installed, you can access the OpenAI Mock API through `localhost:4510/v1`.

### Example

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)
print(completion.choices)
```
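
The mock also implements the streaming variant of chat completions. A minimal sketch, assuming the same pre-1.0 `openai` client as above:

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

# With stream=True the mock emits server-sent events: one chunk per word,
# a final chunk with finish_reason="stop", then a "data: [DONE]" terminator.
stream = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    print(delta.get("content", ""), end="", flush=True)
```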

## Coverage
- [x] Chat completion
- [x] Engines Listing
- [x] Transcribe
- [x] Translate
- [x] Generate Image URL
- [ ] Generate Image Base64
- [ ] Embeddings
- [ ] Fine Tuning
- [ ] Files
- [ ] Moderations
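
The checked audio and image endpoints above can also be exercised over plain HTTP. A sketch using `requests` (an assumption, not a dependency of the extension); the mock fakes its payloads, so any request body is accepted:

```python
import requests

BASE = "http://localhost:4510/v1"

# Transcription: the mock returns a faked {"text": ...} regardless of input.
resp = requests.post(f"{BASE}/audio/transcriptions", files={"file": ("audio.mp3", b"dummy")})
print(resp.json()["text"])

# Image generation: returns {"created": ..., "data": [{"url": ...}]}.
resp = requests.post(f"{BASE}/images/generations", json={"prompt": "a cat"})
print(resp.json()["data"][0]["url"])
```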

## Authors
**Cristopher Pinzon** [email protected]


## Licensing
* The extension code is licensed under the Apache 2.0 License

### Thank you for using the LocalStack OpenAI Extension!
1 change: 1 addition & 0 deletions openai/localstack_openai/__init__.py
@@ -0,0 +1 @@
__version__ = "0.1.0"
38 changes: 38 additions & 0 deletions openai/localstack_openai/extension.py
@@ -0,0 +1,38 @@
import logging

from localstack import config
from localstack.extensions.api import Extension, http
from rolo.router import RuleAdapter, WithHost
from werkzeug.routing import Submount

LOG = logging.getLogger(__name__)


class LocalstackOpenAIExtension(Extension):
    name = "openai"

    submount = "/_extension/openai"
    subdomain = "openai"

    def on_extension_load(self):
        logging.getLogger("localstack_openai").setLevel(
            logging.DEBUG if config.DEBUG else logging.INFO
        )

    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
        from localstack_openai.mock_openai import Api

        api = RuleAdapter(Api())

        # register the API under a path prefix on the gateway
        # (e.g. localhost:4566/_extension/openai/v1/chat/completions)
        # and under the "openai" subdomain
        router.add(
            [
                Submount(self.submount, [api]),
                WithHost(f"{self.subdomain}.{config.LOCALSTACK_HOST.host}<__host__>", [api]),
            ]
        )

        LOG.info(
            "OpenAI mock available at %s%s", str(config.LOCALSTACK_HOST).rstrip("/"), self.submount
        )
        LOG.info("OpenAI mock available at %s", f"{self.subdomain}.{config.LOCALSTACK_HOST}")
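
`update_gateway_routes` thus exposes the mock on the main LocalStack gateway in two ways: under the `/_extension/openai` path prefix and under an `openai.` subdomain. A sketch of calling the path-based route, assuming a default LocalStack install on port 4566:

```python
import requests

# Path-based route registered via Submount; the host-based equivalent is
# http://openai.localhost.localstack.cloud:4566/v1/chat/completions
url = "http://localhost:4566/_extension/openai/v1/chat/completions"

resp = requests.post(
    url,
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
)
print(resp.json()["choices"][0]["message"]["content"])
```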
239 changes: 239 additions & 0 deletions openai/localstack_openai/mock_openai.py
@@ -0,0 +1,239 @@
import json
import time

from faker import Faker
from rolo import Request, Response, route

faker = Faker()

res_len = 20


class ChunkReader:
    """State for one streamed chat completion (a Go-style port)."""

    def __init__(self, chunks, delay):
        self.ID = ""
        self.Created = 0
        self.Chunks = chunks
        self.SentFinished = False
        self.SentDone = False
        self.Delay = delay


def new_chunk_reader(cs, d):
    return ChunkReader(cs, d)


def done(r):
    return r.SentFinished and r.SentDone


def next_chunk(r):
    """Return the next SSE event as bytes, or (None, None) once [DONE] was sent."""
    if r.SentDone:
        return None, None

    if r.SentFinished:
        # all content and the finish chunk were sent; close with the terminator
        b = b"data: [DONE]\n\n"
        r.SentDone = True
        return b, None

    if len(r.Chunks) == 0:
        # no words left: send the final chunk with finish_reason="stop"
        d = {
            "id": r.ID,
            "object": "chat.completion.chunk",
            "created": r.Created,
            "model": "gpt-3.5-turbo",
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }

        b = json.dumps(d).encode()
        r.SentFinished = True
        b = b"data: " + b + b"\n\n"
        return b, None

    c = r.Chunks[0] + " "
    d = {
        "id": r.ID,
        "object": "chat.completion.chunk",
        "created": r.Created,
        "model": "gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": c,
                },
                "finish_reason": None,
            }
        ],
    }
    b = json.dumps(d).encode()
    r.Chunks = r.Chunks[1:]
    b = b"data: " + b + b"\n\n"
    return b, None


def read(r, p):
    """Buffer-based variant of next_chunk that copies the next SSE event
    into the bytearray p (not used by the Api routes below)."""
    if done(r):
        return 0, None

    if r.SentFinished:
        b = b"data: [DONE]\n\n"
        n = min(len(b), len(p))
        p[:n] = b[:n]
        r.SentDone = True
        return n, None

    if len(r.Chunks) == 0:
        d = {
            "id": r.ID,
            "object": "chat.completion.chunk",
            "created": r.Created,
            "model": "gpt-3.5-turbo",
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        b = json.dumps(d).encode()
        b = b"data: " + b + b"\n\n"
        n = min(len(b), len(p))
        p[:n] = b[:n]
        r.SentFinished = True
        return n, None

    c = r.Chunks[0] + " "
    d = {
        "id": r.ID,
        "object": "chat.completion.chunk",
        "created": r.Created,
        "model": "gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": c,
                },
                "finish_reason": None,
            }
        ],
    }
    b = json.dumps(d).encode()
    b = b"data: " + b + b"\n\n"
    n = min(len(b), len(p))
    p[:n] = b[:n]
    r.Chunks = r.Chunks[1:]
    time.sleep(r.Delay)
    return n, None


class Api:

    @route("/v1/chat/completions", methods=["POST"])
    def chat_completions(self, request: Request):
        data = request.get_data()
        req = json.loads(data)

        # fake a completion of `res_len` random words
        ws = [faker.word() for _ in range(res_len)]
        ws = [" " + w if i > 0 else w for i, w in enumerate(ws)]

        if not req.get("stream"):
            m = "".join(ws)
            return {
                "choices": [
                    {
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": m,
                        },
                    }
                ]
            }

        chunk_reader = new_chunk_reader(ws, 0.5)
        chunk_reader.ID = faker.uuid4()
        chunk_reader.Created = int(time.time())

        def generate():
            # one SSE event per word, then the finish chunk,
            # then the "data: [DONE]" terminator
            while not done(chunk_reader):
                b, _ = next_chunk(chunk_reader)
                if b is not None:
                    yield b
                time.sleep(chunk_reader.Delay)

        return Response(generate(), content_type="text/event-stream")

@route("/v1/audio/transcriptions", methods=["POST"])
def transcribe(self, request: Request):
return {
"text": faker.sentence(),
}

@route("/v1/audio/translations", methods=["POST"])
def translate(self, request: Request):
return {
"text": faker.sentence(),
}

@route("/v1/images/generations", methods=["POST"])
def generate_image(self, request: Request):
return {"created": int(time.time()), "data": [{"url": faker.image_url()}]}

@route("/v1/engines", methods=["GET"])
def list_engines(self, request: Request):
return {
"object": "list",
"data": [
{
"id": "model-id-0",
"object": "model",
"created": 1686935002,
"owned_by": "organization-owner",
},
{
"id": "model-id-1",
"object": "model",
"created": 1686935002,
"owned_by": "organization-owner",
},
{
"id": "model-id-2",
"object": "model",
"created": 1686935002,
"owned_by": "openai",
},
],
}


def run(port=1323):
    from rolo import Router
    from rolo.dispatcher import handler_dispatcher
    from werkzeug import Request, run_simple

    r = Router(dispatcher=handler_dispatcher())
    r.add(Api())

    app = Request.application(r.dispatch)

    run_simple("0.0.0.0", port, app)


if __name__ == "__main__":
    run()
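
For development outside LocalStack, the `run()` entrypoint above serves the same `Api` on a plain Werkzeug server (port 1323). A sketch of consuming the streaming endpoint against it, assuming `requests` is available and the module was started with `python -m localstack_openai.mock_openai`:

```python
import json

import requests

resp = requests.post(
    "http://localhost:1323/v1/chat/completions",
    json={"model": "gpt-3.5-turbo", "messages": [], "stream": True},
    stream=True,
)
for line in resp.iter_lines():
    # SSE frames look like b"data: {...}"; blank keep-alive lines are skipped
    if not line.startswith(b"data: "):
        continue
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":
        break
    chunk = json.loads(payload)
    print(chunk["choices"][0]["delta"].get("content", ""), end="")
```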