Merge pull request #23 from Forward-Operators/feat/web
web ui
meal authored Jul 28, 2023
2 parents 27888ca + be1a363 commit 3dcceb9
Showing 207 changed files with 54,046 additions and 850 deletions.
12 changes: 11 additions & 1 deletion README.md
@@ -16,7 +16,7 @@ Everyone is welcome to contribute!

## Features

-- Command-line execution of prompts
+- Command-line execution of prompts (now with web UI!)
- Quick iteration on prompt design and parameter refinement with `watch` command
- YAML configuration ties prompts to models and their configurations
- Write prompt-scripts with #!/usr/bin/prr shebang and execute them directly
@@ -108,6 +108,16 @@ ELEVEN_LABS_API_KEY="9db0...."
DEFAULT_SERVICE="openai/chat/gpt-3.5-turbo"
```

### Running the web user interface

Simply run `prr` with the `ui` command and your prompt path (if the prompt file doesn't exist, it will be created from the default template), like so:

```sh
$ prr ui ~/Desktop/my-prompt
```

A web browser will be launched with the UI connected to your prompt; from there you can launch runs and analyze the results (the underlying server listens on `localhost:8400`).

#### For Google PaLM, you need to install the following dependencies:
You need to install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) and have access to Vertex AI with Generative AI enabled.
`prr` assumes you're logged in to your Google Cloud account and have access to the project you want to use.
28 changes: 14 additions & 14 deletions examples/configured/chihuahua.yaml
@@ -1,19 +1,19 @@
 version: 1
 prompt:
-# more advanced prompt definition.
-# you can use either one of the two options
-# - content_file
-# - messages
-#
-# using content_file will make prr read the content
-# of that template and render it into simple text to use.
-# content_file: '_long_prompt_about_chihuahua'
-#
-# using 'messages' key instead give you finer control
-# over what messages are sent with what roles.
-# this mimics https://platform.openai.com/docs/guides/chat
-# structures currently
-messages:
+  # more advanced prompt definition.
+  # you can use either one of the two options
+  # - content_file
+  # - messages
+  #
+  # using content_file will make prr read the content
+  # of that template and render it into simple text to use.
+  # content_file: '_long_prompt_about_chihuahua'
+  #
+  # using 'messages' key instead give you finer control
+  # over what messages are sent with what roles.
+  # this mimics https://platform.openai.com/docs/guides/chat
+  # structures currently
+  messages:
     - role: 'system'
       content: 'You, Henry, are a little Chihuahua dog. That is all you need to know.'
     - role: 'assistant'
1,201 changes: 574 additions & 627 deletions poetry.lock

Large diffs are not rendered by default.

20 changes: 18 additions & 2 deletions prr/__main__.py
@@ -2,16 +2,19 @@

import argparse
import os
import sys

from prr.commands.run import RunPromptCommand
from prr.commands.ui import UIPromptCommand
from prr.commands.watch import WatchPromptCommand
from prr.prompt.model_options import ModelOptions
from prr.utils.config import load_config

config = load_config()


def check_if_prompt_exists(prompt_path):
return os.path.exists(prompt_path) or os.path.exists(prompt_path + ".yaml")


def main():
parser = argparse.ArgumentParser(
description="Run a prompt against configured models.",
@@ -30,6 +33,9 @@ def main():
script_parser = sub_parsers.add_parser(
"script", help="prompt script mode for use with #!/usr/bin/prr"
)
ui_parser = sub_parsers.add_parser(
"ui", help="launch a web UI to analyze saved runs"
)

def add_common_args(_parser):
_parser.add_argument(
@@ -89,22 +95,32 @@ def add_common_args(_parser):
action="store_true",
default=False,
)

_parser.add_argument("prompt_path", help="Path to prompt to run")

add_common_args(run_parser)
add_common_args(watch_parser)
add_common_args(script_parser)

ui_parser.add_argument("prompt_path", help="Path to prompt to analyze")

watch_parser.add_argument(
"--cooldown", "-c", type=int, help="How much to wait after a re-run", default=5
)

args, prompt_args = parser.parse_known_args()
parsed_args = vars(args)

if parsed_args["command"] == "ui":
if not check_if_prompt_exists(parsed_args["prompt_path"]):
raise Exception(f"Prompt file {parsed_args['prompt_path']} does not exist")
command = UIPromptCommand(parsed_args)
command.start()

if parsed_args["command"] == "script":
parsed_args["quiet"] = True
parsed_args["abbrev"] = False

command = RunPromptCommand(parsed_args, prompt_args)
command.run_prompt()

60 changes: 31 additions & 29 deletions prr/commands/run.py
@@ -26,8 +26,13 @@ def __init__(self, args, prompt_args=None):
         else:
             self.console = Console(log_time=False, log_path=False)
 
+        if self.args["log"]:
+            self.save_run = True
+        else:
+            self.save_run = False
+
         self.load_prompt_for_path()
-        self.runner = Runner(self.prompt_config, self.prompt_args)
+        self.runner = Runner(self.prompt_config, self.save_run, self.prompt_args)
 
     def print_prompt(self, request):
         if self.args["abbrev"]:
@@ -88,26 +93,11 @@ def print_run_results(self, result, run_save_directory):

         if run_save_directory:
             self.console.log(f"💾 {run_save_directory}")
+            self.console.log("")
 
-    def run_prompt_on_service(self, service_name, save=False):
-        service_config = self.prompt_config.service_with_name(service_name)
-        service_config.process_option_overrides(self.args)
-        options = service_config.options
-
-        with self.console.status(
-            f":robot: [bold green]{service_name}[/bold green]"
-        ) as status:
-            self.runner.prepare_service_run(service_name, self.args)
-
-            request = self.runner.current_run_request()
-
-            self.print_run_parameters(service_name, request)
-            self.print_prompt(request)
-
-            status.update(status="running model", spinner="dots8Bit")
-            result, run_save_directory = self.runner.run(service_name, save)
-
-            self.print_run_results(result, run_save_directory)
+    def on_request(self, service_name, request):
+        self.print_run_parameters(service_name, request)
+        self.print_prompt(request)
 
     def load_prompt_for_path(self):
         prompt_path = self.args["prompt_path"]
@@ -117,12 +107,12 @@ def load_prompt_for_path(self):
         loader = PromptConfigLoader()
         self.prompt_config = loader.load_from_path(prompt_path)
 
-    def run_prompt(self):
-        services_to_run = self.prompt_config.configured_services()
+    def services_to_run(self):
+        _services = self.prompt_config.configured_services()
 
-        if services_to_run == []:
+        if _services == []:
             if self.args["service"]:
-                services_to_run = [self.args["service"]]
+                _services = [self.args["service"]]
                 self.console.log(
                     f":racing_car: Running service {self.args['service']}."
                 )
@@ -132,10 +122,22 @@ def run_prompt(self):
                 )
                 exit(-1)
         else:
-            self.console.log(f":racing_car: Running services: {services_to_run}")
+            self.console.log(f":racing_car: Running services: {_services}")
 
-        for service_name in services_to_run:
-            if len(services_to_run) > 1:
-                self.console.log("")
+        return _services
 
-            self.run_prompt_on_service(service_name, self.args["log"])
+    def run_prompt(self):
+        services = self.services_to_run()
+
+        self.runner.run_services(
+            services,
+            self.args,
+            {
+                "on_request": lambda service_name, request: self.on_request(
+                    service_name, request
+                ),
+                "on_result": lambda service_name, result, run_save_directory: self.print_run_results(
+                    result, run_save_directory
+                ),
+            },
+        )
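
This refactor moves per-service orchestration out of the command and into `Runner.run_services`, with the command supplying display hooks. The Runner side is not part of this excerpt; the following is a minimal sketch of the contract implied by the callback dict above, pieced together from the removed `run_prompt_on_service`, not the actual Runner implementation:

```python
# Hypothetical sketch of Runner.run_services, inferred from the callbacks
# passed in run_prompt() above and the removed run_prompt_on_service logic.
class Runner:
    def __init__(self, prompt_config, save_run, prompt_args):
        self.prompt_config = prompt_config
        self.save_run = save_run  # new constructor argument added in this commit
        self.prompt_args = prompt_args

    def run_services(self, service_names, args, callbacks):
        for service_name in service_names:
            # set up the request for this service (options, CLI overrides, etc.)
            self.prepare_service_run(service_name, args)
            request = self.current_run_request()

            # let the caller render parameters and prompt before the model runs
            callbacks["on_request"](service_name, request)

            # execute, then hand the result back to the caller for display
            result, run_save_directory = self.run(service_name, self.save_run)
            callbacks["on_result"](service_name, result, run_save_directory)
```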
67 changes: 67 additions & 0 deletions prr/commands/ui.py
@@ -0,0 +1,67 @@
#!/usr/bin/env python

import os
from io import StringIO

import uvicorn
from rich.console import Console

console = Console(log_time=False, log_path=False)

DEFAULT_TEMPLATE_PATH = os.path.join(
os.path.dirname(__file__), "..", "prompt_template.yaml"
)


class UIPromptCommand:
def __init__(self, args, prompt_args=None):
self.args = args
self.prompt_config = None
self.prompt_path = None

if self.args.get("quiet"):
self.console = Console(file=StringIO())
else:
self.console = Console(log_time=False, log_path=False)

def create_default_config(self, prompt_path):
if os.access(os.path.dirname(prompt_path), os.W_OK):
self.console.log(
f":magnifying_glass_tilted_left: {prompt_path} not found, creating it from template"
)

with open(prompt_path, "w") as dst:
with open(DEFAULT_TEMPLATE_PATH, "r") as src:
dst.write(src.read())

self.prompt_path = prompt_path
else:
raise Exception(f"Cannot create prompt file {prompt_path}")

def prepare_prompt_path(self):
prompt_path = os.path.abspath(self.args["prompt_path"])

if not prompt_path.endswith(".yaml"):
prompt_path = prompt_path + ".yaml"

if os.path.exists(prompt_path):
if os.access(prompt_path, os.R_OK):
self.console.log(
f":magnifying_glass_tilted_left: Reading prompt from {prompt_path}"
)

self.prompt_path = prompt_path
else:
raise Exception(f"Cannot access prompt file {prompt_path}")
else:
self.create_default_config(prompt_path)

def start(self):
self.prepare_prompt_path()

# a vital hack to pass the prompt path to the web ui
os.environ["__PRR_WEB_UI_PROMPT_PATH"] = self.prompt_path

uvicorn.run(
"prr.ui:app", host="localhost", port=8400, reload=False, access_log=False
)
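
The `prr.ui:app` module served here (part of the new web UI tree in this commit, not shown in this excerpt) picks the prompt path back up from that environment variable. A minimal sketch of the handoff, assuming a FastAPI app (uvicorn only requires an ASGI app named `app`; the route and helper names below are hypothetical, not the actual `prr.ui` code):

```python
# Hypothetical sketch of how prr/ui might recover the prompt path set by
# UIPromptCommand.start(); only the env var name comes from the code above.
import os

from fastapi import FastAPI

app = FastAPI()

# read back the path that UIPromptCommand exported before uvicorn.run()
PROMPT_PATH = os.environ["__PRR_WEB_UI_PROMPT_PATH"]


@app.get("/api/prompt")
def get_prompt():
    # serve the raw YAML so the frontend can display and edit it
    with open(PROMPT_PATH, "r") as f:
        return {"path": PROMPT_PATH, "content": f.read()}
```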
11 changes: 0 additions & 11 deletions prr/prompt/__init__.py
@@ -1,14 +1,3 @@
-import os
-
-import jinja2
-import yaml
-from jinja2 import meta
-
-from prr.prompt.prompt_config import PromptConfig
-from prr.prompt.prompt_template import PromptTemplate
-from prr.prompt.service_config import ServiceConfig
-
-
 class Prompt:
     def __init__(self, content, config=None, args=None):
         self.content = content
3 changes: 3 additions & 0 deletions prr/prompt/model_options.py
@@ -26,6 +26,9 @@ def select(self, option_keys):
         return ModelOptions(_options, False)
 
     def update_options(self, options):
+        if options == None:
+            return
+
         for key in options.keys():
             if options[key] != None:
                 if key not in self.options_set:
9 changes: 3 additions & 6 deletions prr/prompt/prompt_config.py
@@ -78,13 +78,10 @@ def option_for_service(self, service_name, option_name):
         return self.options_for_service(service_name).value(option_name)
 
     def file_dependencies(self):
-        _dependencies = []
-        for message in self.template.messages:
-            for dependency in message.file_dependencies:
-                if dependency != None and dependency not in _dependencies:
-                    _dependencies.append(dependency)
+        if self.template:
+            return self.template.file_dependencies()
 
-        return _dependencies
+        return []

####################################################

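`PromptConfig` now delegates dependency collection to the template. The new `PromptTemplate.file_dependencies()` is not shown in this excerpt; presumably it absorbed the loop removed above, along the lines of this sketch:

```python
# Hypothetical PromptTemplate.file_dependencies(), mirroring the loop that
# was removed from PromptConfig above; not the actual method body.
class PromptTemplate:
    def file_dependencies(self):
        _dependencies = []

        # collect unique file dependencies across all prompt messages
        for message in self.messages:
            for dependency in message.file_dependencies:
                if dependency is not None and dependency not in _dependencies:
                    _dependencies.append(dependency)

        return _dependencies
```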
1 change: 1 addition & 0 deletions prr/prompt/prompt_template.py
@@ -1,6 +1,7 @@
 import os
 
 import jinja2
+from jinja2 import meta
 
 
 class PromptMessage:
23 changes: 23 additions & 0 deletions prr/prompt_template.yaml
@@ -0,0 +1,23 @@
version: 1
prompt:
messages:
- role: 'system'
content: >
You are sporting goods store assistant
and you answer customer queries with fun responses,
making up stock items and prices as you go, suggesting
irrelevant things.
- role: 'assistant'
content: 'How can I help you?'
name: 'Henry'
- role: 'user'
# content_file: '_user_prompt'
content: 'I am looking for a pair of running shoes.'
name: 'Jane'
services:
gpt35:
model: 'openai/chat/gpt-3.5-turbo'
options:
temperature: 0.9
options:
max_tokens: 128
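
This is the default template that `prr ui` copies into place (via `create_default_config` above) when the prompt path doesn't exist yet. Once saved, for example as `~/Desktop/my-prompt.yaml`, the same prompt can also be run non-interactively with the existing `run` subcommand:

```sh
$ prr run ~/Desktop/my-prompt
```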