Skip to content

Commit

Permalink
Merge branch 'master' of github.com:neph1/LlamaTale
Browse files Browse the repository at this point in the history
  • Loading branch information
neph1 committed Aug 9, 2023
2 parents ec638f8 + 878c063 commit 1e1fea7
Show file tree
Hide file tree
Showing 11 changed files with 48 additions and 45 deletions.
10 changes: 8 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,17 @@ By default it uses KoboldCpp, but if you're feeling adventurous you can change l
2. Get and install KoboldCpp: https://github.com/LostRuins/koboldcpp/releases
3. Download a suitable LLM model for KoboldCpp. I recommend https://huggingface.co/TheBloke/Nous-Hermes-Llama2-GGML or https://huggingface.co/TheBloke/chronos-hermes-13B-GGML. Recently I've tested with llama-2-7b-chat-codeCherryPop.ggmlv3.q5_K_M, which seems to be working really well, too.
4. Make sure KoboldCpp works.
1. Download repo, either with 'git clone' or as a zip.
5. Download repo, either with 'git clone' or as a zip.
6. Run 'pip install -r requirements.txt'
7. Start KoboldCpp (port 5001 by default)
8. Start with ``python -m stories.prancingllama.story``
9. If you'd rather play in a browser, add the '--web' flag and connect to http://localhost:8180/tale/story

Optional:
1. If you'd rather play in a browser, add the '--web' flag and connect to http://localhost:8180/tale/story
2. If you have a v2 character card and want to skip character creation, add '--character path_to_character'
3. If you want to load a v2 character as a follower, type 'load_character path_to_character_relative_to_story_folder' in the game prompt



In the game:
You can probably read up on this, or use 'help' in the prompt, but you can move around using directions, like n, w, s, e, or type the location you want to go to.
Expand Down
3 changes: 2 additions & 1 deletion stories/prancingllama/npcs/npcs.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from tale.llm_ext import LivingNpc
from tale.player import Player
from tale.util import call_periodically, Context
from tale import lang

class InnKeeper(LivingNpc):

Expand Down Expand Up @@ -51,7 +52,7 @@ def do_random_move(self, ctx: Context) -> None:

@call_periodically(30, 60)
def do_pick_up_dishes(self, ctx: Context) -> None:
self.location.tell("%s wipes a table and picks up dishes." % capital(self.title), evoke=False)
self.location.tell(f"{lang.capital(self.title)} wipes a table and picks up dishes.", evoke=False, max_length=True)

class Patron(LivingNpc):

Expand Down
1 change: 1 addition & 0 deletions stories/prancingllama/story.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ class Story(StoryBase):
config.startlocation_player = "prancingllama.entrance"
config.startlocation_wizard = "prancingllama.entrance"
config.zones = ["prancingllama"]
config.context = "The Prancing Llama is the final outpost high up in a cold, craggy mountain range. It's frequented by adventurers and those seeking to avoid attention."

def init(self, driver: Driver) -> None:
"""Called by the game driver when it is done with its initial initialization."""
Expand Down
9 changes: 4 additions & 5 deletions tale/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -642,7 +642,6 @@ def tell(self, room_msg: str, exclude_living: 'Living'=None, specific_targets: S
that based on this message string. That will make it quite hard because you need to
parse the string again to figure out what happened... Use handle_verb / notify_action instead.
"""
#alt_prompt ="### Instruction: Location: [" + str(self.look(short=True)) + "]. Rewrite the following text in your own words using vivid language, use 'location' for context. Text:\n\n [{input_text}] \n\nEnd of text.\n\n### Response:\n"

targets = specific_targets or set()
assert isinstance(targets, (frozenset, set, list, tuple))
Expand Down Expand Up @@ -700,7 +699,7 @@ def nearby(self, no_traps: bool=True) -> Iterable['Location']:

def look(self, exclude_living: 'Living'=None, short: bool=False) -> Sequence[str]:
"""returns a list of paragraph strings describing the surroundings, possibly excluding one living from the description list"""
paragraphs = ["<location>[" + self.name + "]</>"]
paragraphs = ["<location>[" + self.title + "]</>"]
if short:
if self.exits and mud_context.config.show_exits_in_look:
paragraphs.append("Exits: " + ", ".join(sorted(set(self.exits.keys()))))
Expand Down Expand Up @@ -1273,9 +1272,9 @@ def display_direction(directions: Sequence[str]) -> str:
if not silent:
direction_txt = display_direction(direction_names or [])
if direction_txt:
message = "%s leaves %s." % (lang.capital(self.title), direction_txt)
message = f"{lang.capital(self.title)} leaves {direction_txt}."
else:
message = "%s leaves." % lang.capital(self.title)
message = f"{lang.capital(self.title)} leaves."
original_location.tell(message, exclude_living=self, evoke=False, max_length=True)
# queue event
if is_player:
Expand Down Expand Up @@ -1343,7 +1342,7 @@ def start_attack(self, victim: 'Living') -> None:
attacker_msg = "You attack %s! %s" % (victim.title, result)
victim.tell(victim_msg, evoke=True, max_length=False)
# TODO: try to get from config file instead
combat_prompt = f'### Instruction: Rewrite the following combat between user {name} and {victim.title} and result into a vivid description in less than 300 words. Location: {self.location}, {self.location.short_description}. Write one to two paragraphs, ending in either death, or a stalemate. Combat Result: {attacker_msg} ### Response:\n\n'
combat_prompt = mud_context.driver.llm_util.combat_prompt
victim.location.tell(room_msg, exclude_living=victim, specific_targets={self}, specific_target_msg=attacker_msg, evoke=True, max_length=False, alt_prompt=combat_prompt)
if dead:
remains = Container(f"remains of {dead.title}")
Expand Down
1 change: 0 additions & 1 deletion tale/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@ class Commands:
def __init__(self) -> None:
self.commands_per_priv = {"": {}} # type: Dict[str, Dict[str, Callable]]
self.no_soul_parsing = set() # type: Set[str]
self.llm_util = LlmUtil()

def add(self, verb: str, func: Callable, privilege: str="") -> None:
self.validatefunc(func)
Expand Down
5 changes: 2 additions & 3 deletions tale/driver_if.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,9 +201,8 @@ def _login_dialog_if(self, conn: PlayerConnection) -> Generator:
player.tell("\n")
prompt = self.story.welcome(player)

self.llm_util.story_background = self.resources["messages/welcome.txt"].text
player.llm_util = self.llm_util
player._llm_util = self.llm_util
self.llm_util.story_background = self.story.config.context

if prompt:
conn.input_direct("\n" + prompt) # blocks (note: cannot use yield here)
player.tell("\n")
Expand Down
11 changes: 6 additions & 5 deletions tale/llm_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@ STREAM_ENDPOINT: "/api/extra/generate/stream"
DATA_ENDPOINT: "/api/extra/generate/check"
WORD_LIMIT: 500
DEFAULT_BODY: '{"stop_sequence": "", "max_length":500, "max_context_length":4096, "temperature":1.0, "top_k":120, "top_a":0.0, "top_p":0.85, "typical_p":1.0, "tfs":1.0, "rep_pen":1.2, "rep_pen_range":256, "mirostat":2, "mirostat_tau":5.0, "mirostat_eta":0.1, "sampler_order":[6,0,1,3,4,2,5], "seed":-1}'
ANALYSIS_BODY: '{"banned_tokens":"\n\n", "stop_sequence": "", "max_length":500, "max_context_length":4096, "temperature":0.15, "top_k":120, "top_a":0.0, "top_p":0.85, "typical_p":1.0, "tfs":1.0, "rep_pen":1.2, "rep_pen_range":256, "mirostat":2, "mirostat_tau":5.0, "mirostat_eta":0.1, "sampler_order":[6,0,1,3,4,2,5], "seed":-1}'
ANALYSIS_BODY: '{"banned_tokens":"\n\n", "stop_sequence": "\n\n\n", "max_length":500, "max_context_length":4096, "temperature":0.15, "top_k":120, "top_a":0.0, "top_p":0.85, "typical_p":1.0, "tfs":1.0, "rep_pen":1.2, "rep_pen_range":256, "mirostat":2, "mirostat_tau":5.0, "mirostat_eta":0.1, "sampler_order":[6,0,1,3,4,2,5], "seed":-1}'
MEMORY_SIZE: 512
PRE_PROMPT: 'Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n'
BASE_PROMPT: "History: [{history}]. ### Instruction: Rewrite the following text in your own words using vivid language. 'History' can be used to create a context for what you write. Text:\n\n [{input_text}] \n\nEnd of text.\n\n### Response:\n"
DIALOGUE_PROMPT: 'The following is a conversation between {character1} and {character2}. {character2_description}. Chat history: {previous_conversation}\n\n {character2}s sentiment towards {character1}: {sentiment}. ### Instruction: Write a single response as {character2}, using {character2} description.\n\n### Response:\n'
ITEM_PROMPT: '### Instruction: Items:[{items}];Characters:[{character1},{character2}] Text:[{text}] \n\nIn the supplied text, was an item explicitly given, taken, dropped or put somewhere? Insert your thoughts about it in [my thoughts], and the results in "item", "from" and "to". Insert {character1}s sentiment towards {character2} in a single word in [sentiment assessment]. Write your response in JSON format. Example: {{ "thoughts":"[my thoughts]", "result": {{ "item":"", "from":"", "to":""}}, {{"sentiment":"[sentiment assessment]"}} }} End of example. \n\n Make sure the response is valid JSON\n\n### Response:\n'
BASE_PROMPT: "[Story context: {story_context}]; [History: {history}]; ### Instruction: Rewrite the following Text in your own words using the supplied Context and History to create a background for your text. Use about {max_words} words. Text:\n\n [{input_text}] \n\nEnd of text.\n\n### Response:\n"
ACTION_PROMPT: "[Story context: {story_context}]; ; [History: {history}]; The following Action is part of a roleplaying game. ### Instruction: Rewrite the Action, and nothing else, in your own words using the supplied Context, Location and History to create a background for your text. Use less than {max_words} words. Text:\n\n [{input_text}] \n\nEnd of text.\n\n### Response:\n"
DIALOGUE_PROMPT: '[Story context: {story_context}]; [Location: {location}] The following is a conversation between {character1} and {character2}. {character2_description}. [Chat history: {previous_conversation}]\n\n [{character2}s sentiment towards {character1}: {sentiment}]. ### Instruction: Write a single response for {character2} in third person pov, using {character2} description.\n\n### Response:\n'
ITEM_PROMPT: 'Items:[{items}];Characters:[{character1},{character2}] \n\n ### Instruction: Decide if there was an item explicitly given, taken, dropped or put somewhere in the following text:[Text:{text}]. Insert your thoughts about it in [my thoughts], and the results in "item", "from" and "to", or make them empty if no items was given, taken, put somewhere or dropped. Insert {character1}s sentiment towards {character2} in a single word in [sentiment assessment]. Write your response in JSON format. Example: {{ "thoughts":"[my thoughts]", "result": {{ "item":"", "from":"", "to":""}}, {{"sentiment":"[sentiment assessment]"}} }} End of example. \n\n Make sure the response is valid JSON\n\n### Response:\n'

COMBAT_PROMPT: '### Instruction: Rewrite the following combat result into a vivid description. Write one to two paragraphs, ending in either death, or a stalemate. Combat Result: {result}\n\n### Response:\n'
COMBAT_PROMPT: '### Instruction: Rewrite the following combat between user {name} and {victim.title} and result into a vivid description in less than 300 words. Location: {self.location}, {self.location.short_description}. Write one to two paragraphs, ending in either death, or a stalemate. Combat Result: {attacker_msg} ### Response:\n\n'


8 changes: 4 additions & 4 deletions tale/llm_ext.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from tale.llm_utils import LlmUtil
from tale import mud_context
from tale.base import Living, ParseResult
from tale.errors import TaleError
from tale.player import Player
Expand All @@ -12,7 +12,6 @@ def __init__(self, name: str, gender: str, *,
self.age = age
self.personality = personality
self.occupation = occupation
self.llm_util = LlmUtil()
self.conversation = ''
self.memory_size = 1024
self.sentiments = {}
Expand Down Expand Up @@ -41,11 +40,12 @@ def notify_action(self, parsed: ParseResult, actor: Living) -> None:
self.update_conversation(f"{self.title} says: \"Hi.\"")
elif parsed.verb == "say" and targeted:
self.update_conversation(f'{actor.title}:{parsed.unparsed}\n')
response, item_result, sentiment = self.llm_util.generate_dialogue(conversation=self.conversation,
response, item_result, sentiment = mud_context.driver.llm_util.generate_dialogue(conversation=self.conversation,
character_card = self.character_card,
character_name = self.title,
target = actor.title,
sentiment = self.sentiments.get(actor.title, ''))
sentiment = self.sentiments.get(actor.title, ''),
location_description=self.location.look(exclude_living=self))

self.update_conversation(f"{self.title} says: \"{response}\"")
if len(self.conversation) > self.memory_size:
Expand Down
25 changes: 14 additions & 11 deletions tale/llm_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import json
import os
import requests
import yaml
from json import JSONDecodeError
from tale.llm_io import IoUtil
Expand All @@ -26,28 +25,30 @@ def __init__(self):
self.pre_prompt = config_file['PRE_PROMPT']
self.base_prompt = config_file['BASE_PROMPT']
self.dialogue_prompt = config_file['DIALOGUE_PROMPT']
self.action_prompt = config_file['ACTION_PROMPT']
self.item_prompt = config_file['ITEM_PROMPT']
self.word_limit = config_file['WORD_LIMIT']
self._story_background = ''
self.io_util = IoUtil()
self.stream = config_file['STREAM']
self.connection = None

def evoke(self, player_io: TextBuffer, message: str, max_length : bool=False, rolling_prompt='', alt_prompt=''):
def evoke(self, player_io: TextBuffer, message: str, max_length : bool=False, rolling_prompt='', alt_prompt='', skip_history=True):
if len(message) > 0 and str(message) != "\n":
if not rolling_prompt:
rolling_prompt += self._story_background
trimmed_message = parse_utils.remove_special_chars(str(message))
base_prompt = alt_prompt if alt_prompt else self.base_prompt
amount = int(len(trimmed_message) * 2.5)
prompt = base_prompt.format(history=rolling_prompt if not alt_prompt else '', input_text=str(trimmed_message))
amount = int(len(trimmed_message) * 1.5)
prompt = base_prompt.format(
story_context=self._story_background,
history=rolling_prompt if not skip_history or alt_prompt else '',
max_words=self.word_limit if not max_length else amount,
input_text=str(trimmed_message))

rolling_prompt = self.update_memory(rolling_prompt, trimmed_message)

request_body = self.default_body
request_body['prompt'] = prompt
if max_length:
request_body['max_length'] = amount


if not self.stream:
text = self.io_util.synchronous_request(self.url + self.endpoint, request_body)
rolling_prompt = self.update_memory(rolling_prompt, text)
Expand All @@ -59,15 +60,17 @@ def evoke(self, player_io: TextBuffer, message: str, max_length : bool=False, ro
return '\n', rolling_prompt
return str(message), rolling_prompt

def generate_dialogue(self, conversation: str, character_card: str, character_name: str, target: str, sentiment = ''):
def generate_dialogue(self, conversation: str, character_card: str, character_name: str, target: str, sentiment = '', location_description = ''):
prompt = self.pre_prompt
prompt += self.dialogue_prompt.format(
story_context=self._story_background,
location=location_description,
previous_conversation=conversation,
character2_description=character_card,
character2=character_name,
character1=target,
sentiment=sentiment)

print('story context', self._story_background)
request_body = self.default_body
request_body['prompt'] = prompt
text = parse_utils.trim_response(self.io_util.synchronous_request(self.url + self.endpoint, request_body))
Expand Down
17 changes: 5 additions & 12 deletions tale/player.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@ def init_nonserializables(self) -> None:
self.input_is_available = Event()
self.transcript = None # type: Optional[IO[str]]
self._output = TextBuffer()
self._llm_util = None

def init_names(self, name: str, title: str, descr: str, short_descr: str) -> None:
title = lang.capital(title or name) # make sure the title of a player remains capitalized
Expand All @@ -79,7 +78,11 @@ def tell(self, message: str, *, end: bool=False, format: bool=True, evoke: bool=
if evoke:
if self.title in message:
message = message.replace(self.title, 'you')
msg, rolling_prompt = self._llm_util.evoke(self._output, message, max_length = max_length, rolling_prompt = self.rolling_prompt, alt_prompt = alt_prompt)
msg, rolling_prompt = mud_context.driver.llm_util.evoke(self._output,
message,
max_length = max_length,
rolling_prompt = self.rolling_prompt,
alt_prompt = alt_prompt,)
self.rolling_prompt = rolling_prompt
else:
msg = str(message)
Expand Down Expand Up @@ -275,9 +278,6 @@ def test_get_output_paragraphs(self) -> Sequence[Sequence[str]]:
return [strip_text_styles(paragraph_text) for paragraph_text, formatted in paragraphs]





class PlayerConnection:
"""
Represents a player and the i/o connection that is used for him/her.
Expand Down Expand Up @@ -397,10 +397,3 @@ def destroy(self) -> None:
self.player.destroy(ctx)
self.player = None # type: ignore

@property
def llm_util(self) -> LlmUtil:
return self._llm_util

@llm_util.setter
def llm_util(self, value: 'LlmUtil') -> None:
self._llm_util = value
3 changes: 2 additions & 1 deletion tale/story.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ def __init__(self) -> None:
self.server_mode = GameMode.IF # the actual game mode the server is operating in (will be set at startup time)
self.items = "" # items to populate the world with. only used by json loading
self.npcs = "" # npcs to populate the world with. only used by json loading

        self.context = "" # context giving background for the story.

def __eq__(self, other: Any) -> bool:
return isinstance(other, StoryConfig) and vars(self) == vars(other)

Expand Down

0 comments on commit 1e1fea7

Please sign in to comment.