Skip to content

Commit 2486c0b

Browse files
committed
feat: add changetone provider
Signed-off-by: Jana Peper <[email protected]>
1 parent 7a94d7d commit 2486c0b

File tree

3 files changed

+52
-1
lines changed

3 files changed

+52
-1
lines changed

lib/change_tone.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
2+
# SPDX-License-Identifier: AGPL-3.0-or-later
3+
"""A chain that changes the tone of a text
4+
"""
5+
6+
from typing import Any
7+
8+
from langchain.prompts import PromptTemplate
9+
from langchain.schema.prompt_template import BasePromptTemplate
10+
from langchain_core.runnables import Runnable
11+
12+
class ChangeToneProcessor():
    """Task processor that reformulates a text in a requested tone.

    Wraps a LangChain ``Runnable``: ``__call__`` builds the user/system
    prompts from the task inputs, invokes the runnable, and returns its
    raw output under the ``output`` key.
    """

    # The runnable (LLM chain) that performs the actual generation.
    runnable: Runnable

    # Role instruction for the model. NOTE: the original value was
    # copy-pasted from the topics processor ("finding the topic keywords"),
    # which contradicted the change-tone user prompt below.
    system_prompt: str = "You're an AI assistant tasked with reformulating the text given to you by the user in a requested tone."

    # Prompt template filled with the text to rewrite and the target tone.
    user_prompt: BasePromptTemplate = PromptTemplate(
        input_variables=["text", "tone"],
        template="""
Reformulate the following text in a " {tone} " tone in its original language. Output only the reformulation. Here is the text:

"
{text}
"

Output only the reformulated text, nothing else, no introductory sentence. Use the same language as the original text.
"""
    )

    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    def __call__(self, inputs: dict[str, Any],
                 ) -> dict[str, Any]:
        """Run the change-tone task.

        :param inputs: task input dict; reads ``inputs['input']`` (the text
            to rewrite) and ``inputs['tone']`` (the desired tone).
        :return: dict with key ``output`` holding the runnable's result.
        """
        output = self.runnable.invoke({
            "user_prompt": self.user_prompt.format_prompt(
                text=inputs['input'], tone=inputs['tone']),
            "system_prompt": self.system_prompt,
        })
        return {'output': output}

lib/main.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
from fastapi import FastAPI
1616
from nc_py_api import AsyncNextcloudApp, NextcloudApp, NextcloudException
1717
from nc_py_api.ex_app import LogLvl, persistent_storage, run_app, set_handlers
18-
from nc_py_api.ex_app.providers.task_processing import TaskProcessingProvider
18+
from nc_py_api.ex_app.providers.task_processing import TaskProcessingProvider, ShapeEnumValue
1919

2020
models_to_fetch = {
2121
"https://huggingface.co/bartowski/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/4f0c246f125fc7594238ebe7beb1435a8335f519/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf": { "save_path": os.path.join(persistent_storage(), "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf") },
@@ -123,6 +123,15 @@ async def enabled_handler(enabled: bool, nc: AsyncNextcloudApp) -> str:
123123
name="Local Large language Model: " + model,
124124
task_type=task,
125125
expected_runtime=30,
126+
input_shape_enum_values= {
127+
"tone": [
128+
ShapeEnumValue(name= "Friendlier", value= "friendlier"),
129+
ShapeEnumValue(name= "More formal", value= "more formal"),
130+
ShapeEnumValue(name= "Funnier", value= "funnier"),
131+
ShapeEnumValue(name= "More casual", value= "more casual"),
132+
ShapeEnumValue(name= "More urgent", value= "more urgent"),
133+
],
134+
} if task == "core:text2text:changetone" else {}
126135
)
127136
await nc.providers.task_processing.register(provider)
128137
print(f"Registered {task_processor_name}", flush=True)

lib/task_processors.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
from free_prompt import FreePromptProcessor
1818
from headline import HeadlineProcessor
1919
from proofread import ProofreadProcessor
20+
from change_tone import ChangeToneProcessor
2021
from chatwithtools import ChatWithToolsProcessor
2122
from topics import TopicsProcessor
2223
from summarize import SummarizeProcessor
@@ -134,6 +135,7 @@ def generate_task_processors_for_model(file_name, task_processors):
134135
task_processors[model_name + ":core:text2text"] = lambda: FreePromptProcessor(generate_llm_chain(file_name))
135136
task_processors[model_name + ":core:text2text:chat"] = lambda: ChatProcessor(generate_chat_chain(file_name))
136137
task_processors[model_name + ":core:text2text:proofread"] = lambda: ProofreadProcessor(generate_llm_chain(file_name))
138+
task_processors[model_name + ":core:text2text:changetone"] = lambda: ChangeToneProcessor(generate_llm_chain(file_name))
137139
task_processors[model_name + ":core:text2text:chatwithtools"] = lambda: ChatWithToolsProcessor(generate_chat_chain(file_name))
138140

139141
# chains[model_name + ":core:contextwrite"] = lambda: ContextWriteChain(llm_chain=llm_chain())

0 commit comments

Comments
 (0)