@@ -36,13 +36,11 @@ async def handle_request(request):
                 model("gpt-4"),
             ]
         }
-        # print(data)
         return web.json_response(data)
     else:
-        # Handle POST requests here
         data = await request.json()
         preprompt = [
-            "[Use proper language regarding on a context.]"
+            "[Use proper language regarding on a context.]"  # No use for forcing a specific language now.
         ]
         system = {
             "role": "system",
@@ -51,7 +49,6 @@ async def handle_request(request):
         system['content'] += ' '.join(preprompt)
         oldmsg = [system] + data['messages'][:-1]
         msg = data['messages'][-1]['content']
-        print(oldmsg)
         response = web.StreamResponse()
         response.headers['Content-Type'] = 'application/json'
         await response.prepare(request)
@@ -65,7 +62,6 @@ async def handle_request(request):
                 "choices": [{"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}]
             }
             await response.write(f"data: {json.dumps(chunk)}\n\n".encode())
-            print(chunk)
             async for res_text in Wrtn.chat_by_json(await Wrtn.make_chatbot(),
                 msg=msg,
                 oldmsg=oldmsg,
@@ -80,7 +76,6 @@ async def handle_request(request):
                     "choices": [{"delta": {"content": res_text}, "finish_reason": None, "index": 0}]
                 }
                 await response.write(f"data: {json.dumps(chunk)}\n\n".encode())
-                # print(chunk)
         except Exception as ex:
             print(f"Error: {str(ex)}\nChunk: {temp_res_text}")
         finally:
@@ -97,4 +92,4 @@ async def handle_request(request):

 app = web.Application()
 app.router.add_route('*', '/{path:.*}', handle_request)
-web.run_app(app, host='127.0.0.1', port=41323)
+web.run_app(app, host='127.0.0.1', port=41323)
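
For reference, a minimal client sketch for exercising the streaming path of this proxy. It assumes the server above is running locally on 127.0.0.1:41323; the request path '/v1/chat/completions' is arbitrary, since the catch-all route '/{path:.*}' accepts any path and every non-GET request is handled as a chat request. The payload fields and the "[DONE]" check are assumptions modelled on the OpenAI-style chunks the handler writes, not something shown in the hunks above.

# Hypothetical client for the proxy above (aiohttp, same library as the server).
# The request body shape and the "[DONE]" sentinel handling are assumptions.
import asyncio
import json

import aiohttp


async def main():
    payload = {
        "model": "gpt-4",
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    async with aiohttp.ClientSession() as session:
        # Any path works because the server registers '/{path:.*}'.
        async with session.post("http://127.0.0.1:41323/v1/chat/completions",
                                json=payload) as resp:
            # The handler streams SSE-style lines: b'data: {...}\n\n'.
            async for raw in resp.content:
                line = raw.decode("utf-8").strip()
                if not line.startswith("data:"):
                    continue
                body = line[len("data:"):].strip()
                if body == "[DONE]":  # sentinel, if the proxy ever sends one
                    break
                chunk = json.loads(body)
                delta = chunk["choices"][0]["delta"]
                if "content" in delta:
                    print(delta["content"], end="", flush=True)


asyncio.run(main())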