
Commit add29eb

fine tune reasoning css

1 parent: 163e59c

File tree

4 files changed: +30 -16 lines changed

main.py (+1 -1)
@@ -217,7 +217,7 @@ def fn_area_visibility_2(a):
     input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"]
     output_combo = [cookies, chatbot, history, status]
     predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
-
+
     # Submit button, reset button
     multiplex_submit_btn.click(
         None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")

request_llms/bridge_all.py (+1 -1)
@@ -1072,7 +1072,7 @@ def decode(self, *args, **kwargs):
         })
     except:
         logger.error(trimmed_format_exc())
-# -=-=-=-=-=-=- HuanFang / DeepSeek large model -=-=-=-=-=-=-
+# -=-=-=-=-=-=- HuanFang / DeepSeek local large model -=-=-=-=-=-=-
 if "deepseekcoder" in AVAIL_LLM_MODELS:  # deepseekcoder
     try:
         from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui

request_llms/oai_std_model_template.py (+16 -14)
@@ -1,13 +1,10 @@
 import json
 import time
 import traceback
-
 import requests
-from loguru import logger
 
-# config_private.py holds your own secrets, such as API keys and proxy URLs
-# On load, first check whether a private config_private file exists (not tracked by git); if it does, it overrides the original config file
-from toolbox import get_conf, is_the_upload_folder, update_ui
+from loguru import logger
+from toolbox import get_conf, is_the_upload_folder, update_ui, update_ui_lastest_msg
 
 proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
     "proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
@@ -76,7 +73,7 @@ def decode_chunk(chunk):
         finish_reason = chunk["choices"][0]["finish_reason"]
     except:
         pass
-    return response, reasoning_content, finish_reason
+    return response, reasoning_content, finish_reason, str(chunk)
 
 
 def generate_message(input, model, key, history, max_output_token, system_prompt, temperature):
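For context, here is a sketch of what decode_chunk looks like after this change; the body is inferred from the diff context and may differ from the real file. The point of the new fourth return value, str(chunk), is that when a frame is not JSON, json.loads fails, chunk stays a plain string, and the caller can match on the raw text:

```python
import json

def decode_chunk(chunk):
    # Decode one line of the SSE stream (sketch; field handling abbreviated).
    chunk = chunk.decode()
    response, reasoning_content, finish_reason = "", "", "False"
    try:
        chunk = json.loads(chunk[6:])  # strip the leading "data: "
        delta = chunk["choices"][0]["delta"]
        response = delta.get("content") or ""
        reasoning_content = delta.get("reasoning_content") or ""
        finish_reason = chunk["choices"][0]["finish_reason"]
    except:
        pass
    # New in this commit: also return the raw frame so callers can detect
    # non-JSON lines such as the SSE comment ": keep-alive".
    return response, reasoning_content, finish_reason, str(chunk)
```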
@@ -162,7 +159,7 @@ def predict_no_ui_long_connection(
        Used to pass the already-generated output across threads; most of the time this is only for a fancy visual effect and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
     """
     from .bridge_all import model_info
-    watch_dog_patience = 5  # the watchdog's patience: no biting for 5 seconds (not that it bites people
+    watch_dog_patience = 5  # the watchdog's patience: no biting for 5 seconds (not that it bites people)
     if len(APIKEY) == 0:
         raise RuntimeError(f"APIKEY is empty; please check {APIKEY} in the config file")
     if inputs == "":
@@ -215,7 +212,7 @@ def predict_no_ui_long_connection(
             break
         except requests.exceptions.ConnectionError:
             chunk = next(stream_response)  # Failed; retry once? If it fails again, there is nothing more to be done.
-        response_text, reasoning_content, finish_reason = decode_chunk(chunk)
+        response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk)
         # The returned stream is empty the first time; keep waiting
         if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False":
             continue
@@ -252,9 +249,8 @@ def predict_no_ui_long_connection(
             logger.error(error_msg)
             raise RuntimeError("Irregular JSON parse")
         if reasoning:
-            style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8'
             paragraphs = ''.join([f'<p style="margin: 1.25em 0;">{line}</p>' for line in reasoning_buffer.split('\n')])
-            return f'''<div style="{style}">{paragraphs}</div>\n\n''' + result
+            return f'''<div class="reasoning_process" >{paragraphs}</div>\n\n''' + result
         return result
 
 def predict(
@@ -348,14 +344,21 @@ def predict(
     gpt_reasoning_buffer = ""
 
     stream_response = response.iter_lines()
+    wait_counter = 0
     while True:
         try:
             chunk = next(stream_response)
         except StopIteration:
+            if wait_counter != 0 and gpt_replying_buffer == "":
+                yield from update_ui_lastest_msg(lastmsg="Model call failed ...", chatbot=chatbot, history=history, msg="failed")
             break
         except requests.exceptions.ConnectionError:
             chunk = next(stream_response)  # Failed; retry once? If it fails again, there is nothing more to be done.
-        response_text, reasoning_content, finish_reason = decode_chunk(chunk)
+        response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk)
+        if decoded_chunk == ': keep-alive':
+            wait_counter += 1
+            yield from update_ui_lastest_msg(lastmsg="Waiting " + "".join(["."] * (wait_counter%10)), chatbot=chatbot, history=history, msg="waiting ...")
+            continue
         # The returned stream is empty the first time; keep waiting
         if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False":
             status_text = f"finish_reason: {finish_reason}"
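The string ": keep-alive" is an SSE comment line that some backends emit while the model is still loading, so the connection is not idle-closed before the first token arrives. A minimal sketch of how the new wait_counter loop reacts to such heartbeats; the fake stream below is an assumption for illustration, as the real frames come from response.iter_lines():

```python
fake_stream = iter([
    b": keep-alive",  # SSE comment: server heartbeat while the model warms up
    b": keep-alive",
    b'data: {"choices": [{"delta": {"content": "Hi"}, "finish_reason": null}]}',
])

wait_counter = 0
for chunk in fake_stream:
    decoded_chunk = chunk.decode()
    if decoded_chunk == ": keep-alive":
        wait_counter += 1
        # Mirrors the diff: the UI shows 1-9 cycling dots while waiting
        print("Waiting " + "".join(["."] * (wait_counter % 10)))
        continue
    print(f"first data frame after {wait_counter} heartbeats: {decoded_chunk}")
```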
@@ -372,7 +375,7 @@ def predict(
                 chunk_decoded = chunk.decode()
                 chatbot[-1] = (
                     chatbot[-1][0],
-                    f"[Local Message] {finish_reason},got the following error message:\n"
+                    f"[Local Message] {finish_reason}, got the following error message:\n"
                     + chunk_decoded,
                 )
                 yield from update_ui(
@@ -390,9 +393,8 @@ def predict(
         if reasoning:
             gpt_replying_buffer += response_text
             gpt_reasoning_buffer += reasoning_content
-            style = 'padding: 1em; line-height: 1.5; text-wrap: wrap; opacity: 0.8'
             paragraphs = ''.join([f'<p style="margin: 1.25em 0;">{line}</p>' for line in gpt_reasoning_buffer.split('\n')])
-            history[-1] = f'<div style="{style}">{paragraphs}</div>\n\n' + gpt_replying_buffer
+            history[-1] = f'<div class="reasoning_process">{paragraphs}</div>\n\n---\n\n' + gpt_replying_buffer
         else:
             gpt_replying_buffer += response_text
         # If an exception is thrown here, the text is usually too long; see the output of get_full_error for details
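Net effect of the buffer change: reasoning text is wrapped in the new reasoning_process class (styled in themes/common.css below) and separated from the reply by a horizontal rule. A standalone illustration with hypothetical buffer contents:

```python
# Buffer contents are hypothetical; the wrapping mirrors the diff above.
gpt_reasoning_buffer = "Step 1: restate the question\nStep 2: outline the answer"
gpt_replying_buffer = "Here is the final answer."

paragraphs = ''.join(
    f'<p style="margin: 1.25em 0;">{line}</p>'
    for line in gpt_reasoning_buffer.split('\n')
)
# "---" renders as a Markdown horizontal rule, visually separating the
# reasoning block from the reply below it.
history_entry = f'<div class="reasoning_process">{paragraphs}</div>\n\n---\n\n' + gpt_replying_buffer
print(history_entry)
```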

themes/common.css (+12 -0)
@@ -311,3 +311,15 @@
     backdrop-filter: blur(10px);
     background-color: rgba(var(--block-background-fill), 0.5);
 }
+
+
+.reasoning_process {
+    font-size: smaller;
+    font-style: italic;
+    margin: 0px;
+    padding: 1em;
+    line-height: 1.5;
+    text-wrap: wrap;
+    opacity: 0.8;
+}
+
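This rule consolidates the inline style strings deleted from oai_std_model_template.py above (padding, line-height, text-wrap, opacity) and additionally renders reasoning in a smaller italic font. A quick, hypothetical way to preview the result from a local checkout (file names and sample text are assumptions):

```python
# Write a self-contained HTML page that inlines themes/common.css plus a
# sample reasoning block, then open it in a browser to eyeball the styling.
from pathlib import Path

css = Path("themes/common.css").read_text(encoding="utf-8")  # repo-relative path
sample = (
    '<div class="reasoning_process">'
    '<p style="margin: 1.25em 0;">Sample reasoning line</p>'
    '</div>'
    '<p>Final answer below the reasoning block.</p>'
)
page = f"<html><head><style>{css}</style></head><body>{sample}</body></html>"
Path("reasoning_preview.html").write_text(page, encoding="utf-8")
print("Wrote reasoning_preview.html")
```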
