Upload 132 files
- app.py +3 -3
- check_proxy.py +6 -0
- request_llm/bridge_all.py +2 -2
- requirements.txt +1 -0
app.py
CHANGED
@@ -17,7 +17,7 @@ def main():
 
     from check_proxy import get_current_version
     initial_prompt = "Serve me as a writing and programming assistant."
-    title_html = f
+    title_html = f'<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}<a href="https://huggingface.co/fb700/chatglm-fitness-RLHF">(模型by帛凡Ai)</a></h1>'
     description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
 
     # 问询记录, python 版本建议3.9+(越新越好)
@@ -56,7 +56,7 @@ def main():
     cancel_handles = []
     with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
-        gr.HTML('''<center
+        gr.HTML('''<center>本应用基于chatglm6b【帛凡 AI】的微调模型进行部署,模型中文总结能力优于GPT3.5,欢迎体验<a href="https://huggingface.co/fb700/chatglm-fitness-RLHF">下载地址</a></center>''')
         cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
         with gr_L1():
             with gr_L2(scale=2):
@@ -107,7 +107,7 @@ def main():
                 system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                 top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                 temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
-                max_length_sl = gr.Slider(minimum=256, maximum=
+                max_length_sl = gr.Slider(minimum=256, maximum=40960, value=5120, step=1, interactive=True, label="Local LLM MaxLength",)
                 checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                 md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
 
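Side note on the slider change: raising the maximum to 40960 only matters if the slider value actually reaches the model call. A minimal sketch of that hand-off, assuming the upstream chatgpt_academic convention of packing UI state into an llm_kwargs dict; the build_llm_kwargs helper and the demo wiring below are illustrative, not part of this commit:

import gradio as gr

def build_llm_kwargs(max_length, top_p, temperature):
    # Hypothetical helper: pack the slider values using the same key names
    # as the llm_kwargs dict added to check_proxy.py in this commit.
    return {'max_length': max_length, 'top_p': top_p, 'temperature': temperature}

with gr.Blocks() as demo:
    top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.01, label="Top-p (nucleus sampling)")
    temperature = gr.Slider(minimum=0, maximum=2.0, value=1.0, step=0.01, label="Temperature")
    max_length_sl = gr.Slider(minimum=256, maximum=40960, value=5120, step=1, label="Local LLM MaxLength")
    out = gr.JSON(label="llm_kwargs")
    btn = gr.Button("Pack")
    btn.click(build_llm_kwargs, inputs=[max_length_sl, top_p, temperature], outputs=[out])

if __name__ == '__main__':
    demo.launch()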
check_proxy.py
CHANGED
@@ -150,6 +150,12 @@ def warm_up_modules():
     enc.encode("模块预热", disallowed_special=())
     enc = model_info["gpt-4"]['tokenizer']
     enc.encode("模块预热", disallowed_special=())
+    from request_llm.bridge_chatglm import predict_no_ui_long_connection
+    llm_kwargs = {'max_length': 51200,'top_p': 1,'temperature': 1,}
+    result = predict_no_ui_long_connection( inputs="请问什么是质子?",
+                                            llm_kwargs=llm_kwargs,
+                                            history=["你好", "我好!"],
+                                            sys_prompt="")
 
 if __name__ == '__main__':
     import os
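The six added lines run a real ChatGLM inference inside warm_up_modules(), so an environment without the local model available will raise at warm-up time. A guarded variant, purely as a sketch (the try/except and function wrapper are this note's addition, not part of the commit; the predict_no_ui_long_connection signature is taken from the diff above):

def warm_up_chatglm():
    # Same smoke test as the commit, wrapped so a missing or partially
    # downloaded local model cannot abort the rest of warm-up.
    try:
        from request_llm.bridge_chatglm import predict_no_ui_long_connection
        llm_kwargs = {'max_length': 51200, 'top_p': 1, 'temperature': 1}
        result = predict_no_ui_long_connection(inputs="请问什么是质子?",
                                               llm_kwargs=llm_kwargs,
                                               history=["你好", "我好!"],
                                               sys_prompt="")
        print(result)
    except Exception as e:
        print(f"ChatGLM warm-up skipped: {e}")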
request_llm/bridge_all.py
CHANGED
@@ -157,7 +157,7 @@ model_info = {
         "fn_with_ui": chatglm_ui,
         "fn_without_ui": chatglm_noui,
         "endpoint": None,
-        "max_token":
+        "max_token": 40960,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
@@ -165,7 +165,7 @@ model_info = {
         "fn_with_ui": chatglm_ui,
         "fn_without_ui": chatglm_noui,
         "endpoint": None,
-        "max_token":
+        "max_token": 40960,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
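Both entries keep tokenizer_gpt35 / get_token_num_gpt35, so the 40960 budget is measured with GPT-3.5's tiktoken encoding rather than ChatGLM's own tokenizer, and the counts can diverge noticeably on Chinese text. A quick way to see what that encoding reports (the sample string is illustrative):

import tiktoken

# Count tokens the way bridge_all.py does for the chatglm entries:
# with the GPT-3.5 encoding, not ChatGLM's tokenizer.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
text = "质子是构成原子核的基本粒子之一。"
print(len(enc.encode(text, disallowed_special=())))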
requirements.txt
CHANGED
@@ -15,6 +15,7 @@ openai
 numpy
 arxiv
 rich
+pdfminer
 protobuf
 transformers==4.27.1
 cpm_kernels
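The new pdfminer dependency presumably serves the PDF-reading plugins. A minimal sketch of the high-level extraction API (assumes the pdfminer.six-style pdfminer.high_level module; "paper.pdf" is a placeholder path):

from pdfminer.high_level import extract_text

# Placeholder path; extract_text returns the PDF's text content as one string.
text = extract_text("paper.pdf")
print(text[:500])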