Spaces: Running on T4
ffreemt committed
Commit • 5f4757d
Parent(s): e8c556f

Update tangger/Qwen-7B-Chat since Qwen/Qwen-7B-Chat is gone

Files changed:
- .gitignore +1 -0
- app.py +7 -2
.gitignore CHANGED
@@ -1 +1,2 @@
 .ruff_cache
+__pycache__
app.py CHANGED
@@ -60,7 +60,8 @@ except Exception:
     # Windows
     logger.warning("Windows, cant run time.tzset()")
 
-model_name = "Qwen/Qwen-7B-Chat"
+model_name = "Qwen/Qwen-7B-Chat"  # gone!
+model_name = "tangger/Qwen-7B-Chat"  # try
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
 n_gpus = torch.cuda.device_count()
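The hunk above only swaps the repo id and reloads the tokenizer; loading of the model weights happens elsewhere in app.py and is not shown here. As a rough sketch of how the fallback checkpoint would typically be loaded with the same transformers API (the exact call in app.py may differ, and the dtype/device choices below are assumptions for a single T4):

# Sketch only: load the fallback Qwen checkpoint alongside the tokenizer.
# Not copied from app.py; dtype and device placement are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "tangger/Qwen-7B-Chat"  # stand-in, since Qwen/Qwen-7B-Chat is gone

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,     # Qwen ships custom modeling code in the repo
    torch_dtype=torch.float16,  # half-precision weights; a 7B model is a tight fit on a 16 GB T4
    device_map="auto",          # place weights on the available GPU
).eval()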
@@ -168,9 +169,13 @@ def bot(chat_history, **kwargs):
 
 
 def bot_stream(chat_history, **kwargs):
-    logger.trace(f"{chat_history=}")
     logger.trace(f"{kwargs=}")
 
+    # somehow, empty chat_history
+    if chat_history is None or not chat_history:
+        logger.trace(f" *** {chat_history=}")
+        chat_history.append(["", ""])
+
     try:
         message = chat_history[-1][0]
     except Exception as exc:
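The added guard works around the callback sometimes receiving an empty chat history, which would otherwise make the chat_history[-1][0] lookup below fall into the except branch. Note that if chat_history were literally None, calling .append on it would still fail; the standalone sketch below shows the same idea while also normalizing None first (names and the example call are illustrative, not from app.py):

# Illustrative sketch of the guard added to bot_stream, not code from app.py.
def ensure_last_turn(chat_history):
    """Guarantee at least one [user_message, bot_reply] pair."""
    if chat_history is None:
        chat_history = []              # .append on None would raise AttributeError
    if not chat_history:
        chat_history.append(["", ""])  # placeholder turn, as in the diff
    return chat_history

# chat_history[-1][0] is now always safe to read:
history = ensure_last_turn([])
message = history[-1][0]  # "" instead of an IndexError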