Matt committed · Commit 434878e
1 Parent(s): 41f3764
Add description
app.py CHANGED
@@ -4,14 +4,11 @@ import json
 
 tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
 
-demo_conversation1 = """[
+demo_conversation = """[
+    {"role": "system", "content": "You are a helpful chatbot."},
     {"role": "user", "content": "Hi there!"},
-    {"role": "assistant", "content": "Hello, human!"}
-]"""
-
-demo_conversation2 = """[
-    {"role": "system", "content": "You are a helpful chatbot."},
-    {"role": "user", "content": "Hi there!"}
+    {"role": "assistant", "content": "Hello, human!"},
+    {"role": "user", "content": "Can I ask a question?"}
 ]"""
 
 default_template = """{% for message in messages %}
@@ -21,27 +18,26 @@ default_template = """{% for message in messages %}
     {{ "<|im_start|>assistant\\n" }}
 {% endif %}"""
 
-
+description_text = """This space is a helper app for writing [Chat Templates](https://huggingface.co/docs/transformers/main/en/chat_templating). Please see the linked
+documentation for more information! When you're happy with the outputs from your template, you can use the code block at the end to create a PR to add your template to a
+model"""
 
-def apply_chat_template(template,
+def apply_chat_template(template, test_conversation, add_generation_prompt, cleanup_whitespace):
     if cleanup_whitespace:
         template = "".join([line.strip() for line in template.split('\n')])
     tokenizer.chat_template = template
     outputs = []
-
-
-        outputs.append(tokenizer.apply_chat_template(conversation, tokenize=False))
-        outputs.append(tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True))
-    return tuple(outputs)
+    conversation = json.loads(test_conversation)
+    return tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=add_generation_prompt)
 
-output_names = ["Conversation 1 without generation prompt", "Conversation 1 with generation prompt", "Conversation 2 without generation prompt", "Conversation 2 with generation prompt"]
 iface = gr.Interface(
+    description=description_text,
     fn=apply_chat_template,
     inputs=[
         gr.TextArea(value=default_template, lines=10, max_lines=30, label="Chat Template"),
-        gr.TextArea(value=
-        gr.
+        gr.TextArea(value=demo_conversation, lines=6, label="Conversation"),
+        gr.Checkbox(value=False, label="Add generation prompt"),
         gr.Checkbox(value=True, label="Cleanup template whitespace"),
     ],
-    outputs=
+    outputs=gr.TextArea(label="Formatted conversation"))
 iface.launch()
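
For reference, the updated helper boils down to the standalone sketch below. The middle of default_template is not visible in this diff, so the ChatML-style loop body here is an assumption, as is running the helper outside of Gradio:

# Minimal sketch of what the revised apply_chat_template helper does on submit.
# Assumption: the unseen middle of default_template renders each message in
# ChatML form; only the template's first and last lines appear in the diff.
import json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

template = """{% for message in messages %}
    {{ "<|im_start|>" + message["role"] + "\\n" + message["content"] + "<|im_end|>\\n" }}
{% endfor %}
{% if add_generation_prompt %}
    {{ "<|im_start|>assistant\\n" }}
{% endif %}"""

conversation = json.loads("""[
    {"role": "system", "content": "You are a helpful chatbot."},
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello, human!"},
    {"role": "user", "content": "Can I ask a question?"}
]""")

# "Cleanup template whitespace": strip per-line indentation so the template's
# own newlines and leading spaces don't leak into the rendered conversation.
template = "".join(line.strip() for line in template.split("\n"))

tokenizer.chat_template = template
print(tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True))

In the Space itself, gr.Interface passes the four form inputs (the template, the conversation JSON, and the two checkboxes) straight into apply_chat_template and shows the returned string in the "Formatted conversation" box.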