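"""Gradio app for the Test Prompt Generator.

Generates prompts with a given number of tokens (sourced by default from Alice in
Wonderland) for testing transformer models, using the test_prompt_generator package.
"""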
import pprint
import subprocess
from pathlib import Path

import gradio as gr
from test_prompt_generator.test_prompt_generator import _preset_tokenizers, generate_prompt

# log system info for debugging purposes
result = subprocess.run(["lscpu"], text=True, capture_output=True)
pprint.pprint(result.stdout)
result = subprocess.run(["pip", "freeze"], text=True, capture_output=True)
pprint.pprint(result.stdout)


def generate(tokenizer_id, num_tokens, prefix=None, source_text=None):
    """Generate a prompt with the given number of tokens, save it to a .jsonl file,
    and return the prompt, the output file path, and the resolved tokenizer id."""
    output_path = Path(f"prompt_{num_tokens}.jsonl")
    # remove any output file left over from an earlier run with the same token count
    if output_path.exists():
        output_path.unlink()
    # treat empty textbox inputs as "not provided"
    if prefix == "":
        prefix = None
    if source_text == "":
        source_text = None
    prompt = generate_prompt(
        tokenizer_id, int(num_tokens), prefix=prefix, source_text=source_text, output_file=output_path
    )
    # show the full Hugging Face model id when a preset tokenizer name was selected
    if tokenizer_id in _preset_tokenizers:
        tokenizer_id = _preset_tokenizers[tokenizer_id]
    return prompt, str(output_path), tokenizer_id


demo = gr.Interface(
    fn=generate,
    title="Test Prompt Generator",
    description="Generate prompts with a given number of tokens for testing transformer models. "
    "Prompt source: https://archive.org/stream/alicesadventures19033gut/19033.txt",
    inputs=[
        gr.Dropdown(
            label="Tokenizer",
            choices=_preset_tokenizers,
            value="mistral",
            allow_custom_value=True,
            info="Select a tokenizer from this list or paste a model_id from a model on the Hugging Face Hub",
        ),
        gr.Number(
            label="Number of Tokens", minimum=4, maximum=2048, value=32, info="Enter a number between 4 and 2048."
        ),
        gr.Textbox(
            label="Prefix (optional)",
            info="If given, the start of the prompt will be this prefix. Example: 'Summarize the following text:'",
        ),
        gr.Textbox(
            label="Source text (optional)",
            info="By default, prompts will be generated from Alice in Wonderland. Enter text here to use that instead.",
        ),
    ],
    outputs=[
        gr.Textbox(label="prompt", show_copy_button=True),
        gr.File(label="JSON file"),
        gr.Markdown(label="tokenizer"),
    ],
    examples=[
        ["falcon", 32],
        ["falcon", 64],
        ["falcon", 128],
        ["falcon", 512],
        ["falcon", 1024],
        ["falcon", 2048],
    ],
    cache_examples=False,
    allow_flagging="never",
)

demo.launch()
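
# Example of generating a prompt programmatically, without the Gradio UI
# (a minimal sketch; assumes generate_prompt accepts the same arguments as in
# generate() above and that output_file may be omitted):
#
#     from test_prompt_generator.test_prompt_generator import generate_prompt
#     prompt = generate_prompt("mistral", 64, prefix="Summarize the following text:")
#     print(prompt)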