import os

import gradio as gr
import peft
import transformers

model_id = 'freQuensy23/toxic-llama2'

# Load the LoRA-adapted model and its tokenizer; the access token is read from
# the `hf_token` environment variable.
model = peft.AutoPeftModelForCausalLM.from_pretrained(model_id, token=os.getenv('hf_token'))
model.to_bettertransformer()  # requires the `optimum` package
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, token=os.getenv('hf_token'))


def generate(text, temp):
    # Wrap the user message in the "User: ... / Bot:" template the model expects.
    input_ids = tokenizer(f"User: {text}\nBot:", return_tensors='pt').input_ids
    generated_ids = model.generate(
        input_ids=input_ids.to(model.device),
        do_sample=True,  # sampling must be enabled for `temperature` to take effect
        temperature=temp,
        max_new_tokens=64,
    )[0][len(input_ids[0]):]
    # Decode only the newly generated tokens and keep the first line of the reply.
    return tokenizer.decode(generated_ids, skip_special_tokens=True).split('\n')[0]


iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(lines=5, placeholder="Type your prompt here...", value='I am clever?'),
        gr.Slider(0.1, 1.5, value=1.1),
    ],
    outputs=gr.Textbox(),
    concurrency_limit=2,
)
iface.launch()
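
# A minimal usage sketch (assumes this file is saved as app.py and that your
# Hugging Face access token is exported under the `hf_token` variable name used above):
#
#   pip install gradio transformers peft optimum torch
#   export hf_token=<your-hf-access-token>
#   python app.py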