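# app.py: minimal Gradio demo that generates text with a LLaMA model via transformers.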
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Note: this model is gated and requires approved access plus very large hardware to load.
model_id = "meta-llama/Llama-3.1-405B"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=256)
    # generate() returns a batch of token-ID sequences; decode the first one.
    return tokenizer.decode(output[0], skip_special_tokens=True)
demo = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="LLaMA Text Generation",
    description="Enter a prompt to generate text using the LLaMA model.",
)
demo.launch()
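# Usage note: demo.launch() serves the app locally; passing share=True to
# demo.launch() additionally creates a temporary public Gradio link.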