import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import spaces

# Llama Guard 3 is a gated model, so a Hugging Face token with access is required.
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
if not huggingface_token:
    raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")

model_id = "meta-llama/Llama-Guard-3-8B"
device = "cuda"
dtype = torch.bfloat16

# The tokenizer is lightweight and can be loaded at import time, on CPU.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)


@spaces.GPU
def moderate(chat):
    # The model is loaded inside the @spaces.GPU-decorated function so it is
    # instantiated only once a ZeroGPU device has been allocated for the call.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=dtype, device_map=device, token=huggingface_token
    )
    # Llama Guard expects the conversation formatted via its chat template.
    input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
    output = model.generate(input_ids=input_ids, max_new_tokens=100, pad_token_id=0)
    # Decode only the newly generated tokens, skipping the prompt.
    prompt_len = input_ids.shape[-1]
    return tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)


def gradio_moderate(user_input, assistant_response):
    # Build the chat to classify: always the user turn, plus the assistant
    # turn when one was provided (Llama Guard can moderate either).
    chat = [{"role": "user", "content": user_input}]
    if assistant_response:
        chat.append({"role": "assistant", "content": assistant_response})
    return moderate(chat)


iface = gr.Interface(
    fn=gradio_moderate,
    inputs=[
        gr.Textbox(lines=3, label="User Input"),
        gr.Textbox(lines=3, label="Assistant Response (optional)"),
    ],
    outputs=gr.Textbox(label="Moderation Result"),
    title="Llama Guard Moderation",
    description="Enter a user input and an optional assistant response to check for content moderation.",
)

if __name__ == "__main__":
    iface.launch()