import os

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the Hugging Face token from the environment instead of hardcoding a
# secret in the source (the original embedded a token literal; never commit
# secrets to code).
HF_TOKEN = os.environ.get("HF_TOKEN")

MODEL_NAME = "Qwen/Qwen1.5-7B"


@st.cache_resource  # load the 7B model once, not on every Streamlit rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        token=HF_TOKEN,
        # half precision on GPU keeps the 7B model within memory limits
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    )
    return tokenizer, model


tokenizer, model = load_model()


# Function to generate an article for the given topic
def generate_article(topic: str) -> str:
    inputs = tokenizer(f"Generate an article for the NY Times tweet: {topic}", return_tensors="pt")
    outputs = model.generate(
        **inputs,  # pass input_ids and attention_mask together
        max_new_tokens=512,
        do_sample=True,  # temperature has no effect unless sampling is enabled
        temperature=0.5,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Streamlit app interface
st.title("Article Generator")
topic = st.text_input("Enter a topic:")
if st.button("Generate"):
    if topic:
        article = generate_article(topic)
        st.write(article)
    else:
        st.write("Please enter a topic.")
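
# To try this locally (a sketch, assuming the script is saved as app.py and
# streamlit, transformers, and torch are installed):
#
#   HF_TOKEN=<your-token> streamlit run app.py
#
# Qwen/Qwen1.5-7B is a publicly available checkpoint, so the token should
# only be required if you swap in a gated model.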