import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the API key from the environment variable
api_key = os.getenv("HF_API_KEY")
client = InferenceClient(api_key=api_key)


def chat_with_model(prompt):
    response_text = ""
    # Stream the chat completion and accumulate the generated text
    for message in client.chat_completion(
        model="google/gemma-2-2b-it",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=250,
        stream=True,
    ):
        delta = message.choices[0].delta.content
        if delta:  # the final streamed chunk may carry no content
            response_text += delta
    return response_text


interface = gr.Interface(
    fn=chat_with_model,
    inputs="text",
    outputs="text",
    title="Chat with Hugging Face Model",
)
interface.launch()
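Before wiring the handler into Gradio, it can help to confirm that the key and the model are reachable with a plain, non-streaming call. This is a minimal sketch, assuming HF_API_KEY is already set in the environment (for a Space, it would be added as a secret in the Space settings so it shows up as an environment variable):

import os
from huggingface_hub import InferenceClient

# Quick sanity check of the token and model, without Gradio or streaming
client = InferenceClient(api_key=os.getenv("HF_API_KEY"))
reply = client.chat_completion(
    model="google/gemma-2-2b-it",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=50,
)
print(reply.choices[0].message.content)

If this prints a response, the token and model are fine, and any remaining issue is in the Gradio app itself.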