from transformers import pipeline


def main() -> None:
    """Run a one-turn chat generation with heegyu/gemma-2-9b-lima and print the result."""
    # A single user turn; the text-generation pipeline applies the model's
    # chat template to this list of role/content dicts.
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    # "auto" lets accelerate choose device placement and numeric precision.
    pipe = pipeline(
        "text-generation",
        model="heegyu/gemma-2-9b-lima",
        device_map="auto",
        torch_dtype="auto",
    )
    # eos_token_id=107 — presumably Gemma's <end_of_turn> token id, so decoding
    # stops at the end of the assistant turn; TODO confirm against the tokenizer.
    print(pipe(messages, max_new_tokens=128, eos_token_id=107))


if __name__ == "__main__":
    main()
Example output: "I am an AI assistant, how can I help you today?"
Downloads last month: 17
This model does not have enough activity to be deployed to the serverless Inference API yet. Increase its social visibility and check back later, or deploy it to dedicated Inference Endpoints instead.