Train Config
  • base_model: allganize/Llama-3-Alpha-Ko-8B-Instruct
  • model_type: AutoModelForCausalLM
  • tokenizer_type: AutoTokenizer
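These keys match the axolotz-free axolotl fine-tuning config schema, so the training setup was likely described by a YAML file along the lines of the sketch below. Only the three fields above come from this card; every other value is an illustrative placeholder, not taken from the source.

base_model: allganize/Llama-3-Alpha-Ko-8B-Instruct
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

# Placeholder values for illustration only; not from this card.
sequence_len: 4096
micro_batch_size: 1
num_epochs: 1
output_dir: ./outputs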

HOW TO USE

from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "MRAIRR/minillama3_8b_all"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    device_map="auto",
)
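# If GPU memory is tight, 4-bit quantized loading is an alternative. This is
# a sketch, not part of the original card, and assumes the optional
# bitsandbytes package is installed:
#
# from transformers import BitsAndBytesConfig
# model = AutoModelForCausalLM.from_pretrained(
#     model_id,
#     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#     device_map="auto",
# )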

PROMPT_TEMPLATE = """
# 지시:
당신은 인공지능 어시스턴트입니다. 사용자가 묻는 말에 친절하고 정확하게 답변하세요.
"""
# The system prompt reads: "# Instruction: You are an AI assistant.
# Answer the user's questions kindly and accurately."

messages = [
    {"role": "system", "content": PROMPT_TEMPLATE},
    # The user turn translates to: "Hi! My name is Hyunsoo, haha. Nice to meet you."
    {"role": "user", "content": "안녕? 내 이름은 현수 ㅎㅎ 만나서 반가워"},
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

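# Llama 3 chat models end each assistant turn with <|eot_id|>, so stop
# generation on either that token or the regular EOS token.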
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

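# Sampling with a low temperature keeps answers focused, and a mild
# repetition_penalty discourages the model from looping.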
outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    temperature=0.3,
    eos_token_id=terminators,
    do_sample=True,
    repetition_penalty=1.05,
)
response = outputs[0][input_ids.shape[-1]:]  # drop the prompt tokens from the output
response_text = tokenizer.decode(response, skip_special_tokens=True)
# Break the reply into one sentence per line at each period.
completion = '\n'.join(response_text.split("."))
print(completion)
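For interactive use, the same call can print tokens as they are generated. A minimal streaming sketch using transformers' TextStreamer (not part of the original card):

from transformers import TextStreamer

# TextStreamer writes decoded tokens to stdout as generation proceeds.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    input_ids,
    streamer=streamer,
    max_new_tokens=256,
    temperature=0.3,
    eos_token_id=terminators,
    do_sample=True,
    repetition_penalty=1.05,
)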