Better formatting of hyperparams and code snippet
README.md CHANGED
@@ -11,6 +11,7 @@ License: mit
 ---
 hyperparams used to train this model:
 
+```
 lr = 5e-4,
 lr_schedule = constant,
 wd=0.1,
@@ -18,17 +19,16 @@ adam_beta1=0.9, adam_beta2 = 0.95,
 context_length=512,
 batch_size=80,
 gradient_accumulation_steps=16
+```
 
 ------ EXAMPLE USAGE ---
 
+```py
 from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 
 model = AutoModelForCausalLM.from_pretrained('roneneldan/TinyStories-33M')
-
 tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
-
 prompt = "Once upon a time there was"
-
 input_ids = tokenizer.encode(prompt, return_tensors="pt")
 
 # Generate completion
@@ -38,4 +38,5 @@ output = model.generate(input_ids, max_length = 1000, num_beams=1)
 output_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
 # Print the generated text
-print(output_text)
+print(output_text)
+```
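The hyperparameter list being fenced here maps fairly directly onto the standard transformers Trainer API. The model card does not include the actual training script, so the following is only a sketch of that mapping; `output_dir` and the choice of `TrainingArguments` itself are assumptions.

```py
# Hypothetical mapping of the listed hyperparams onto
# transformers.TrainingArguments; the real training script is not
# part of this model card.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="tinystories-33m",    # assumed; not in the model card
    learning_rate=5e-4,              # lr
    lr_scheduler_type="constant",    # lr_schedule
    weight_decay=0.1,                # wd
    adam_beta1=0.9,
    adam_beta2=0.95,
    per_device_train_batch_size=80,  # batch_size
    gradient_accumulation_steps=16,
)

# context_length=512 is a data/tokenization setting rather than a
# TrainingArguments field; it would be applied when chunking the
# corpus, e.g. tokenizer(text, truncation=True, max_length=512).
```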
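One small wart the commit leaves in place: the snippet imports GenerationConfig but never uses it. A sketch of one way to put it to work (my refactor, not part of the model card) is to gather the generate() settings into a reusable config object:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model = AutoModelForCausalLM.from_pretrained("roneneldan/TinyStories-33M")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")

# Same settings as the README's generate() call, collected into a
# GenerationConfig that can be saved alongside the model and reused.
gen_config = GenerationConfig(max_length=1000, num_beams=1)

input_ids = tokenizer.encode("Once upon a time there was", return_tensors="pt")
output = model.generate(input_ids, generation_config=gen_config)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```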