texasdave2 committed
Commit: 69933d2
Parent(s): c71a48b
Upload model

Files changed:
- README.md +4 -19
- adapter_config.json +7 -3
- adapter_model.safetensors +3 -0
README.md
CHANGED
@@ -18,6 +18,7 @@ base_model: facebook/opt-1.3b
 
 
 - **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
 - **Shared by [optional]:** [More Information Needed]
 - **Model type:** [More Information Needed]
 - **Language(s) (NLP):** [More Information Needed]
@@ -76,7 +77,7 @@ Use the code below to get started with the model.
 
 ### Training Data
 
-<!-- This should link to a
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
 
 [More Information Needed]
 
@@ -107,7 +108,7 @@ Use the code below to get started with the model.
 
 #### Testing Data
 
-<!-- This should link to a
+<!-- This should link to a Dataset Card if possible. -->
 
 [More Information Needed]
 
@@ -198,22 +199,6 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 [More Information Needed]
 
 
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: False
-- load_in_4bit: True
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: nf4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: float16
-
 ### Framework versions
 
-
-- PEFT 0.6.0.dev0
+- PEFT 0.7.2.dev0
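For reference, the quantization settings removed from the "Training procedure" section above describe a 4-bit NF4 setup. A minimal sketch of how those same values would be expressed with transformers' `BitsAndBytesConfig` (the loading code itself is not part of this commit, so this is an illustrative assumption, not the author's script):

```python
# Hedged sketch: the bitsandbytes settings listed in the removed
# "Training procedure" section, expressed as a BitsAndBytesConfig.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True (load_in_8bit: False)
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
)

# base_model taken from the README front matter shown in the first hunk
base_model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-1.3b",
    quantization_config=bnb_config,
    device_map="auto",
)
```

The remaining removed fields (llm_int8_threshold: 6.0, llm_int8_skip_modules: None, llm_int8_enable_fp32_cpu_offload: False, llm_int8_has_fp16_weight: False) match the library defaults, so they are omitted from the sketch.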
adapter_config.json
CHANGED
@@ -8,16 +8,20 @@
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
+  "loftq_config": {},
   "lora_alpha": 32,
   "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "
-    "
+    "q_proj",
+    "v_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_rslora": false
 }
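The updated adapter_config.json describes a standard PEFT LoRA adapter targeting the attention projections of facebook/opt-1.3b. A hedged sketch of the equivalent `peft.LoraConfig`, assuming the PEFT 0.7.x release noted in the README's framework versions (it mirrors the stored JSON and is not the original training script):

```python
# Hedged sketch: adapter_config.json values expressed as a peft.LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                                 # "r": 16
    lora_alpha=32,                        # "lora_alpha": 32
    lora_dropout=0.05,                    # "lora_dropout": 0.05
    target_modules=["q_proj", "v_proj"],  # attention projections of the OPT base model
    task_type="CAUSAL_LM",                # "task_type": "CAUSAL_LM"
    use_rslora=False,                     # "use_rslora": false (field added in PEFT 0.7.x)
)
```

Fields such as loftq_config, megatron_config, and use_rslora are written by newer PEFT versions, which is why they appear only on the right side of the diff.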
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3b51bd673ae9f92e32e2792745e3ce56f5bf2378e224f1ca956aedb8cc3620
+size 12596472
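The file committed here is a Git LFS pointer; the adapter weights themselves (about 12.6 MB) are stored in LFS. A minimal usage sketch for attaching the adapter to its base model, where "texasdave2/<repo-name>" is a placeholder for this repository's actual id:

```python
# Minimal sketch: load the uploaded LoRA adapter on top of facebook/opt-1.3b.
# "texasdave2/<repo-name>" is a placeholder; substitute the real repository id.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
model = PeftModel.from_pretrained(base, "texasdave2/<repo-name>")

inputs = tokenizer("Hello, my name is", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```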