update config.json for transformers GPTQ support
config.json CHANGED (+12 -1)
@@ -22,5 +22,16 @@
   "torch_dtype": "float16",
   "transformers_version": "4.36.2",
   "use_cache": false,
-  "vocab_size": 32002
+  "vocab_size": 32002,
+  "quantization_config" : {
+    "bits": 4,
+    "group_size": 128,
+    "damp_percent": 0.1,
+    "desc_act": true,
+    "static_groups": false,
+    "sym": true,
+    "true_sequential": true,
+    "model_name_or_path": "/kaggle/working/v0_quantized_model_desc_act/",
+    "model_file_base_name": "model"
+  }
 }
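With the quantization_config block embedded in config.json, transformers (4.36+, with optimum and auto-gptq installed) can pick up the GPTQ settings directly when loading the checkpoint. A minimal sketch of that load path, assuming a placeholder repo id "your-org/your-gptq-model" in place of the actual model repository:

# Minimal sketch, not part of this commit: load the GPTQ checkpoint with transformers,
# which reads the "quantization_config" section of config.json automatically.
# Requires: transformers >= 4.36, optimum, auto-gptq, accelerate, and a GPU for the GPTQ kernels.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/your-gptq-model"  # placeholder repo id; substitute the real one
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # dispatch quantized weights to the available GPU(s)
)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))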