Update README.md

README.md

license_name: other
license_link: LICENSE
---
The license for the Llama 2 model checkpoints is the Llama 2 Community License. \
The license for the Lumina-T2I 5B checkpoints is Apache-2.0.

In this repo, you will find FP32 (original, unchanged), BF16, and FP16 PTH files, as well as FP32, BF16, and FP16 safetensors files for the Lumina-T2I 5B text-to-image model. \
None of the files have been confirmed to work yet; I plan to check that later. Some non-tensor data may also be missing from the `safetensors` files, since that format stores only tensors and drops everything else during conversion. If you try any of the files, let me know how they work.
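
As a quick sanity check before trying a full generation pipeline, the converted files can at least be loaded and inspected. A minimal sketch, assuming the file names produced by the conversion scripts below:

```
from safetensors.torch import load_file

# Load a converted checkpoint and print the first few tensors
# with their dtype and shape to confirm the conversion took
tensors = load_file("consolidated.00-of-01_bf16.safetensors")
for i, (name, t) in enumerate(tensors.items()):
    print(name, t.dtype, tuple(t.shape))
    if i >= 4:
        break
```

This only confirms the dtypes were converted, not that the model actually generates sensible images.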

You can also find un-gated files for Llama 2 7B in 4-bit (bnb) and 16-bit. Both are simply copies of the files from the unsloth repos. I have not run Lumina locally yet to confirm, but I believe both should work.
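
If you want to try the Llama 2 7B files on their own, something like this should work with `transformers` (untested; the local path is a placeholder, and the 4-bit load additionally needs `bitsandbytes` and `accelerate` installed):

```
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Placeholder: point this at the folder holding the Llama 2 7B files
model_path = "path/to/llama-2-7b"

tokenizer = AutoTokenizer.from_pretrained(model_path)

# 4-bit (bnb) variant; drop quantization_config to load the 16-bit files instead
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
```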

Script used for converting the FP32 `.pth` to FP16 `.pth`:

```
import torch

# Load the FP32 checkpoint onto the CPU
fp32_model_path = "consolidated.00-of-01.pth"
fp32_model = torch.load(fp32_model_path, map_location='cpu')

# Convert every tensor (including tensors one dict level deep) to FP16;
# leave non-tensor values untouched
fp16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        fp16_model[key] = value.half()
    elif isinstance(value, dict):
        fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        fp16_model[key] = value

# Save the FP16 checkpoint
fp16_model_path = "consolidated.00-of-01_fp16.pth"
torch.save(fp16_model, fp16_model_path)
```
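
FP16 stores two bytes per parameter instead of four, so the converted file should come out at roughly half the size of the original. A quick check, assuming both files are in the working directory:

```
import os

# Compare on-disk sizes of the original and converted checkpoints
for path in ("consolidated.00-of-01.pth", "consolidated.00-of-01_fp16.pth"):
    print(path, round(os.path.getsize(path) / 2**30, 2), "GiB")
```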

Script used for converting the FP32 `.pth` to FP32, BF16, and FP16 safetensors and a BF16 `.pth`:

```
import torch
from safetensors.torch import save_file

# Load the FP32 checkpoint onto the CPU
fp32_model_path = "consolidated.00-of-01.pth"
fp32_model = torch.load(fp32_model_path, map_location='cpu')

# Convert the model to BF16
bf16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        bf16_model[key] = value.to(torch.bfloat16)
    elif isinstance(value, dict):
        bf16_model[key] = {k: v.to(torch.bfloat16) if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        bf16_model[key] = value

# Convert the model to FP16
fp16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        fp16_model[key] = value.half()
    elif isinstance(value, dict):
        fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        fp16_model[key] = value

# safetensors stores only a flat dict of tensors, so keep tensor entries and
# make them contiguous; this is where any non-tensor data gets dropped
def tensors_only(model):
    return {k: v.contiguous() for k, v in model.items() if isinstance(v, torch.Tensor)}

# Save the FP32 model in safetensors format
fp32_safetensors_path = "consolidated.00-of-01_fp32.safetensors"
save_file(tensors_only(fp32_model), fp32_safetensors_path)

# Save the BF16 model in safetensors format
bf16_safetensors_path = "consolidated.00-of-01_bf16.safetensors"
save_file(tensors_only(bf16_model), bf16_safetensors_path)

# Save the FP16 model in safetensors format
fp16_safetensors_path = "consolidated.00-of-01_fp16.safetensors"
save_file(tensors_only(fp16_model), fp16_safetensors_path)

# Save the BF16 model in .pth format (torch.save keeps non-tensor values too)
bf16_model_path = "consolidated.00-of-01_bf16.pth"
torch.save(bf16_model, bf16_model_path)
```
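
For anyone deciding between the two half-precision outputs: BF16 keeps FP32's exponent range but has fewer mantissa bits, while FP16 has more precision but a much smaller range (max around 65504), so large FP32 values can overflow to `inf` in FP16 but not in BF16. A small illustration:

```
import torch

x = torch.tensor([70000.0])   # within BF16's range, beyond FP16's max (~65504)
print(x.half())               # overflows to inf in FP16
print(x.to(torch.bfloat16))   # stays finite in BF16 (rounded to a nearby representable value)
```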