Add files using large-upload tool
- .gitattributes +3 -0
- model_weights/model.decoder.layers.mlp.linear_fc2.weight/40.0.2 +3 -0
- model_weights/model.decoder.layers.mlp.linear_fc2.weight/55.0.0 +3 -0
- model_weights/model.decoder.layers.mlp.linear_fc2.weight/70.0.3 +3 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/.zarray +16 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/12.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/14.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/25.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/27.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/29.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/30.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/37.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/44.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/45.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/47.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/55.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/58.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/63.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/66.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/74.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/79.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/85.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/89.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/93.0 +0 -0
- model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/95.0 +0 -0
- model_weights/model.embedding.word_embeddings.weight/.zarray +16 -0
.gitattributes
CHANGED
@@ -232,3 +232,6 @@ model_weights/model.decoder.layers.mlp.linear_fc2.weight/15.0.0 filter=lfs diff=
 model_weights/model.decoder.layers.mlp.linear_fc2.weight/83.0.2 filter=lfs diff=lfs merge=lfs -text
 model_weights/model.decoder.layers.mlp.linear_fc2.weight/36.0.5 filter=lfs diff=lfs merge=lfs -text
 model_weights/model.decoder.layers.mlp.linear_fc2.weight/3.0.3 filter=lfs diff=lfs merge=lfs -text
+model_weights/model.decoder.layers.mlp.linear_fc2.weight/40.0.2 filter=lfs diff=lfs merge=lfs -text
+model_weights/model.decoder.layers.mlp.linear_fc2.weight/55.0.0 filter=lfs diff=lfs merge=lfs -text
+model_weights/model.decoder.layers.mlp.linear_fc2.weight/70.0.3 filter=lfs diff=lfs merge=lfs -text
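Each weight chunk uploaded here gets its own filter=lfs line in .gitattributes, so Git stores only a small pointer file while the actual bytes live in LFS storage. A minimal sketch of checking that the three new chunk files are covered by such entries; the local checkout path and the script itself are assumptions for illustration, not part of this commit:

from pathlib import Path

# Hypothetical local checkout of this repository.
repo = Path(".")

# Collect the exact-path LFS rules from .gitattributes; this repo lists each
# chunk file explicitly rather than using glob patterns.
lfs_paths = set()
for line in (repo / ".gitattributes").read_text().splitlines():
    parts = line.split()
    if "filter=lfs" in parts:
        lfs_paths.add(parts[0])

# The three chunk files added by this commit (taken from the diff above).
new_chunks = [
    "model_weights/model.decoder.layers.mlp.linear_fc2.weight/40.0.2",
    "model_weights/model.decoder.layers.mlp.linear_fc2.weight/55.0.0",
    "model_weights/model.decoder.layers.mlp.linear_fc2.weight/70.0.3",
]

for chunk in new_chunks:
    print(chunk, "tracked by LFS" if chunk in lfs_paths else "NOT tracked")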
model_weights/model.decoder.layers.mlp.linear_fc2.weight/40.0.2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f851fa60e2134794ad628d177da7b380b41f25099cd301ec2a5196004f59f5ca
+size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/55.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a62f6c62151b61257e610af77909f5de3c600b1cd1df729cc7c9c79316ed1550
+size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/70.0.3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaad7b642fbdb62aa18ff8c2c3005778d6184f2c1e1ec66fed95089cf137c2d8
+size 339738624
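Each of the three new chunks is committed as a Git LFS pointer: three text lines recording the spec version, the SHA-256 of the real payload, and its size (339,738,624 bytes each, consistent with a raw bfloat16 chunk of roughly 170 million elements). A minimal sketch of verifying a fetched chunk against its pointer; the helper names and file paths are hypothetical:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: Path) -> dict:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_chunk(pointer_path: Path, payload_path: Path) -> bool:
    # Compare a fetched payload against the oid and size recorded in its pointer.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = payload_path.read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage: the pointer text as stored in Git versus the payload
# fetched by `git lfs pull`, both under an assumed local path.
# verify_chunk(Path("pointers/40.0.2"),
#              Path("model_weights/model.decoder.layers.mlp.linear_fc2.weight/40.0.2"))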
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/.zarray
ADDED
@@ -0,0 +1,16 @@
+{
+    "chunks": [
+        1,
+        18432
+    ],
+    "compressor": null,
+    "dtype": "bfloat16",
+    "fill_value": null,
+    "filters": null,
+    "order": "C",
+    "shape": [
+        96,
+        18432
+    ],
+    "zarr_format": 2
+}
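This .zarray describes the QKV layer-norm weights as an uncompressed zarr v2 array of shape [96, 18432] in bfloat16, chunked one row per file (presumably one per decoder layer), so each chunk holds 18432 * 2 = 36,864 bytes, matching the 36.9 kB binaries listed below. NumPy has no native bfloat16 dtype, so the sketch below reads one raw chunk directly and upcasts it to float32 by bit-shifting; the chunk path and the little-endian assumption are illustrative, not taken from this diff:

import json
import numpy as np
from pathlib import Path

base = Path("model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight")

# Chunk layout and dtype come from the .zarray metadata shown above.
meta = json.loads((base / ".zarray").read_text())
chunk_elems = int(np.prod(meta["chunks"]))          # 1 * 18432 = 18432 elements
assert meta["compressor"] is None and meta["filters"] is None

# With no compressor or filters, each chunk file is just raw bfloat16 bytes
# (18432 * 2 = 36,864 bytes, i.e. the 36.9 kB files listed below).
raw = np.fromfile(base / "12.0", dtype="<u2")       # assumes little-endian storage
assert raw.size == chunk_elems

# bfloat16 -> float32: the 16 stored bits are the upper half of an IEEE float32.
values = (raw.astype(np.uint32) << 16).view(np.float32)
print(values.shape, values[:4])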
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/12.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/14.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/25.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/27.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/29.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/30.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/37.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/44.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/45.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/47.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/55.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/58.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/63.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/66.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/74.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/79.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/85.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/89.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/93.0
ADDED
Binary file (36.9 kB)
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_weight/95.0
ADDED
Binary file (36.9 kB)
model_weights/model.embedding.word_embeddings.weight/.zarray
ADDED
@@ -0,0 +1,16 @@
+{
+    "chunks": [
+        32000,
+        18432
+    ],
+    "compressor": null,
+    "dtype": "bfloat16",
+    "fill_value": null,
+    "filters": null,
+    "order": "C",
+    "shape": [
+        256000,
+        18432
+    ],
+    "zarr_format": 2
+}
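The word-embedding table is far larger: shape [256000, 18432] in bfloat16 with chunks of [32000, 18432], i.e. eight chunks along the vocabulary dimension at roughly 1.18 GB each. A small sketch deriving the chunk grid and per-chunk size from this metadata; the assumption that the chunk files follow zarr v2 dot-separated grid coordinates (0.0 through 7.0) is based on the naming seen elsewhere in this checkpoint, not on files present in this commit:

import json
import math
from pathlib import Path

meta = json.loads(Path(
    "model_weights/model.embedding.word_embeddings.weight/.zarray"
).read_text())

shape, chunks = meta["shape"], meta["chunks"]
bytes_per_chunk = 2 * math.prod(chunks)              # bfloat16 = 2 bytes/element
grid = [math.ceil(s / c) for s, c in zip(shape, chunks)]

print(f"chunk grid: {grid}, {bytes_per_chunk / 1e9:.2f} GB per chunk")
# Zarr v2 names chunks by their grid coordinates joined with '.', which is the
# pattern the other chunk files in this checkpoint follow, so the expected
# files here would be 0.0 through 7.0.
for i in range(grid[0]):
    for j in range(grid[1]):
        print(f"expected chunk file: {i}.{j}")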