Transformers
PyTorch
English
bridgetower
gaudi
Inference Endpoints
shaoyent committed on
Commit
5c296b7
1 Parent(s): beac4dd
Files changed (5) hide show
  1. config.json +55 -0
  2. preprocessor_config.json +6 -0
  3. pytorch_model.bin +3 -0
  4. tokenizer.json +0 -0
  5. vocab.json +0 -0
config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "share_cross_modal_transformer_layers": true,
3
+ "drop_rate": 0.1,
4
+ "head_hidden_scale": 2,
5
+ "hidden_act": "gelu",
6
+ "hidden_size": 1024,
7
+ "initializer_factor": 1,
8
+ "is_encoder_decoder": false,
9
+ "layer_norm_eps": 1e-05,
10
+ "share_link_tower_layers": false,
11
+ "link_tower_type": "add",
12
+ "num_attention_heads": 16,
13
+ "num_hidden_layers": 6,
14
+ "tie_word_embeddings": false,
15
+ "text_config_dict": null,
16
+ "init_layernorm_from_vision_encoder": false,
17
+ "text_config": {
18
+ "architectures": [
19
+ "BridgeTowerTextModel"
20
+ ],
21
+ "vocab_size": 50265,
22
+ "hidden_size": 1024,
23
+ "num_hidden_layers": 24,
24
+ "num_attention_heads": 16,
25
+ "intermediate_size": 4096,
26
+ "hidden_act": "gelu",
27
+ "hidden_dropout_prob": 0.1,
28
+ "attention_probs_dropout_prob": 0.1,
29
+ "max_position_embeddings": 514,
30
+ "type_vocab_size": 1,
31
+ "initializer_factor": 1,
32
+ "initializer_range": 0.02,
33
+ "layer_norm_eps": 1e-05,
34
+ "pad_token_id": 1,
35
+ "bos_token_id": 0,
36
+ "eos_token_id": 2,
37
+ "position_embedding_type": "absolute",
38
+ "use_cache": true,
39
+ "classifier_dropout": null
40
+ },
41
+ "vision_config_dict": null,
42
+ "vision_config": {
43
+ "architectures": [
44
+ "BridgeTowerVisionModel"
45
+ ],
46
+ "hidden_size": 1024,
47
+ "num_hidden_layers": 24,
48
+ "patch_size": 14,
49
+ "image_size": 294,
50
+ "initializer_factor": 1,
51
+ "stop_gradient": false,
52
+ "share_layernorm": true,
53
+ "remove_last_layer": false
54
+ }
55
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "max_text_len":50,
3
+ "size":294,
4
+ "tokenizer":"roberta-large",
5
+ "vocab_size":50265
6
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71db1ed29917be01ea08c8de6fb586e0344df0fed3188ff7c19082c99f384d33
3
+ size 3668732213
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab.json ADDED
The diff for this file is too large to render. See raw diff