Fill-Mask · Transformers · PyTorch · Safetensors · English · perceiver
nielsr (HF staff) committed
Commit 1d65fc8
Parent: 260b471

Update config

Files changed (1)
config.json (+7, -3)
@@ -9,10 +9,10 @@
   "d_latents": 1280,
   "d_model": 768,
   "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
   "image_size": 56,
   "initializer_range": 0.02,
   "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 2048,
   "model_type": "perceiver",
   "num_blocks": 1,
   "num_cross_attention_heads": 8,
@@ -20,11 +20,15 @@
   "num_latents": 256,
   "num_self_attends_per_block": 26,
   "num_self_attention_heads": 8,
-  "position_embedding_init_scale": 0.02,
+  "output_shape": [
+    1,
+    16,
+    224,
+    224
+  ],
   "qk_channels": 256,
   "samples_per_patch": 16,
   "self_attention_widening_factor": 1,
-  "seq_len": 2048,
   "torch_dtype": "float32",
   "train_size": [
     368,
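In short, the commit replaces the "seq_len" key with "max_position_embeddings" (same value, 2048), drops the "hidden_dropout_prob" and "position_embedding_init_scale" keys, and adds an explicit "output_shape" of [1, 16, 224, 224]. A minimal sketch of verifying the updated config with the transformers library is below; the repo id is a placeholder assumption, since the commit page does not name the repository.

# Minimal sketch: load the updated config and inspect the changed keys.
# "deepmind/multimodal-perceiver" is a placeholder assumption; substitute
# the actual model repository this commit belongs to.
from transformers import PerceiverConfig

config = PerceiverConfig.from_pretrained("deepmind/multimodal-perceiver")

print(config.max_position_embeddings)  # 2048 (replaces the removed "seq_len")
print(config.output_shape)             # [1, 16, 224, 224] (newly added)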