tarekziade committed on
Commit 6fafb82
1 Parent(s): 3083a3c

Model save

README.md CHANGED
@@ -1,71 +1,66 @@
- ---
- tags:
- - image-to-text
- - image-captioning
- license: apache-2.0
- metrics:
- - rouge
- datasets:
- - nlphuji/flickr30k
- widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg
- example_title: Savanna
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
- example_title: Football Match
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg
- example_title: Airport
- base_model:
- - google/vit-base-patch16-224-in21k
-
- model-index:
- - name: mozilla/distilvit
- results:
- - task:
- type: image-to-text
- name: Image To Text
- dataset:
- name: nlphuji/flickr30k
- type: nlphuji/flickr30k
- metrics:
- - name: ROUGE-1
- type: rouge
- value: 43.006
- verified: true
- - name: ROUGE-2
- type: rouge
- value: 16.9939
- verified: true
- - name: ROUGE-L
- type: rouge
- value: 38.8923
- verified: true
- - name: ROUGE-LSUM
- type: rouge
- value: 38.8877
- verified: true
- - name: loss
- type: loss
- value: 0.19939416646957397
- - name: gen_len
- type: gen_len
- value: 11.327256736227712
- verified: true
- ---
-
- This model is a work in progress.
-
- Fine-tuned version of those base models:
-
- - a VIT model for the image encoder: https://huggingface.co/google/vit-base-patch16-224-in21k
- - a Distilled GPT-2 model for the text decoder: https://huggingface.co/distilbert/distilgpt2
-
- This model was trained on:
-
- - Flickr30k : https://huggingface.co/datasets/nlphuji/flickr30k
- - COCO 2017: https://cocodataset.org
-
- You can find the code used to create the model here: https://github.com/mozilla/distilvit
-
-
-
-
 
+ ---
+ license: apache-2.0
+ base_model: mozilla/distilvit
+ tags:
+ - generated_from_trainer
+ metrics:
+ - rouge
+ model-index:
+ - name: distilvit
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilvit
+
+ This model is a fine-tuned version of [mozilla/distilvit](https://huggingface.co/mozilla/distilvit) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Gen Len: 10.6487
+ - Loss: 0.1739
+ - Meteor: 0.4120
+ - Rouge1: 50.0916
+ - Rouge2: 24.7223
+ - Rougel: 46.9416
+ - Rougelsum: 46.9372
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
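As a usage reference, a minimal sketch: a VisionEncoderDecoder captioning checkpoint like this one is normally driven through the transformers `image-to-text` pipeline. The `mozilla/distilvit` repo id and the image path below are assumptions for illustration, not part of the generated card.

```python
# Minimal sketch, assuming the checkpoint is published as "mozilla/distilvit"
# and that "example.jpg" exists locally.
from transformers import pipeline

captioner = pipeline("image-to-text", model="mozilla/distilvit")

# Returns a list of dicts, e.g. [{"generated_text": "a dog running on a beach"}].
print(captioner("example.jpg"))
```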
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 100
+ - eval_batch_size: 100
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+
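The hyperparameters listed above map onto a `Seq2SeqTrainingArguments` configuration roughly as sketched below. This is an assumption-laden sketch (the `output_dir` is a placeholder, and the Adam betas/epsilon above are the library defaults), not the project's actual training script, which lives in the distilvit GitHub repository referenced in the previous README.

```python
# Sketch of training arguments matching the values reported above.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="distilvit",           # placeholder path
    learning_rate=5e-05,
    per_device_train_batch_size=100,  # "train_batch_size: 100"
    per_device_eval_batch_size=100,   # "eval_batch_size: 100"
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1,
    predict_with_generate=True,       # needed to log Gen Len / ROUGE / METEOR
)
```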
+ ### Training results
+
+ | Training Loss | Epoch | Step | Gen Len | Validation Loss | Meteor | Rouge1 | Rouge2 | Rougel | Rougelsum |
+ |:-------------:|:------:|:----:|:-------:|:---------------:|:------:|:-------:|:-------:|:-------:|:---------:|
+ | No log | 0.3891 | 100 | 10.4163 | 0.1764 | 0.4117 | 50.0198 | 24.6331 | 46.9071 | 46.8907 |
+ | No log | 0.7782 | 200 | 10.6487 | 0.1739 | 0.4120 | 50.0916 | 24.7223 | 46.9416 | 46.9372 |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.2
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1

config.json CHANGED
@@ -1,183 +1,184 @@
- {
- "_name_or_path": "distilvit-flickr",
- "architectures": [
- "VisionEncoderDecoderModel"
- ],
- "decoder": {
- "_name_or_path": "distilbert/distilgpt2",
- "_num_labels": 1,
- "activation_function": "gelu_new",
- "add_cross_attention": true,
- "architectures": [
- "GPT2LMHeadModel"
- ],
- "attn_pdrop": 0.1,
- "bad_words_ids": null,
- "begin_suppress_tokens": null,
- "bos_token_id": 50256,
- "chunk_size_feed_forward": 0,
- "cross_attention_hidden_size": null,
- "decoder_start_token_id": null,
- "diversity_penalty": 0.0,
- "do_sample": false,
- "early_stopping": false,
- "embd_pdrop": 0.1,
- "encoder_no_repeat_ngram_size": 0,
- "eos_token_id": 50256,
- "exponential_decay_length_penalty": null,
- "finetuning_task": null,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "id2label": {
- "0": "LABEL_0"
- },
- "initializer_range": 0.02,
- "is_decoder": true,
- "is_encoder_decoder": false,
- "label2id": {
- "LABEL_0": 0
- },
- "layer_norm_epsilon": 1e-05,
- "length_penalty": 1.0,
- "max_length": 20,
- "min_length": 0,
- "model_type": "gpt2",
- "n_ctx": 1024,
- "n_embd": 768,
- "n_head": 12,
- "n_inner": null,
- "n_layer": 6,
- "n_positions": 1024,
- "no_repeat_ngram_size": 0,
- "num_beam_groups": 1,
- "num_beams": 1,
- "num_return_sequences": 1,
- "output_attentions": false,
- "output_hidden_states": false,
- "output_scores": false,
- "pad_token_id": null,
- "prefix": null,
- "problem_type": null,
- "pruned_heads": {},
- "remove_invalid_values": false,
- "reorder_and_upcast_attn": false,
- "repetition_penalty": 1.0,
- "resid_pdrop": 0.1,
- "return_dict": true,
- "return_dict_in_generate": false,
- "scale_attn_by_inverse_layer_idx": false,
- "scale_attn_weights": true,
- "sep_token_id": null,
- "summary_activation": null,
- "summary_first_dropout": 0.1,
- "summary_proj_to_labels": true,
- "summary_type": "cls_index",
- "summary_use_proj": true,
- "suppress_tokens": null,
- "task_specific_params": {
- "text-generation": {
- "do_sample": true,
- "max_length": 50
- }
- },
- "temperature": 1.0,
- "tf_legacy_loss": false,
- "tie_encoder_decoder": false,
- "tie_word_embeddings": true,
- "tokenizer_class": null,
- "top_k": 50,
- "top_p": 1.0,
- "torch_dtype": null,
- "torchscript": false,
- "typical_p": 1.0,
- "use_bfloat16": false,
- "use_cache": true,
- "vocab_size": 50257
- },
- "decoder_start_token_id": 50256,
- "encoder": {
- "_name_or_path": "google/vit-base-patch16-224-in21k",
- "add_cross_attention": false,
- "architectures": [
- "ViTModel"
- ],
- "attention_probs_dropout_prob": 0.0,
- "bad_words_ids": null,
- "begin_suppress_tokens": null,
- "bos_token_id": null,
- "chunk_size_feed_forward": 0,
- "cross_attention_hidden_size": null,
- "decoder_start_token_id": null,
- "diversity_penalty": 0.0,
- "do_sample": false,
- "early_stopping": false,
- "encoder_no_repeat_ngram_size": 0,
- "encoder_stride": 16,
- "eos_token_id": null,
- "exponential_decay_length_penalty": null,
- "finetuning_task": null,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "hidden_act": "gelu",
- "hidden_dropout_prob": 0.0,
- "hidden_size": 768,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
- "image_size": 224,
- "initializer_range": 0.02,
- "intermediate_size": 3072,
- "is_decoder": false,
- "is_encoder_decoder": false,
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
- "layer_norm_eps": 1e-12,
- "length_penalty": 1.0,
- "max_length": 20,
- "min_length": 0,
- "model_type": "vit",
- "no_repeat_ngram_size": 0,
- "num_attention_heads": 12,
- "num_beam_groups": 1,
- "num_beams": 1,
- "num_channels": 3,
- "num_hidden_layers": 12,
- "num_return_sequences": 1,
- "output_attentions": false,
- "output_hidden_states": false,
- "output_scores": false,
- "pad_token_id": null,
- "patch_size": 16,
- "prefix": null,
- "problem_type": null,
- "pruned_heads": {},
- "qkv_bias": true,
- "remove_invalid_values": false,
- "repetition_penalty": 1.0,
- "return_dict": true,
- "return_dict_in_generate": false,
- "sep_token_id": null,
- "suppress_tokens": null,
- "task_specific_params": null,
- "temperature": 1.0,
- "tf_legacy_loss": false,
- "tie_encoder_decoder": false,
- "tie_word_embeddings": true,
- "tokenizer_class": null,
- "top_k": 50,
- "top_p": 1.0,
- "torch_dtype": null,
- "torchscript": false,
- "typical_p": 1.0,
- "use_bfloat16": false
- },
- "eos_token_id": 50256,
- "is_encoder_decoder": true,
- "model_type": "vision-encoder-decoder",
- "pad_token_id": 50256,
- "tie_word_embeddings": false,
- "transformers_version": "4.36.2"
- }
 
 
+ {
+ "_name_or_path": "mozilla/distilvit",
+ "architectures": [
+ "VisionEncoderDecoderModel"
+ ],
+ "decoder": {
+ "_name_or_path": "distilbert/distilgpt2",
+ "_num_labels": 1,
+ "activation_function": "gelu_new",
+ "add_cross_attention": true,
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 50256,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "embd_pdrop": 0.1,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 50256,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "is_decoder": true,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_epsilon": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 6,
+ "n_positions": 1024,
+ "no_repeat_ngram_size": 0,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "reorder_and_upcast_attn": false,
+ "repetition_penalty": 1.0,
+ "resid_pdrop": 0.1,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "sep_token_id": null,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "suppress_tokens": null,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 50257
+ },
+ "decoder_start_token_id": 50256,
+ "encoder": {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "add_cross_attention": false,
+ "architectures": [
+ "ViTModel"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "encoder_stride": 16,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "vit",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 12,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "patch_size": 16,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "qkv_bias": true,
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false
+ },
+ "eos_token_id": 50256,
+ "is_encoder_decoder": true,
+ "model_type": "vision-encoder-decoder",
+ "pad_token_id": 50256,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.2"
+ }
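The config pairs a `google/vit-base-patch16-224-in21k` encoder with a `distilbert/distilgpt2` decoder inside a `VisionEncoderDecoderModel`, reusing GPT-2's `<|endoftext|>` token (id 50256) as decoder start, EOS and pad token. A minimal sketch of how such a pairing is typically assembled; this is an assumption about the original setup, not the project's verbatim training code.

```python
# Sketch: assemble the encoder/decoder pairing described by the config above.
from transformers import AutoTokenizer, VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k",  # ViT image encoder
    "distilbert/distilgpt2",              # distilled GPT-2 text decoder
)

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")

# GPT-2 only defines <|endoftext|> (id 50256); the config reuses it for the
# decoder start, EOS and padding tokens.
model.config.decoder_start_token_id = tokenizer.eos_token_id
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.eos_token_id
```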
generation_config.json CHANGED
@@ -1,5 +1,5 @@
- {
- "bos_token_id": 50256,
- "eos_token_id": 50256,
- "transformers_version": "4.36.2"
- }

+ {
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "transformers_version": "4.40.2"
+ }
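The generation config only pins GPT-2's `<|endoftext|>` token (id 50256) as BOS/EOS. It can be inspected directly; a minimal sketch, where the repo id is an assumption:

```python
# Sketch: load and inspect the generation defaults shipped with the checkpoint.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("mozilla/distilvit")
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 50256 50256
```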
metrics.txt ADDED
@@ -0,0 +1,2 @@
+ {'eval_loss': 0.17637203633785248, 'eval_rouge1': 50.0198, 'eval_rouge2': 24.6331, 'eval_rougeL': 46.9071, 'eval_rougeLsum': 46.8907, 'eval_meteor': 0.4116637061220643, 'eval_gen_len': 10.41630231105559, 'eval_runtime': 233.9623, 'eval_samples_per_second': 13.686, 'eval_steps_per_second': 0.141, 'epoch': 0.38910505836575876}
+ {'eval_loss': 0.17389048635959625, 'eval_rouge1': 50.0916, 'eval_rouge2': 24.7223, 'eval_rougeL': 46.9416, 'eval_rougeLsum': 46.9372, 'eval_meteor': 0.41204454830693554, 'eval_gen_len': 10.648657089319176, 'eval_runtime': 232.7742, 'eval_samples_per_second': 13.756, 'eval_steps_per_second': 0.142, 'epoch': 0.7782101167315175}
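The two lines above are the Trainer's evaluation logs at steps 100 and 200; the ROUGE values are already scaled to 0-100, while METEOR is on a 0-1 scale. A minimal sketch of producing comparable scores with the `evaluate` library, using placeholder captions:

```python
# Sketch: compute ROUGE and METEOR for generated captions, mirroring the
# scaling used in the logs above. The caption lists are placeholders.
import evaluate

rouge = evaluate.load("rouge")
meteor = evaluate.load("meteor")

predictions = ["a dog running on the beach"]
references = ["a dog is running along the beach"]

rouge_scores = rouge.compute(predictions=predictions, references=references)
meteor_score = meteor.compute(predictions=predictions, references=references)

print({k: round(v * 100, 4) for k, v in rouge_scores.items()})  # 0-100 scale
print(meteor_score["meteor"])                                   # 0-1 scale
```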
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dfbf6a06c87f2ecf5debe4ca7a967db958ef5ef91b9e95d0968db292b29bd044
  size 729979160

  version https://git-lfs.github.com/spec/v1
+ oid sha256:eb10c7e6caaf59d39c22b873daa806f504ea670c4757dbd67a8b5e3be97be6b3
  size 729979160
preprocessor_config.json CHANGED
@@ -1,22 +1,36 @@
- {
- "do_normalize": true,
- "do_rescale": true,
- "do_resize": true,
- "image_mean": [
- 0.5,
- 0.5,
- 0.5
- ],
- "image_processor_type": "ViTFeatureExtractor",
- "image_std": [
- 0.5,
- 0.5,
- 0.5
- ],
- "resample": 2,
- "rescale_factor": 0.00392156862745098,
- "size": {
- "height": 224,
- "width": 224
- }
- }

+ {
+ "_valid_processor_keys": [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format"
+ ],
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTFeatureExtractor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
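The processor resizes images to 224x224, rescales pixel values by 1/255 and normalizes each channel with mean and std 0.5 (standard ViT preprocessing). A minimal sketch of running the full captioning path by hand; the repo id, image path, and the presence of tokenizer files in the repo are assumptions:

```python
# Sketch: preprocess an image as described above and caption it manually.
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel

repo = "mozilla/distilvit"  # assumed repo id
processor = AutoImageProcessor.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)  # assumes tokenizer files are shipped
model = VisionEncoderDecoderModel.from_pretrained(repo)

image = Image.open("example.jpg").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (1, 3, 224, 224)

output_ids = model.generate(pixel_values, max_length=20)  # max_length matches the config
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```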
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd3daca9a2e8e9dd775d2cd9f9a0e33ee454cf4e9d1bb12d250711d49ddadcfd
- size 5112

  version https://git-lfs.github.com/spec/v1
+ oid sha256:5d6dbda62eac800cee527380ece0a33f7df91a801dc0529c3c35871cb1123276
+ size 5176