mike157 committed
Commit
55c393a
1 Parent(s): 3132c7e

Initial commit

README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - stackexchange_titlebody_best_voted_answer_jsonl
+ metrics:
+ - rouge
+ model-index:
+ - name: flan-t5-base-flant5-apple-support
+   results:
+   - task:
+       name: Sequence-to-sequence Language Modeling
+       type: text2text-generation
+     dataset:
+       name: stackexchange_titlebody_best_voted_answer_jsonl
+       type: stackexchange_titlebody_best_voted_answer_jsonl
+       config: apple
+       split: train[:10%]
+       args: apple
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 12.7991
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # flan-t5-base-flant5-apple-support
+
+ This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the stackexchange_titlebody_best_voted_answer_jsonl dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.9676
+ - Rouge1: 12.7991
+ - Rouge2: 2.244
+ - Rougel: 9.8075
+ - Rougelsum: 11.3618
+ - Gen Len: 18.9087
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:------:|:---------:|:-------:|
+ | 3.2673 | 1.0 | 1157 | 3.0350 | 12.4094 | 2.1794 | 9.5255 | 10.9739 | 18.9723 |
+ | 3.1854 | 2.0 | 2314 | 2.9992 | 12.4579 | 2.1512 | 9.5232 | 11.0049 | 18.9647 |
+ | 3.1006 | 3.0 | 3471 | 2.9792 | 12.9794 | 2.2794 | 9.9245 | 11.5019 | 18.9436 |
+ | 3.0751 | 4.0 | 4628 | 2.9711 | 12.6779 | 2.1828 | 9.6962 | 11.221 | 18.9137 |
+ | 3.0532 | 5.0 | 5785 | 2.9676 | 12.7991 | 2.244 | 9.8075 | 11.3618 | 18.9087 |
+
+
+ ### Framework versions
+
+ - Transformers 4.25.1
+ - Pytorch 1.13.1+cu117
+ - Datasets 2.8.0
+ - Tokenizers 0.13.2
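
The model card above describes a text2text-generation fine-tune of google/flan-t5-base on Apple StackExchange question/answer pairs. As a rough usage illustration (not part of this commit), the following sketch loads the model for inference; the repo id `mike157/flan-t5-base-flant5-apple-support` is an assumption based on the commit author and the model name.

```python
# Hedged sketch: the repo id below is assumed from the commit author + model name.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "mike157/flan-t5-base-flant5-apple-support"  # assumed, not confirmed by the commit
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

question = "How do I stop my MacBook from sleeping when an external display is connected?"
inputs = tokenizer(question, return_tensors="pt", truncation=True, max_length=512)
output_ids = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```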
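
The "Training hyperparameters" list in the card maps fairly directly onto `Seq2SeqTrainingArguments`. A minimal sketch of that mapping follows; the evaluation/save strategies and `predict_with_generate` flag are assumptions, since the card only lists the optimizer and scheduler settings (the Adam betas and epsilon shown are the library defaults).

```python
# Hedged sketch of the hyperparameters above as Seq2SeqTrainingArguments.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="flan-t5-base-flant5-apple-support",
    learning_rate=5e-5,             # learning_rate: 5e-05
    per_device_train_batch_size=8,  # train_batch_size: 8
    per_device_eval_batch_size=8,   # eval_batch_size: 8
    seed=42,                        # seed: 42
    lr_scheduler_type="linear",     # lr_scheduler_type: linear
    num_train_epochs=5,             # num_epochs: 5
    evaluation_strategy="epoch",    # assumption: metrics are reported once per epoch
    save_strategy="epoch",          # assumption: checkpoints at steps 4628/5785 are epoch ends
    predict_with_generate=True,     # assumption: needed for the ROUGE / Gen Len metrics
)
```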
checkpoint-4628/config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "google/flan-t5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
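
The `task_specific_params` block above carries the generation presets inherited from T5. A small sketch of applying the `summarization` entry at generation time follows; the tokenizer files live in the repository root rather than the checkpoint directory, so loading the base tokenizer here is an assumption, as is the local checkpoint path.

```python
# Hedged sketch: apply the "summarization" preset from config.json when generating.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")  # tokenizer files sit in the repo root
model = AutoModelForSeq2SeqLM.from_pretrained("checkpoint-4628")  # local checkpoint path from this commit

params = dict(model.config.task_specific_params["summarization"])
prefix = params.pop("prefix")  # "summarize: "
text = prefix + "Long Apple support thread text goes here ..."

inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
output_ids = model.generate(**inputs, **params)  # num_beams=4, max_length=200, min_length=30, ...
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```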
checkpoint-4628/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba6efbea1cc4e2a52a4400842c8902ece53a429d9946aab214b6a8293e5c79d2
+ size 1980790149
checkpoint-4628/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01b0fd3cf40fbe1c1ef54548a1ea439eb0d9fe8132569e85d76352f14795ea27
+ size 990408885
checkpoint-4628/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:955ee0bfe6a04dee79ba68207a08001a87c23be240e4119d895aeba2919b4ca7
+ size 14575
checkpoint-4628/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f911251a52e1b1400cce870f1603d8d9c0a0d8654830e32c7ee2bf795f8b668
+ size 627
checkpoint-4628/trainer_state.json ADDED
@@ -0,0 +1,122 @@
+ {
+   "best_metric": 2.971137762069702,
+   "best_model_checkpoint": "flan-t5-base-flant5-apple-support/checkpoint-4628",
+   "epoch": 4.0,
+   "global_step": 4628,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.43,
+       "learning_rate": 4.567847882454624e-05,
+       "loss": 3.3318,
+       "step": 500
+     },
+     {
+       "epoch": 0.86,
+       "learning_rate": 4.135695764909248e-05,
+       "loss": 3.2673,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 18.972318339100347,
+       "eval_loss": 3.0350406169891357,
+       "eval_rouge1": 12.4094,
+       "eval_rouge2": 2.1794,
+       "eval_rougeL": 9.5255,
+       "eval_rougeLsum": 10.9739,
+       "eval_runtime": 319.5992,
+       "eval_samples_per_second": 14.468,
+       "eval_steps_per_second": 1.809,
+       "step": 1157
+     },
+     {
+       "epoch": 1.3,
+       "learning_rate": 3.7035436473638726e-05,
+       "loss": 3.2145,
+       "step": 1500
+     },
+     {
+       "epoch": 1.73,
+       "learning_rate": 3.271391529818496e-05,
+       "loss": 3.1854,
+       "step": 2000
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 18.964749134948097,
+       "eval_loss": 2.9991722106933594,
+       "eval_rouge1": 12.4579,
+       "eval_rouge2": 2.1512,
+       "eval_rougeL": 9.5232,
+       "eval_rougeLsum": 11.0049,
+       "eval_runtime": 319.123,
+       "eval_samples_per_second": 14.49,
+       "eval_steps_per_second": 1.811,
+       "step": 2314
+     },
+     {
+       "epoch": 2.16,
+       "learning_rate": 2.8392394122731204e-05,
+       "loss": 3.1388,
+       "step": 2500
+     },
+     {
+       "epoch": 2.59,
+       "learning_rate": 2.4070872947277444e-05,
+       "loss": 3.1006,
+       "step": 3000
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 18.9435553633218,
+       "eval_loss": 2.9792306423187256,
+       "eval_rouge1": 12.9794,
+       "eval_rouge2": 2.2794,
+       "eval_rougeL": 9.9245,
+       "eval_rougeLsum": 11.5019,
+       "eval_runtime": 317.3217,
+       "eval_samples_per_second": 14.572,
+       "eval_steps_per_second": 1.821,
+       "step": 3471
+     },
+     {
+       "epoch": 3.03,
+       "learning_rate": 1.9749351771823683e-05,
+       "loss": 3.1209,
+       "step": 3500
+     },
+     {
+       "epoch": 3.46,
+       "learning_rate": 1.5427830596369925e-05,
+       "loss": 3.0729,
+       "step": 4000
+     },
+     {
+       "epoch": 3.89,
+       "learning_rate": 1.1106309420916162e-05,
+       "loss": 3.0751,
+       "step": 4500
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 18.91371107266436,
+       "eval_loss": 2.971137762069702,
+       "eval_rouge1": 12.6779,
+       "eval_rouge2": 2.1828,
+       "eval_rougeL": 9.6962,
+       "eval_rougeLsum": 11.221,
+       "eval_runtime": 320.2879,
+       "eval_samples_per_second": 14.437,
+       "eval_steps_per_second": 1.805,
+       "step": 4628
+     }
+   ],
+   "max_steps": 5785,
+   "num_train_epochs": 5,
+   "total_flos": 2.533328301116621e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-4628/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1095378c2c7db3c0c46c33986d0a4a6429dd87fcac7b6a289b7bb86cacd1f51b
+ size 3579
checkpoint-5785/config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "google/flan-t5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
checkpoint-5785/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7b32b70900bec2518a12ae77e7901cce9ab80babbb93a3480294e3547d2f5cd
+ size 1980790149
checkpoint-5785/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72a33370e685330c539eed67a76d44b89b501ff3993051c75af9cbe463f04141
+ size 990408885
checkpoint-5785/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68cb5f9002319a3a5765f2485fa622072931bef72ce0e0d8671a3e2ebc2d6b37
+ size 14575
checkpoint-5785/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad4c035a57b447214e0a9d68e2c34ae9bd233103f8203cd632a545abed7363e9
+ size 627
checkpoint-5785/trainer_state.json ADDED
@@ -0,0 +1,147 @@
+ {
+   "best_metric": 2.967562437057495,
+   "best_model_checkpoint": "flan-t5-base-flant5-apple-support/checkpoint-5785",
+   "epoch": 5.0,
+   "global_step": 5785,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.43,
+       "learning_rate": 4.567847882454624e-05,
+       "loss": 3.3318,
+       "step": 500
+     },
+     {
+       "epoch": 0.86,
+       "learning_rate": 4.135695764909248e-05,
+       "loss": 3.2673,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 18.972318339100347,
+       "eval_loss": 3.0350406169891357,
+       "eval_rouge1": 12.4094,
+       "eval_rouge2": 2.1794,
+       "eval_rougeL": 9.5255,
+       "eval_rougeLsum": 10.9739,
+       "eval_runtime": 319.5992,
+       "eval_samples_per_second": 14.468,
+       "eval_steps_per_second": 1.809,
+       "step": 1157
+     },
+     {
+       "epoch": 1.3,
+       "learning_rate": 3.7035436473638726e-05,
+       "loss": 3.2145,
+       "step": 1500
+     },
+     {
+       "epoch": 1.73,
+       "learning_rate": 3.271391529818496e-05,
+       "loss": 3.1854,
+       "step": 2000
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 18.964749134948097,
+       "eval_loss": 2.9991722106933594,
+       "eval_rouge1": 12.4579,
+       "eval_rouge2": 2.1512,
+       "eval_rougeL": 9.5232,
+       "eval_rougeLsum": 11.0049,
+       "eval_runtime": 319.123,
+       "eval_samples_per_second": 14.49,
+       "eval_steps_per_second": 1.811,
+       "step": 2314
+     },
+     {
+       "epoch": 2.16,
+       "learning_rate": 2.8392394122731204e-05,
+       "loss": 3.1388,
+       "step": 2500
+     },
+     {
+       "epoch": 2.59,
+       "learning_rate": 2.4070872947277444e-05,
+       "loss": 3.1006,
+       "step": 3000
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 18.9435553633218,
+       "eval_loss": 2.9792306423187256,
+       "eval_rouge1": 12.9794,
+       "eval_rouge2": 2.2794,
+       "eval_rougeL": 9.9245,
+       "eval_rougeLsum": 11.5019,
+       "eval_runtime": 317.3217,
+       "eval_samples_per_second": 14.572,
+       "eval_steps_per_second": 1.821,
+       "step": 3471
+     },
+     {
+       "epoch": 3.03,
+       "learning_rate": 1.9749351771823683e-05,
+       "loss": 3.1209,
+       "step": 3500
+     },
+     {
+       "epoch": 3.46,
+       "learning_rate": 1.5427830596369925e-05,
+       "loss": 3.0729,
+       "step": 4000
+     },
+     {
+       "epoch": 3.89,
+       "learning_rate": 1.1106309420916162e-05,
+       "loss": 3.0751,
+       "step": 4500
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 18.91371107266436,
+       "eval_loss": 2.971137762069702,
+       "eval_rouge1": 12.6779,
+       "eval_rouge2": 2.1828,
+       "eval_rougeL": 9.6962,
+       "eval_rougeLsum": 11.221,
+       "eval_runtime": 320.2879,
+       "eval_samples_per_second": 14.437,
+       "eval_steps_per_second": 1.805,
+       "step": 4628
+     },
+     {
+       "epoch": 4.32,
+       "learning_rate": 6.784788245462403e-06,
+       "loss": 3.0641,
+       "step": 5000
+     },
+     {
+       "epoch": 4.75,
+       "learning_rate": 2.4632670700086435e-06,
+       "loss": 3.0532,
+       "step": 5500
+     },
+     {
+       "epoch": 5.0,
+       "eval_gen_len": 18.908737024221452,
+       "eval_loss": 2.967562437057495,
+       "eval_rouge1": 12.7991,
+       "eval_rouge2": 2.244,
+       "eval_rougeL": 9.8075,
+       "eval_rougeLsum": 11.3618,
+       "eval_runtime": 318.8859,
+       "eval_samples_per_second": 14.5,
+       "eval_steps_per_second": 1.813,
+       "step": 5785
+     }
+   ],
+   "max_steps": 5785,
+   "num_train_epochs": 5,
+   "total_flos": 3.166660376395776e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
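
`trainer_state.json` is plain JSON, so the per-epoch evaluation history above can be read out directly. A small sketch follows, using the file path as committed in this repo:

```python
# Hedged sketch: summarise the evaluation entries in log_history.
import json

with open("checkpoint-5785/trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"], "from", state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_loss" in entry:  # evaluation records; training records carry "loss" instead
        print(f'epoch {entry["epoch"]:.0f}: eval_loss={entry["eval_loss"]:.4f}, rouge1={entry["eval_rouge1"]}')
```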
checkpoint-5785/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1095378c2c7db3c0c46c33986d0a4a6429dd87fcac7b6a289b7bb86cacd1f51b
+ size 3579
logs/1673395208.2439482/events.out.tfevents.1673395208.150-136-90-253.68652.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86dfff20b60f34f7e8f93ac0e9489054ea07479cab3744788c84370ccf4e6a90
+ size 5790
logs/1673443771.419182/events.out.tfevents.1673443771.150-136-90-253.70371.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e97fab284b8eddf54fa571d0d91a38899d7da6dd0b23e8d9d76b044d7d44b751
+ size 5790
logs/events.out.tfevents.1673395208.150-136-90-253.68652.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c299afa661465f8093b7e4162ccb54d721a3afdbdad49e9b24bd07b60d59b361
+ size 9675
logs/events.out.tfevents.1673443771.150-136-90-253.70371.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:adefc99b38d3c35eb06388fb05730ca5d0a9e1594dab417f63f3c29b2f2019c7
+ size 9460
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,113 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "model_max_length": 512,
+   "name_or_path": "google/flan-t5-base",
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": "/home/younes_huggingface_co/.cache/huggingface/hub/models--google--t5-v1_1-base/snapshots/650d7745bf1e502d6949b22cc19155cd656d3d4e/special_tokens_map.json",
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
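
`tokenizer_config.json` pins the T5 tokenizer settings used for this fine-tune: the 100 `<extra_id_*>` sentinel tokens and a `model_max_length` of 512, matching the model's `n_positions`. A quick sketch of inspecting them follows; loading from `"."` assumes the tokenizer files sit in this repository's root, as committed above.

```python
# Hedged sketch: inspect the tokenizer settings committed above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")   # tokenizer.json / tokenizer_config.json / special_tokens_map.json
print(tokenizer.model_max_length)                # 512, per tokenizer_config.json
print(len(tokenizer.additional_special_tokens))  # 100 <extra_id_*> sentinel tokens

# Inputs longer than model_max_length should be truncated explicitly:
encoded = tokenizer("a long support question ...", truncation=True, max_length=512)
```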