# Path to pretrained model or model identifier from huggingface.co/models
model_name_or_path: "bert-base-uncased"


# Path to the training data file
train_file: "./data/train.json"

# Path to the validation (dev) data file
dev_file: "./data/dev.json"

# Pretrained config name or path if not the same as model_name
config_name: null

# Pretrained tokenizer name or path if not the same as model_name
tokenizer_name: null

# Directory in which to cache downloaded pretrained models.
# Defaults to ~/.cache/huggingface/transformers
cache_dir: null

# The maximum total input sequence length.
# Sequences longer than max_seq_length will be split into multiple chunks.
max_seq_length: 512

# How many tokens the first span should have in each chunk.
# Note that this may not be honored when the span is too long.
doc_stride: 64

# The maximum number of tokens for the hypothesis.
# Hypotheses longer than this will be truncated.
max_query_length: 256

# Set to true if you are using an uncased model.
do_lower_case: true

# Batch size per GPU for training
per_gpu_train_batch_size: 8

# Batch size per GPU for evaluation
per_gpu_eval_batch_size: 8

# The initial learning rate for the Adam optimizer
learning_rate: !!float 3e-5

# Number of update steps to accumulate before performing a backward/update pass.
gradient_accumulation_steps: 1

# Weight decay to apply during optimization
weight_decay: 0.0

# Epsilon for the Adam optimizer
adam_epsilon: !!float 1e-8

# Maximum gradient norm for gradient clipping
max_grad_norm: 1.0

# Total number of training epochs to perform
num_epochs: 5.0

# If set, total number of training steps to perform. Conflicts with num_epochs.
max_steps: null

# Linear warmup over warmup_steps
warmup_steps: 200

# Language ID of the input for language-specific XLM models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)
lang_id: null

# Validate every n steps
valid_steps: 3000

# Whether to use early stopping based on validation performance
early_stopping: true

# Save the model every n steps
save_steps: -1

# Random seed for initialization
seed: 42

# Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit
fp16: false

# For fp16: Apex AMP optimization level, selected from ['O0', 'O1', 'O2', 'O3'].
# See details at https://nvidia.github.io/apex/amp.html
fp16_opt_level: "O1"

# Set to true to avoid using the GPU even when one is available
no_cuda: false

# Overwrite the cached training and evaluation sets
overwrite_cache: false

# Whether to weight class probabilities by span probabilities
weight_class_probs_by_span_probs: true

# Class loss is multiplied by this value
class_loss_weight: 0.1

# Either 'identification_classification' or 'classification'
task: "identification_classification"

# Whether to treat each hypothesis (query) as a single symbol instead of
# feeding in its full text description
symbol_based_hypothesis: false