Dataset: cjvt/si_nli
Modalities: Text
Languages: Slovenian
Libraries: Datasets
Matej Klemen committed
Commit ea77161
1 Parent(s): f42214b

Fix issues with private dataset not loading properly

Files changed (2):
  1. README.md +103 -0
  2. si_nli.py +8 -4
README.md CHANGED
@@ -20,6 +20,109 @@ task_categories:
 task_ids:
 - multi-class-classification
 - natural-language-inference
+dataset_info:
+- config_name: default
+  features:
+  - name: pair_id
+    dtype: string
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: annotation1
+    dtype: string
+  - name: annotator1_id
+    dtype: string
+  - name: annotation2
+    dtype: string
+  - name: annotator2_id
+    dtype: string
+  - name: annotation3
+    dtype: string
+  - name: annotator3_id
+    dtype: string
+  - name: annotation_final
+    dtype: string
+  - name: label
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 1352635
+    num_examples: 4392
+  - name: validation
+    num_bytes: 164561
+    num_examples: 547
+  - name: test
+    num_bytes: 246518
+    num_examples: 998
+  download_size: 410093
+  dataset_size: 1763714
+- config_name: public
+  features:
+  - name: pair_id
+    dtype: string
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: annotation1
+    dtype: string
+  - name: annotator1_id
+    dtype: string
+  - name: annotation2
+    dtype: string
+  - name: annotator2_id
+    dtype: string
+  - name: annotation3
+    dtype: string
+  - name: annotator3_id
+    dtype: string
+  - name: annotation_final
+    dtype: string
+  - name: label
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 1352591
+    num_examples: 4392
+  - name: validation
+    num_bytes: 164517
+    num_examples: 547
+  - name: test
+    num_bytes: 246474
+    num_examples: 998
+  download_size: 410093
+  dataset_size: 1763582
+- config_name: private
+  features:
+  - name: pair_id
+    dtype: string
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: annotation1
+    dtype: string
+  - name: annotator1_id
+    dtype: string
+  - name: annotation2
+    dtype: string
+  - name: annotator2_id
+    dtype: string
+  - name: annotation3
+    dtype: string
+  - name: annotator3_id
+    dtype: string
+  - name: annotation_final
+    dtype: string
+  - name: label
+    dtype: string
+  splits:
+  - name: train
+  - name: validation
+  - name: test
+  download_size: 0
+  dataset_size: 0
 ---
 
 # Dataset Card for SI-NLI
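
The added `dataset_info` block registers three configurations (`default`, `public`, `private`) with identical features and records the split sizes for the two downloadable ones. As a quick orientation, a minimal loading sketch is shown below; it assumes the repository id `cjvt/si_nli` (inferred from the file names, not stated in this diff):

```python
from datasets import load_dataset

# The "public" configuration downloads the data automatically; per the
# dataset_info above it has 4392 train, 547 validation and 998 test examples.
sinli = load_dataset("cjvt/si_nli", "public")
print(sinli)

# Each example carries the premise/hypothesis pair, three per-annotator labels
# and the aggregated annotation, mirroring the features listed in the YAML.
example = sinli["train"][0]
print(example["premise"], example["hypothesis"], example["label"])
```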
si_nli.py CHANGED
@@ -77,6 +77,7 @@ class SINLI(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
+        split_prefix = ""
         if self.config.name == "public":
             urls = _URLS["si-nli"]
             data_dir = dl_manager.download_and_extract(urls)
@@ -89,33 +90,36 @@ class SINLI(datasets.GeneratorBasedBuilder):
             data_dir = dl_manager.manual_dir
 
         if data_dir is None:
-            return []
+            split_prefix = "dummy_"
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "file_path": os.path.join(data_dir, "SI-NLI", "train.tsv"),
-                    "split": "train"
+                    "split": f"{split_prefix}train"
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "file_path": os.path.join(data_dir, "SI-NLI", "dev.tsv"),
-                    "split": "dev"
+                    "split": f"{split_prefix}dev"
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "file_path": os.path.join(data_dir, "SI-NLI", "test.tsv"),
-                    "split": "test"
+                    "split": f"{split_prefix}test"
                 }
             )
         ]
 
     def _generate_examples(self, file_path, split):
+        if split.startswith("dummy"):
+            return None
+
         with open(file_path, encoding="utf-8") as f:
             reader = csv.reader(f, delimiter="\t", quotechar='"')
             header = next(reader)
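
For the `private` configuration the files have to be supplied manually; the script reads `train.tsv`, `dev.tsv` and `test.tsv` from an `SI-NLI` subdirectory of the provided directory. A minimal sketch of what loading could look like once the data is in place (the local path is a placeholder, and it assumes the standard `datasets` behaviour where `data_dir` is exposed to the script as `dl_manager.manual_dir`):

```python
from datasets import load_dataset

# Hypothetical local path; per si_nli.py the loader expects
# <data_dir>/SI-NLI/train.tsv, dev.tsv and test.tsv.
private = load_dataset(
    "cjvt/si_nli",
    "private",
    data_dir="/path/to/manual_download",
)
```

If no `data_dir` is given, the new code prefixes the split names with `dummy_` and `_generate_examples` returns before opening any file, instead of `_split_generators` returning an empty split list as before.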