Convert dataset to Parquet

#5 opened by albertvillanova (HF staff)
README.md CHANGED
@@ -21,6 +21,36 @@ task_ids: []
 paperswithcode_id: svhn
 pretty_name: Street View House Numbers
 dataset_info:
+- config_name: cropped_digits
+  features:
+  - name: image
+    dtype: image
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': '0'
+          '1': '1'
+          '2': '2'
+          '3': '3'
+          '4': '4'
+          '5': '5'
+          '6': '6'
+          '7': '7'
+          '8': '8'
+          '9': '9'
+  splits:
+  - name: train
+    num_bytes: 128062110.875
+    num_examples: 73257
+  - name: test
+    num_bytes: 44356634.0
+    num_examples: 26032
+  - name: extra
+    num_bytes: 965662156.625
+    num_examples: 531131
+  download_size: 1205637083
+  dataset_size: 1138080901.5
 - config_name: full_numbers
   features:
   - name: image
@@ -46,46 +76,33 @@ dataset_info:
            '9': '9'
   splits:
   - name: train
-    num_bytes: 390404309
+    num_bytes: 389782132.75
     num_examples: 33402
   - name: test
-    num_bytes: 271503052
+    num_bytes: 271279491.86
     num_examples: 13068
   - name: extra
-    num_bytes: 1868720340
+    num_bytes: 1864796784.036
     num_examples: 202353
-  download_size: 2636187279
-  dataset_size: 2530627701
+  download_size: 2530154571
+  dataset_size: 2525858408.646
+configs:
 - config_name: cropped_digits
-  features:
-  - name: image
-    dtype: image
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': '0'
-          '1': '1'
-          '2': '2'
-          '3': '3'
-          '4': '4'
-          '5': '5'
-          '6': '6'
-          '7': '7'
-          '8': '8'
-          '9': '9'
-  splits:
-  - name: train
-    num_bytes: 128364360
-    num_examples: 73257
-  - name: test
-    num_bytes: 44464040
-    num_examples: 26032
-  - name: extra
-    num_bytes: 967853504
-    num_examples: 531131
-  download_size: 1575594780
-  dataset_size: 1140681904
+  data_files:
+  - split: train
+    path: cropped_digits/train-*
+  - split: test
+    path: cropped_digits/test-*
+  - split: extra
+    path: cropped_digits/extra-*
+- config_name: full_numbers
+  data_files:
+  - split: train
+    path: full_numbers/train-*
+  - split: test
+    path: full_numbers/test-*
+  - split: extra
+    path: full_numbers/extra-*
 ---
 
 # Dataset Card for Street View House Numbers
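With the `configs` mapping above, `datasets` resolves the Parquet shards directly instead of running a loading script. A minimal usage sketch (the repo id `svhn` is assumed here; adjust it to wherever this dataset lives on the Hub):

from datasets import load_dataset

# Both configs declared in the metadata above are now backed by Parquet shards.
cropped = load_dataset("svhn", "cropped_digits", split="train")
print(cropped[0]["label"])   # class label 0-9 for the centered digit

full = load_dataset("svhn", "full_numbers", split="test")
print(full[0]["digits"])     # per-image bounding boxes and digit labels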
cropped_digits/extra-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44f85c26a0b6b484ca0fa61f2bf4166f7878ee27298ba92ebc4f7adf634baa76
+size 511374730
cropped_digits/extra-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a937f9590f4a9dd9d6d4085f4604adb38749df23531dbd938c618d87ef5a02c4
+size 511669862
cropped_digits/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d192ecc88ace148c87b907b11c7372647d09f3552ecf937021d8336be2efdbde
+size 47003111
cropped_digits/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f65cd79069d9d25048d58171cd729db01dd4aa7e7f597ce7fdd65efffe81013
+size 135589380
full_numbers/extra-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e53da719a7af57acf405c3df6ab3daa1bcded3a39ba7adf7c14e9c935ecc06f2
+size 469560924
full_numbers/extra-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c459196992009cc2bd78d0f1a935668239c8e46d8c5ba0cd15a398c2bf313be9
+size 466203106
full_numbers/extra-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6376c0b86938921f45eaba2bd2bc2f8f1e95d5a7a819b12361c418daa882deaa
+size 466211895
full_numbers/extra-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2719419f90ae20de08f31419b9b4e2bf5636521f88658d8fdd7738f8370f168
+size 466408081
full_numbers/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fe29a6d714edb34c2cbca46d3e4859c48d7d1ab8574155ff9bdd15fc31b58f0
+size 271664598
full_numbers/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d72729c7371ea5de71bb99093a7cbdcba58076236ca2f7005661cc8dd59211f6
+size 390105967
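Each ADDED entry above is a Git LFS pointer (spec version, blob sha256, byte size); the shards themselves are plain Parquet files and can be inspected without the `datasets` library. A quick sketch, again assuming the repo id `svhn`; the shard filename comes from the diff above:

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Download one shard added in this PR and inspect it directly.
path = hf_hub_download(
    repo_id="svhn",
    filename="cropped_digits/train-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.schema)    # expected: an image column (struct of bytes/path) and a label column
print(table.num_rows)  # 73257, matching num_examples for the cropped_digits train split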
svhn.py DELETED
@@ -1,199 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Street View House Numbers (SVHN) dataset."""
-
-import io
-import os
-
-import h5py
-import numpy as np
-import scipy.io as sio
-
-import datasets
-from datasets.tasks import ImageClassification
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@article{netzer2011reading,
-  title={Reading digits in natural images with unsupervised feature learning},
-  author={Netzer, Yuval and Wang, Tao and Coates, Adam and Bissacco, Alessandro and Wu, Bo and Ng, Andrew Y},
-  year={2011}
-}
-"""
-
-_DESCRIPTION = """\
-SVHN is a real-world image dataset for developing machine learning and object recognition algorithms with minimal requirement on data preprocessing and formatting.
-It can be seen as similar in flavor to MNIST (e.g., the images are of small cropped digits), but incorporates an order of magnitude more labeled data (over 600,000 digit images)
-and comes from a significantly harder, unsolved, real world problem (recognizing digits and numbers in natural scene images). SVHN is obtained from house numbers in Google Street View images.
-"""
-
-_HOMEPAGE = "http://ufldl.stanford.edu/housenumbers/"
-
-_LICENSE = "Custom (non-commercial)"
-
-_URLs = {
-    "full_numbers": [
-        "http://ufldl.stanford.edu/housenumbers/train.tar.gz",
-        "http://ufldl.stanford.edu/housenumbers/test.tar.gz",
-        "http://ufldl.stanford.edu/housenumbers/extra.tar.gz",
-    ],
-    "cropped_digits": [
-        "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
-        "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
-        "http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
-    ],
-}
-
-_DIGIT_LABELS = [str(num) for num in range(10)]
-
-
-class SVHN(datasets.GeneratorBasedBuilder):
-    """Street View House Numbers (SVHN) dataset."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="full_numbers",
-            version=VERSION,
-            description="Contains the original, variable-resolution, color house-number images with character level bounding boxes.",
-        ),
-        datasets.BuilderConfig(
-            name="cropped_digits",
-            version=VERSION,
-            description="Character level ground truth in an MNIST-like format. All digits have been resized to a fixed resolution of 32-by-32 pixels. The original character bounding boxes are extended in the appropriate dimension to become square windows, so that resizing them to 32-by-32 pixels does not introduce aspect ratio distortions. Nevertheless this preprocessing introduces some distracting digits to the sides of the digit of interest.",
-        ),
-    ]
-
-    def _info(self):
-        if self.config.name == "full_numbers":
-            features = datasets.Features(
-                {
-                    "image": datasets.Image(),
-                    "digits": datasets.Sequence(
-                        {
-                            "bbox": datasets.Sequence(datasets.Value("int32"), length=4),
-                            "label": datasets.ClassLabel(num_classes=10),
-                        }
-                    ),
-                }
-            )
-        else:
-            features = datasets.Features(
-                {
-                    "image": datasets.Image(),
-                    "label": datasets.ClassLabel(num_classes=10),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[ImageClassification(image_column="image", label_column="label")]
-            if self.config.name == "cropped_digits"
-            else None,
-        )
-
-    def _split_generators(self, dl_manager):
-        if self.config.name == "full_numbers":
-            train_archive, test_archive, extra_archive = dl_manager.download(_URLs[self.config.name])
-            for path, f in dl_manager.iter_archive(train_archive):
-                if path.endswith("digitStruct.mat"):
-                    train_annot_data = f.read()
-                    break
-            for path, f in dl_manager.iter_archive(test_archive):
-                if path.endswith("digitStruct.mat"):
-                    test_annot_data = f.read()
-                    break
-            for path, f in dl_manager.iter_archive(extra_archive):
-                if path.endswith("digitStruct.mat"):
-                    extra_annot_data = f.read()
-                    break
-            train_archive = dl_manager.iter_archive(train_archive)
-            test_archive = dl_manager.iter_archive(test_archive)
-            extra_archive = dl_manager.iter_archive(extra_archive)
-            train_filepath, test_filepath, extra_filepath = None, None, None
-        else:
-            train_annot_data, test_annot_data, extra_annot_data = None, None, None
-            train_archive, test_archive, extra_archive = None, None, None
-            train_filepath, test_filepath, extra_filepath = dl_manager.download(_URLs[self.config.name])
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "annot_data": train_annot_data,
-                    "files": train_archive,
-                    "filepath": train_filepath,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "annot_data": test_annot_data,
-                    "files": test_archive,
-                    "filepath": test_filepath,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="extra",
-                gen_kwargs={
-                    "annot_data": extra_annot_data,
-                    "files": extra_archive,
-                    "filepath": extra_filepath,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, annot_data, files, filepath):
-        if self.config.name == "full_numbers":
-
-            def _get_digits(bboxes, h5_file):
-                def key_to_values(key, bbox):
-                    if bbox[key].shape[0] == 1:
-                        return [int(bbox[key][0][0])]
-                    else:
-                        return [int(h5_file[bbox[key][i][0]][()].item()) for i in range(bbox[key].shape[0])]
-
-                bbox = h5_file[bboxes[0]]
-                assert bbox.keys() == {"height", "left", "top", "width", "label"}
-                bbox_columns = [key_to_values(key, bbox) for key in ["left", "top", "width", "height", "label"]]
-                return [
-                    {"bbox": [left, top, width, height], "label": label % 10}
-                    for left, top, width, height, label in zip(*bbox_columns)
-                ]
-
-            with h5py.File(io.BytesIO(annot_data), "r") as h5_file:
-                for path, f in files:
-                    root, ext = os.path.splitext(path)
-                    if ext != ".png":
-                        continue
-                    img_idx = int(os.path.basename(root)) - 1
-                    yield img_idx, {
-                        "image": {"path": path, "bytes": f.read()},
-                        "digits": _get_digits(h5_file["digitStruct/bbox"][img_idx], h5_file),
-                    }
-        else:
-            data = sio.loadmat(filepath)
-            for i, (image_array, label) in enumerate(zip(np.rollaxis(data["X"], -1), data["y"])):
-                yield i, {
-                    "image": image_array,
-                    "label": label.item() % 10,
-                }
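For reference, the deleted script built `cropped_digits` straight from the original `.mat` archives. A standalone sketch of that conversion, assuming `train_32x32.mat` has been downloaded from the SVHN homepage (the Parquet rows added in this PR should contain the same images and labels):

import numpy as np
import scipy.io as sio

# The 32x32 archives store images in data["X"] with shape (32, 32, 3, N) and
# labels in data["y"], where the digit 0 is encoded as 10 (hence the modulo).
data = sio.loadmat("train_32x32.mat")
images = np.rollaxis(data["X"], -1)  # (N, 32, 32, 3) uint8 arrays
labels = data["y"].squeeze() % 10    # remap label 10 -> 0
print(images.shape, labels[:10])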