fix label/text swap
something_something_v2.py  (CHANGED, +28 -15)
@@ -55,7 +55,8 @@ class SomethingSomethingV2(datasets.GeneratorBasedBuilder):
                     "video": datasets.Value("string"),
                     "text": datasets.Value("string"),
                     "label": datasets.features.ClassLabel(
-
+                        num_classes=len(SOMETHING_SOMETHING_V2_CLASSES),
+                        names=SOMETHING_SOMETHING_V2_CLASSES,
                     ),
                     "placeholders": datasets.Sequence(datasets.Value("string")),
                 }
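With this change the `label` column becomes a `ClassLabel` built from `SOMETHING_SOMETHING_V2_CLASSES`, a list defined elsewhere in the script. A minimal sketch of what that feature does, using a hypothetical two-entry stand-in for the real class list:

    import datasets

    # Hypothetical stand-in; the real SOMETHING_SOMETHING_V2_CLASSES holds every template.
    CLASSES = ["Approaching something with your camera", "Attaching something to something"]
    label_feature = datasets.features.ClassLabel(num_classes=len(CLASSES), names=CLASSES)

    print(label_feature.str2int("Attaching something to something"))  # 1
    print(label_feature.int2str(0))  # "Approaching something with your camera"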
@@ -74,31 +75,39 @@ class SomethingSomethingV2(datasets.GeneratorBasedBuilder):
             "Use command like `cat 20bn-something-something-v2-?? >> 20bn-something-something-v2.tar.gz` "
             "Place the `labels.zip` file and the tar file into a folder '/path/to/data/' and load the dataset using "
             "`load_dataset('something-something-v2', data_dir='/path/to/data')`"
-        )
-
+        )
+
     def _split_generators(self, dl_manager):
         data_dir = dl_manager.manual_dir
         labels_path = os.path.join(data_dir, "labels.zip")
         videos_path = os.path.join(data_dir, "20bn-something-something-v2.tar.gz")
         if not os.path.exists(labels_path):
-            raise FileNotFoundError(
+            raise FileNotFoundError(
+                f"labels.zip doesn't exist in {data_dir}. Please follow manual download instructions."
+            )

         if not os.path.exists(videos_path):
-            raise FileNotFoundError(
-
+            raise FileNotFoundError(
+                f"20bn-something-something-v2.tar.gz doesn't exist in {data_dir}. Please follow manual download instructions."
+            )
+
         labels_path = dl_manager.extract(labels_path)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "annotation_file": os.path.join(
+                    "annotation_file": os.path.join(
+                        labels_path, "labels", "train.json"
+                    ),
                     "video_files": dl_manager.iter_archive(videos_path),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "annotation_file": os.path.join(
+                    "annotation_file": os.path.join(
+                        labels_path, "labels", "validation.json"
+                    ),
                     "video_files": dl_manager.iter_archive(videos_path),
                 },
             ),
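The manual-download string above spells out the expected layout. A usage sketch that simply follows those instructions (the local paths are placeholders):

    # Reassemble the split archive as instructed, then point data_dir at the folder
    # that holds labels.zip and 20bn-something-something-v2.tar.gz:
    #
    #   cat 20bn-something-something-v2-?? >> 20bn-something-something-v2.tar.gz
    from datasets import load_dataset

    ds = load_dataset("something-something-v2", data_dir="/path/to/data")
    print(ds["train"][0]["text"])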
@@ -107,7 +116,9 @@ class SomethingSomethingV2(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "annotation_file": os.path.join(labels_path, "labels", "test.json"),
                     "video_files": dl_manager.iter_archive(videos_path),
-                    "labels_file": os.path.join(
+                    "labels_file": os.path.join(
+                        labels_path, "labels", "test-answers.csv"
+                    ),
                 },
             ),
         ]
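The TEST split now also receives a `labels_file` pointing at `test-answers.csv`, which `_generate_examples` presumably loads into the `labels` dict it looks up by annotation id. A hypothetical sketch of that parsing step; the `;` delimiter and the column order are assumptions, not something shown in this diff:

    import csv

    def read_test_answers(labels_file):
        # Assumed format: one "<video_id>;<label text>" row per test video.
        labels = {}
        with open(labels_file, encoding="utf-8") as f:
            for row in csv.reader(f, delimiter=";"):
                video_id, label = row[0], row[1]
                labels[video_id] = label
        return labels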
@@ -126,25 +137,27 @@ class SomethingSomethingV2(datasets.GeneratorBasedBuilder):
             annotations = json.load(fobj)
             for annotation in annotations:
                 if "template" in annotation:
-                    annotation["template"] =
+                    annotation["template"] = (
+                        annotation["template"].replace("[", "").replace("]", "")
+                    )
                 if labels:
                     annotation["template"] = labels[annotation["id"]]
                 data[annotation["id"]] = annotation

         idx = 0
-        for path, file in video_files:
+        for path, file in video_files:
             video_id = os.path.splitext(os.path.split(path)[1])[0]
-
+
             if video_id not in data:
                 continue
-
+
             info = data[video_id]
             yield idx, {
                 "video_id": video_id,
                 "video": file,
                 "placeholders": info.get("placeholders", []),
-                "label": info["
-                "text": info["
+                "label": info["label"] if "label" in info else -1,
+                "text": info["template"],
             }

             idx += 1
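The last two added lines are the actual label/text fix: the class value goes to `label` (falling back to -1 when the annotation carries no label, e.g. the test split) and the readable template goes to `text`. A small sketch of the yielded fields, using a hand-made annotation dict whose values are illustrative only and omitting the video file handle:

    # Hand-made example annotation; field values are illustrative, not real data.
    info = {
        "id": "12345",
        "template": "Attaching something to something",
        "placeholders": ["a sticker", "a laptop"],
        "label": "Attaching something to something",
    }

    example = {
        "video_id": info["id"],
        "placeholders": info.get("placeholders", []),
        "label": info["label"] if "label" in info else -1,  # class, or -1 if missing
        "text": info["template"],                           # human-readable template
    }
    print(example)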