repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/pv4ger.py | """PV4GER dataset.
1. Install the AWS CLI. Instructions here: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
(requires sudo, so you may need to do it locally then copy the data over)
2. Make an AWS account and sign into it then navigate here: https://console.aws.amazon.com/iam/
3. Create a key pair (Access Key ID, Secret Access Key) following the instructions here: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-creds
4. Clone git repo: git clone https://github.com/kdmayer/3D-PV-Locator.git && cd 3D-PV-Locator
5. Configure AWS, follow default settings: aws configure
6. Copy imagery (note that requester pays data transfer costs); run once for each of classification/ and segmentation/: aws s3 cp --recursive --request-payer requester s3://pv4ger/NRW_image_data/classification/ dataset/pv4ger_v1.0/
"""
import sys
from pathlib import Path
from typing import List, Optional
import numpy as np
import pandas as pd
import rasterio
from PIL import Image
from tqdm import tqdm
from geobench import io
from geobench.benchmark.dataset_converters.util import center_to_transform
from geobench.io.dataset import Band, Sample
sys.path.append(str(Path.cwd()))
DATASET_NAME = "pv4ger"
SRC_DATASET_DIR = io.CCB_DIR / "source" / DATASET_NAME # type: ignore
# CLS_DATASET_DIR = io.CCB_DIR / "converted" / f"{DATASET_NAME}_classification"
# SEG_DATASET_DIR = io.CCB_DIR / "converted" / f"{DATASET_NAME}_segmentation"
DATASET_DIR = io.CCB_DIR / "converted" / f"{DATASET_NAME}_classification" # type: ignore
SPATIAL_RESOLUTION = 0.1
PATCH_SIZE = 320
BANDS_INFO = io.make_rgb_bands(SPATIAL_RESOLUTION)
LABELS = ["no solar pv", "solar pv"]
SEG_LABEL_BAND = io.SegmentationClasses("label", spatial_resolution=SPATIAL_RESOLUTION, n_classes=2, class_names=LABELS)
def get_transform(img_path: str):
"""Create transform based on image.
Args:
img_path: path to image
Returns:
rasterio transform
"""
# Get lat center and lon center from img path
lat_center, lon_center = map(float, Path(img_path).stem.split(","))
# Lat/lons are swapped for much of the dataset, fix this.
if lat_center < lon_center:
lat_center, lon_center = lon_center, lat_center
radius_in_meter = PATCH_SIZE / 2 * SPATIAL_RESOLUTION
transform = center_to_transform(lat_center, lon_center, radius_in_meter, (PATCH_SIZE, PATCH_SIZE))
# transform_center = rasterio.transform.from_origin(lon_center, lat_center, SPATIAL_RESOLUTION, SPATIAL_RESOLUTION)
# lon_corner, lat_corner = transform_center * [-PATCH_SIZE // 2, -PATCH_SIZE // 2]
# transform = rasterio.transform.from_origin(lon_corner, lat_corner, SPATIAL_RESOLUTION, SPATIAL_RESOLUTION)
return transform
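# Hedged sanity check (illustrative file name, not shipped with the dataset): the
# stem encodes "lat,lon", so the center pixel of the returned transform should map
# back close to that point.
def _demo_get_transform() -> None:
    transform = get_transform("51.123,7.456.png")
    lon, lat = transform * (PATCH_SIZE // 2, PATCH_SIZE // 2)
    assert abs(lon - 7.456) < 1e-3 and abs(lat - 51.123) < 1e-3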
def get_bands(img: "np.typing.NDArray[np.int_]", transform) -> List[Band]:
"""Retrieve RGB bands.
Args:
img: image array
transform: transformation applied to image
Returns:
list of retrieved bands
"""
bands = []
for i in range(3):
band_data = io.Band(
data=img[:, :, i],
band_info=BANDS_INFO[i],
spatial_resolution=SPATIAL_RESOLUTION,
transform=transform,
crs="EPSG:4326",
)
bands.append(band_data)
return bands
def load_cls_sample(img_path: Path, label: int) -> Sample:
"""Create classification sample.
Args:
img_path: path to image
label: classification label
Returns:
classification sample
"""
transform = get_transform(str(img_path))
img = np.array(Image.open(img_path).convert("RGB"))
bands = get_bands(img, transform)
return io.Sample(bands, label=label, sample_name=img_path.stem)
def load_seg_sample(img_path: Path, mask_path: Path) -> Sample:
"""Create segmentation sample.
Args:
img_path: path to image
mask_path: path to image mask
Returns:
segmentation sample
"""
transform = get_transform(str(img_path))
img = np.array(Image.open(img_path).convert("RGB"))
mask = np.array(Image.open(mask_path))
mask[mask > 0] = 1
bands = get_bands(img, transform)
label = io.Band(
data=mask, band_info=SEG_LABEL_BAND, spatial_resolution=SPATIAL_RESOLUTION, transform=transform, crs="EPSG:4326"
)
return io.Sample(bands, label=label, sample_name=img_path.stem)
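# Hedged usage sketch (the paths below are placeholders, not files shipped with
# the repo); both loaders georeference the RGB bands from the "lat,lon" stem:
#
#     sample = load_cls_sample(Path("pv4ger_v1.0/train/1/51.123,7.456.png"), label=1)
#     sample = load_seg_sample(
#         Path("pv4ger_v1.0/train/image/51.123,7.456.png"),
#         Path("pv4ger_v1.0/train/mask/51.123,7.456.png"),
#     )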
def convert(max_count: Optional[int] = None, dataset_dir: Path = DATASET_DIR, classification: bool = True) -> None:
"""Convert pv4ger dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
classification: whether or not to convert the classification version
"""
if classification:
label_type = io.Classification(2, LABELS) # type: ignore
# dataset_dir = CLS_DATASET_DIR
else:
label_type = SEG_LABEL_BAND # type: ignore
dataset_dir = dataset_dir.with_name(f"{DATASET_NAME}_segmentation")
dataset_dir.mkdir(exist_ok=True, parents=True)
task_specs = io.TaskSpecifications(
dataset_name=dataset_dir.name,
patch_size=(PATCH_SIZE, PATCH_SIZE),
n_time_steps=1,
bands_info=BANDS_INFO,
bands_stats=None, # Will be automatically written with inspect script
label_type=label_type,
spatial_resolution=SPATIAL_RESOLUTION,
)
task_specs.save(str(dataset_dir), overwrite=True)
rows = []
if classification:
for split in ["train", "val", "test"]:
for label in [0, 1]:
split_label_dir = SRC_DATASET_DIR / split / str(label)
for path in split_label_dir.iterdir():
if path.suffix == ".png":
rows.append([split, label, path])
else:
for split in ["train", "val", "test"]:
split_dir = SRC_DATASET_DIR / split / "image"
for image_path in split_dir.iterdir():
if image_path.suffix == ".png":
mask_path = image_path.parent.parent / "mask" / image_path.name
rows.append([split, mask_path, image_path])
df = pd.DataFrame(rows, columns=["Split", "Label", "Path"])
df["Split"] = df["Split"].str.replace("val", "valid")
partition = io.Partition()
sample_count = 0
for _, row in tqdm(df.iterrows(), total=df.shape[0]):
split = row["Split"]
row_path: Path = row["Path"]
if classification:
sample = load_cls_sample(row_path, row["Label"])
else:
sample = load_seg_sample(row_path, row["Label"])
sample_name = row_path.stem
partition.add(split, sample_name)
sample.write(str(dataset_dir))
sample_count += 1
# temporary for creating small datasets for development purpose
if max_count is not None and sample_count >= max_count:
break
partition.save(str(dataset_dir), "original", as_default=True)
if __name__ == "__main__":
convert(classification=True)
# convert(classification=False)
| 7,052 | 33.404878 | 198 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/convert_all_datasets.py | """Convert all datasets."""
# TODO the tqdm progress bars will not work properly. Perhaps we should revert to simple logging instead of tqdm + prints.
import multiprocessing
import shutil
from importlib import import_module
from pathlib import Path
from geobench import io
CONVERTERS = [
"brick_kiln",
"neon_tree",
"cv4a_kenya_crop_type",
"benin_smallholder_cashews",
"eurosat",
"benin_smallholder_cashews",
"so2sat",
"nz_cattle_detection",
]
MAX_COUNT = 1000
def convert(module_name: str) -> None:
"""Convert dataset given converter module name.
Args:
module_name: name of dataset converter
"""
converter = import_module("geobench.benchmark.dataset_converters." + module_name)
assert Path(converter.DATASET_DIR).parent == Path(
str(io.datasets_dir)
), f"{Path(converter.DATASET_DIR).parent} vs {io.datasets_dir}"
assert Path(converter.DATASET_DIR).name == converter.DATASET_NAME
if Path(converter.DATASET_DIR).exists():
shutil.rmtree(converter.DATASET_DIR)
converter.convert(max_count=MAX_COUNT)
if __name__ == "__main__":
response = input(f"This will first delete all datasets in {io.datasets_dir}. To proceed, press 'y'.")
if response.lower() == "y":
jobs = []
for converter in CONVERTERS:
job = multiprocessing.Process(target=convert, args=(converter,))
jobs.append(job)
job.start()
for job in jobs:
job.join()
else:
print("No dataset deleted.")
| 1,536 | 26.446429 | 122 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/inspect_tools.py | """Inspect tools."""
import math
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from warnings import warn
import ipyplot
import numpy as np
import pandas as pd
from ipyleaflet import Map, Marker, Rectangle
from matplotlib import cm
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw
from rasterio import warp
from rasterio.crs import CRS
from tqdm.auto import tqdm
from geobench import io
from geobench.io import dataset as io_ds
from geobench.io.dataset import (
Band,
GeobenchDataset,
HyperSpectralBands,
Sample,
SegmentationClasses,
compute_dataset_statistics,
)
def compare(a, b, name, src_a, src_b) -> None:
"""Compare two values."""
if a != b:
print(f"Consistancy error with {name} between:\n {src_a}\n & {src_b}.\n {str(a)}\n != {str(b)}")
def plot_band_stats(band_values: Dict[str, np.ndarray], n_cols: int = 4, n_hist_bins: Optional[int] = None) -> None:
"""Plot a histogram of band values for each band.
Args:
band_values: dict of 1d arrays representing flattened values for each band.
n_cols: number of columns in the histogram grid
n_hist_bins: number of bins to use for histograms. See pyplot.hist's bins argument for more details
"""
items = list(band_values.items())
items.sort(key=lambda item: item[0])
n_rows = int(math.ceil(len(items) / n_cols))
fig1, ax_matrix = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(20, 5))
for i, (key, value) in enumerate(tqdm(items, desc="Plotting statistics")):
ax = ax_matrix.flat[i]
ax.set_title(key)
ax.hist(value, bins=n_hist_bins)
plt.tight_layout()
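# Hedged usage sketch with synthetic band values (left commented out because it
# opens a matplotlib figure):
#
#     rng = np.random.default_rng(0)
#     plot_band_stats(
#         {"red": rng.normal(size=1000), "nir": rng.normal(size=1000)},
#         n_cols=2,
#         n_hist_bins=50,
#     )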
def float_image_to_uint8(
images: Union[Sequence[np.ndarray], np.ndarray],
percentile_max=99.9,
ensure_3_channels=True,
per_channel_scaling=False,
) -> np.ndarray:
"""Convert a batch of images to uint8 such that 99.9% of values fit in the range (0,255).
Args:
images: batch of images
percentile_max: maximum percentile value
ensure_3_channels: whether or not to return 3 channel dimensions
per_channel_scaling: whether or not to apply the scaling per channel
Returns:
converted batch of images
"""
images = np.asarray(images)
if images.dtype == np.uint8:
return images
images = images.astype(np.float64)
if per_channel_scaling:
mx = np.percentile(images, q=percentile_max, axis=(0, 1, 2), keepdims=True)
mx = np.squeeze(mx, axis=0)
mn = np.percentile(images, q=100 - percentile_max, axis=(0, 1, 2), keepdims=True)
mn = np.squeeze(mn, axis=0)  # squeeze mn as well so per-image broadcasting keeps each image 3-d
else:
mn = np.percentile(images, q=100 - percentile_max)
mx = np.percentile(images, q=percentile_max)
new_images = []
for image in images:
image = np.clip((image - mn) * 255 / (mx - mn), 0, 255)
if ensure_3_channels:
if image.ndim == 2:
image = np.stack((image, image, image), axis=2)
if image.shape[2] == 1:
image = np.concatenate((image, image, image), axis=2)
new_images.append(image.astype(np.uint8))
return np.asarray(new_images)
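# Minimal hedged sketch on synthetic data: a batch with one single-channel float
# image is rescaled to uint8 and replicated to 3 channels (ensure_3_channels
# defaults to True).
def _demo_float_image_to_uint8() -> None:
    images = np.linspace(0.0, 1.0, 64).reshape(1, 8, 8)
    out = float_image_to_uint8(images)
    assert out.shape == (1, 8, 8, 3) and out.dtype == np.uint8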
def extract_images(
samples: List[Sample],
band_names: Sequence[str] = ("red", "green", "blue"),
percentile_max: float = 99.9,
resample: bool = False,
fill_value: Optional[int] = None,
date_index: int = 0,
) -> Tuple[np.ndarray, Any]:
"""Extract images from samples.
Args:
samples: set of samples
band_names: band names to extract from sample
percentile_max: maximum percentile value
resample: whether or not to resample
fill_value: fill values
date_index: for timeseries which date to index
Returns:
images and labels extracted from sample
"""
images = []
labels = []
for sample in samples:
img_data, _, _ = sample.pack_to_4d(
sample.dates[date_index : date_index + 1], band_names=band_names, resample=resample, fill_value=fill_value
)
img_data = img_data[0].astype(np.float32)
# TODO We should pass labelType from task specs and compare that instead of the class
# Once we change this function, we should update all inspection notebooks
# if isinstance(sample.label, np.ndarray):
# for i, label in enumerate(sample.label):
# if label == 1:
# images.append(img_data)
# labels.append(i)
# else:
images.append(img_data)
labels.append(sample.label)
images = float_image_to_uint8(np.asarray(images), percentile_max) # type:ignore
return images, labels # type:ignore
def callback_hyperspectral_to_rgb(
samples: List[Sample], band_name: str, percentile_max: float = 99.9, img_width: int = 128
) -> Callable[[int, int], Any]:
"""Create callable to convert hyperspectral to rgb for plotting.
Args:
samples: set of samples
band_name: name of the hyperspectral band to extract from
percentile_max: maximum percentile value for scaling
img_width: display width of the plotted images
"""
def callback(center, width):
rgb_extractor = make_rgb_extractor(center, width)
images = hyperspectral_to_rgb(samples, band_name, rgb_extractor, percentile_max)
return ipyplot.plot_images(images=images, img_width=img_width, max_images=len(samples))
return callback
def make_rgb_extractor(center, width):
"""Create callable to extract rgb data from hyperspectral.
Args:
center: index of the central band of the extracted window
width: width (in number of bands) of each of the three averaged windows
Returns:
callable
"""
def callback(hs_data):
def _extract_band(start, stop):
return hs_data[:, :, int(start) : int(stop)].mean(axis=2)
h, w, d = hs_data.shape
_center = max(0, center - width * 1.5) + width * 1.5
_center = min(d, _center + width * 1.5) - width * 1.5
red = _extract_band(_center - width * 1.5, _center - width * 0.5)
green = _extract_band(_center - width * 0.5, _center + width * 0.5)
blue = _extract_band(_center + width * 0.5, _center + width * 1.5)
return np.dstack((red, green, blue))
return callback
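# Hedged sketch on a synthetic hyperspectral cube (illustrative values): with
# center=50 and width=10, the red/green/blue channels average the band windows
# [35, 45), [45, 55) and [55, 65) respectively.
def _demo_make_rgb_extractor() -> None:
    hs_data = np.random.rand(4, 4, 100)
    rgb = make_rgb_extractor(center=50, width=10)(hs_data)
    assert rgb.shape == (4, 4, 3)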
def hyperspectral_to_rgb(samples: List[Sample], band_name: str, rgb_extract, percentile_max=99.9):
"""Convert hyperspectral to rgb."""
images = []
for sample in samples:
band_array, _, _ = sample.get_band_array(band_names=(band_name,))
assert band_array.shape == (1, 1), f"Got shape: {band_array.shape}."
band = band_array[0, 0]
assert isinstance(band.band_info, HyperSpectralBands), f"Got type: {type(band.band_info)}."
hs_data = band.data
images.append(rgb_extract(hs_data))
return float_image_to_uint8(images, percentile_max, per_channel_scaling=True)
def extract_label_as_image(samples, rgb_images=None, opacity=0.3, percentile_max=99.9):
"""If label is a band, will convert into an image. Otherwise, will raise an error."""
images = []
for sample in samples:
label = sample.label
if not isinstance(label, Band):
raise ValueError("sample.label must be of type Band")
if isinstance(label.band_info, SegmentationClasses):
image = map_class_id_to_color(label.data, label.band_info.n_classes)
else:
image = label.data
images.append(image)
label_images = float_image_to_uint8(images, percentile_max)
if rgb_images is not None:
label_images = [
label_img.squeeze().astype(np.float32) * opacity + rgb_img.astype(np.float32) * (1 - opacity)
for label_img, rgb_img in zip(label_images, rgb_images)
]
label_images = float_image_to_uint8(label_images)
return label_images
def overlay_label(image, label, label_patch_size, opacity=0.5):
"""Overlay label on image."""
if label_patch_size is not None:
scale = np.array(image.shape[:2]) / np.array(label_patch_size)
else:
scale = np.array([1.0, 1.0])
if isinstance(label, (list, tuple)):  # TODO hack that needs to change
im = Image.fromarray(image)
ctxt = ImageDraw.Draw(im)
for obj in label:
if isinstance(obj, dict) and "xmin" in obj:
coord = np.array([[obj["xmin"], obj["ymin"]], [obj["xmax"], obj["ymax"]]])
ctxt.rectangle(list((coord * scale).flat), outline=(255, 0, 0))
elif isinstance(obj, (tuple, list)) and len(obj) == 2:
size = 5 * scale
coord = [obj[0] - size[0], obj[1] - size[1], obj[0] + size[0], obj[1] + size[1]]
ctxt.rectangle(coord, outline=(255, 0, 0))
return np.array(im) * opacity + (1 - opacity) * image
elif isinstance(label, io.Band):
label_img = map_class_id_to_color(label.data, label.band_info.n_classes)
(label_img,) = float_image_to_uint8([label_img])
return label_img * opacity + (1 - opacity) * image
else:
return image
def extract_bands_with_labels(samples, band_groups=None, draw_label=False, label_patch_size=None, date_index=0):
"""Extract bands."""
if band_groups is None:
band_groups = [(band_name,) for band_name in samples[0].band_names]
all_images = []
band_names = []
all_labels = []
unique_band_names = []
for i, band_group in enumerate(band_groups):
images, labels = extract_images(samples, band_names=band_group, date_index=date_index)
if draw_label:
images = [overlay_label(image, sample.label, label_patch_size) for image, sample in zip(images, samples)]
all_images.extend(images)
all_labels.extend(labels)
group_name = "-".join(band_group)
unique_band_names.append(group_name)
band_names.extend((group_name,) * len(images))
if isinstance(samples[0].label, Band):
rgb_images, _ = extract_images(samples, band_names=("red", "green", "blue"), date_index=date_index)
label_images = extract_label_as_image(samples, rgb_images, percentile_max=99)
all_images.extend(label_images)
all_labels.extend((None,) * len(label_images))
band_names.extend(("label",) * len(label_images))
unique_band_names.append("label")
return all_images, band_names, all_labels, unique_band_names
def pack_hyperspectral(img: np.ndarray, n_rows: int, n_cols: int):
"""Extract multiple triplet of channels and concatenated them as a grid of images."""
height, width, n_channels = img.shape
assert n_rows * n_cols * 3 <= n_channels
offset = int((n_channels - n_rows * n_cols * 3) / 2)
img = img[:, :, offset : n_rows * n_cols * 3 + offset]
img_grid = np.reshape(np.moveaxis(img, -1, 0), (n_rows, n_cols, 3, height, width))
img_grid = np.moveaxis(img_grid, 2, -1) # move the channel back to the end
assert img_grid.shape == (n_rows, n_cols, height, width, 3)
return img_grid.swapaxes(1, 2).reshape(n_rows * height, n_cols * width, 3)
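# Hedged shape check on synthetic data: 12 channels packed as a 2 x 2 grid of
# RGB tiles turns an 8 x 8 x 12 cube into one 16 x 16 x 3 mosaic.
def _demo_pack_hyperspectral() -> None:
    img = np.random.rand(8, 8, 12)
    grid = pack_hyperspectral(img, n_rows=2, n_cols=2)
    assert grid.shape == (16, 16, 3)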
def extract_bands(samples, band_groups=None, draw_label=False, label_patch_size=None, date_index=0):
"""For backward compatibility."""
return extract_bands_with_labels(samples, band_groups, draw_label, label_patch_size, date_index)[:2]
def center_coord(band):
"""Find center coordinates."""
center = np.array(band.data.shape[:2]) / 2.0
center = transform_to_4326(band.transform, band.crs, center)
return tuple(center[::-1])
def transform_to_4326(transform, crs, coord):
"""Transform `coord` from band.crs to EPSG4326."""
coord = transform * coord
if crs != CRS.from_epsg(4326):
xs = np.array([coord[0]])
ys = np.array([coord[1]])
xs, ys = warp.transform(src_crs=crs, dst_crs=CRS.from_epsg(4326), xs=xs, ys=ys)
coord = (xs[0], ys[0])
return coord
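# Hedged example (illustrative values): when the CRS is already EPSG:4326, the
# function reduces to applying the affine transform (pixel -> lon/lat).
#
#     import rasterio.transform
#     t = rasterio.transform.from_origin(7.0, 51.0, 0.1, 0.1)
#     transform_to_4326(t, CRS.from_epsg(4326), (0, 0))  # -> (7.0, 51.0)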
def get_rect(band):
"""Obtain a georeferenced rectangle ready to display in ipyleaflet."""
sw = transform_to_4326(band.transform, band.crs, (0, 0))
ne = transform_to_4326(band.transform, band.crs, band.data.shape[:2])
return Rectangle(bounds=(sw[::-1], ne[::-1]))
def leaflet_map(samples):
"""Position all samples on a world map using ipyleaflet. Experimental feature."""
# TODO need to use reproject to increase compatibility
# https://github.com/jupyter-widgets/ipyleaflet/blob/master/examples/Numpy.ipynb
map = Map(center=center_coord(samples[0].bands[0]), zoom=7)
map.layout.height = "800px"
for sample in tqdm(samples):
band = sample.bands[0]
if band.crs is None or band.transform is None:
warn("Unknown transformation or crs.")
continue
name = sample.sample_name
map.add_layer(Marker(location=center_coord(band), draggable=False, opacity=0.5, title=name, alt=name))
map.add_layer(get_rect(band))
return map
def load_and_verify_samples(
dataset_dir, n_samples, n_hist_bins=100, check_integrity=True, split=None, n_value_per_image=1000
):
"""High level function. Loads samples, perform some statistics and plot histograms."""
dataset = GeobenchDataset(dataset_dir, split=split)
samples = list(tqdm(dataset.iter_dataset(n_samples), desc="Loading Samples"))
if check_integrity:
io.check_dataset_integrity(dataset, samples=samples)
band_values, band_stats = compute_dataset_statistics(samples, n_value_per_image=n_value_per_image)
plot_band_stats(band_values=band_values, n_hist_bins=n_hist_bins)
return dataset, samples, band_values, band_stats
load_and_veryify_samples = load_and_verify_samples # compatibility
def map_class_id_to_color(id_array, n_classes, background_id=0, background_color=(0, 0, 0)):
"""Attribute a color for each classes using a rainbow colormap."""
colors = cm.hsv(np.linspace(0, 1, n_classes + 1))
colors = colors[:, :-1] # drop the last column since it corresponds to alpha channel.
colors = colors[:-1] # drop the last color since it's almost the same as the 1st color.
colors[background_id, :] = background_color
image = np.array([channel[id_array] for channel in colors.T])  # avoid shadowing the builtin map
return np.moveaxis(image, 0, 2)
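# Hedged sketch: class ids become RGB colors, with the background id mapped to
# black by default.
def _demo_map_class_id_to_color() -> None:
    ids = np.array([[0, 1], [2, 0]])
    image = map_class_id_to_color(ids, n_classes=3)
    assert image.shape == (2, 2, 3)
    assert np.all(image[0, 0] == 0)  # background stays black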
def summarize_band_info(band_info_list: List[io.BandInfo]):
"""Summarize band info."""
sentinel2_count = 0
sentinel1_count = 0
spectral_count = 0
elevation_resolution = None
hs_resolution = None
resolution_dict = {}
for band_info in band_info_list:
if isinstance(band_info, io_ds.SpectralBand):
spectral_count += 1
if isinstance(band_info, io_ds.Sentinel1):
sentinel1_count += 1
if isinstance(band_info, io_ds.Sentinel2):
sentinel2_count += 1
if isinstance(band_info, io_ds.ElevationBand):
elevation_resolution = band_info.spatial_resolution
if isinstance(band_info, io_ds.HyperSpectralBands):
hs_resolution = band_info.spatial_resolution
resolution_dict[band_info.name.lower()] = band_info.spatial_resolution
for name in band_info.alt_names:
resolution_dict[name.lower()] = band_info.spatial_resolution
RGB_resolution: Any = [resolution_dict.get(color, None) for color in ("red", "green", "blue")]
if RGB_resolution[0] == RGB_resolution[1] and RGB_resolution[0] == RGB_resolution[2]:
RGB_resolution = RGB_resolution[0]
return {
"RGB res": RGB_resolution,
"NIR res": resolution_dict.get("nir", None),
"# Sentinel2": sentinel2_count,
"# Sentinel1": sentinel1_count,
"Elevation res": elevation_resolution,
"HS res": hs_resolution,
"# Spectral": spectral_count,
"# Bands": len(band_info_list),
}
# Temporary structure mapping task to sensor type (For display)
SENSORS = {
"forestnet_v1.0": "Landsat",
"eurosat": "Sentinel-2",
"brick_kiln_v1.0": "Sentinel-2",
"so2sat": "Sentinel-2, Sentinel-1",
"pv4ger_classification": "RGB",
"geolifeclef-2022": "RGBN, Elevation",
"bigearthnet": "Sentinel-2",
"pv4ger_segmentation": "RGB",
"nz_cattle_segmentation": "RGB",
"NeonTree_segmentation": "RGB, Hyperspectral (Neon), Elevation (Lidar)",
"smallholder_cashew": "Sentinel-2",
"southAfricaCropType": "Sentinel-2",
"cvpr_chesapeake_landcover": "RGBN",
"seasonet": "Sentinel-2",
}
DISPLAY_NAMES = {
"forestnet_v1.0": "m-forestnet",
"eurosat": "m-eurosat",
"brick_kiln_v1.0": "m-brick-kiln",
"so2sat": "m-so2sat",
"pv4ger_classification": "m-pv4ger",
"geolifeclef-2022": "m-geolifeclef",
"bigearthnet": "m-bigearthnet",
"pv4ger_segmentation": "m-pv4ger-seg",
"nz_cattle_segmentation": "m-nz-cattle",
"NeonTree_segmentation": "m-NeonTree",
"smallholder_cashew": "m-cashew-plant.",
"southAfricaCropType": "m-SA-crop-type",
"cvpr_chesapeake_landcover": "m-chesapeake",
"vit_small_patch16_224": "ViT-S-timm",
"scratch_vit_small_patch16_224": "ViT-S-Rnd",
"vit_tiny_patch16_224": "ViT-T-timm",
"swinv2_tiny_window16_256": "SwinV2-T-timm",
"convnext_base": "ConvNeXt-B-timm",
"resnet18": "ResNet18-timm",
"resnet50": "ResNet50-timm",
"millionaid_resnet50": "ResNet50-MillionAID",
"moco_resnet50": "ResNet50-MoCo-S2",
"moco_resnet50-multi": "ResNet50-MoCo-S2-multi",
"moco_resnet18": "ResNet18-MoCo-S2",
"scratch_resnet18": "ResNet18-Rnd",
"scratch_resnet50": "ResNet50-Rnd",
"resnet18_Unet": "ResNet18-U-Net-timm",
"resnet50_Unet": "ResNet50-U-Net-timm",
"resnet101_Unet": "ResNet101-U-Net-timm",
"resnet18_DeepLabV3": "ResNet18 DeepLabV3-timm",
"resnet50_DeepLabV3": "ResNet50 DeepLabV3-timm",
"resnet101_DeepLabV3": "ResNet101 DeepLabV3-timm",
"moco_vit_small_patch16_224": "ViT-S-MoCo-S2",
"moco_vit_small_patch16_224-multi": "ViT-S-MoCo-S2-multi",
"dino_resnet50": "ResNet50-DINO-S2",
"dino_resnet50-multi": "ResNet50-DINO-S2-multi",
"dino_vit_small_patch16_224": "ViT-S-DINO-S2",
"dino_vit_small_patch16_224-multi": "ViT-S-DINO-S2-multi",
}
def collect_task_info(task, fix_task_shape=False):
"""Collect information for the given task."""
# loss = task.eval_loss
# if isinstance(loss, type):
# loss = loss()
try:
dataset = task.get_dataset(split="train")
partition = dataset.active_partition.partition_dict
n_train = len(partition["train"])
n_valid = len(partition["valid"])
n_test = len(partition["test"])
n_geoinfo = 0
for band in dataset[0].bands:
if band.transform is not None:
n_geoinfo += 1
except Exception as e:
print(e)
n_train, n_valid, n_test = -1, -1, -1
n_classes = getattr(task.label_type, "n_classes", -1)
# shapes = [band.data.shape for band in dataset[0].bands]
largest_shape = dataset[0].largest_shape()
if task.patch_size != largest_shape:
print(f" *WARNING* task.patch_size = {task.patch_size} != dataset[0].largest_shape() = {largest_shape}.")
if fix_task_shape:
dataset_dir = io.CCB_DIR / task.benchmark_name / task.dataset_name
print(f"Overwritint task_info.pkl to {dataset_dir}.")
task.patch_size = largest_shape
task.save(dataset_dir, overwrite=True)
task_dict = {
"Name": task.dataset_name,
"Image Size": " x ".join([str(size) for size in task.patch_size]),
# "Loss": str(loss),
"Label Type": task.label_type.__class__.__name__,
"# Classes": int(n_classes),
"# Time Steps": task.n_time_steps,
"Train Size": n_train,
"Val Size": n_valid,
"Test Size": n_test,
"Sensors": SENSORS.get(task.dataset_name, None),
"n_geoinfo": n_geoinfo,
}
task_dict.update(summarize_band_info(task.bands_info))
return task_dict, dataset
def collect_benchmark_info(benchmark_name):
"""Collect information for eacth task in the benchmark."""
data = []
for task in io.task_iterator(io.CCB_DIR / benchmark_name):
print(task.dataset_name)
task_dict, _ = collect_task_info(task)
data.append(task_dict)
return data
def benchmark_data_frame(benchmark_name):
"""Format benchmark information into panda data frame."""
task_dicts = collect_benchmark_info(benchmark_name)
column_order = (
"Name",
"Image Size",
"Label Type",
"# Classes",
"Train Size",
"Val Size",
"Test Size",
"# Time Steps",
"# Bands",
"# Sentinel2",
"RGB res",
"NIR res",
"HS res",
"Elevation res",
"Sensors",
"n_geoinfo",
)
df = pd.DataFrame.from_records(task_dicts, columns=column_order)
pd.set_option("max_colwidth", 300)
return df
def extract_classification_samples(dataset: io.GeobenchDataset, num_samples=8, rng=np.random):
"""Extract `num_samples` for each class in `dataset`."""
label_map = dataset.task_specs.get_label_map()
n_classes = len(label_map)
n_per_class = np.ceil(num_samples / n_classes)
samples = []
for label, names in label_map.items():
for sample_name in rng.choice(names, size=int(n_per_class), replace=False):
samples.append(dataset.get_sample(sample_name))
return samples[:num_samples]
def replace_str(name):
"""Replace some strings to a more display ready version."""
replace_dict = {
"Land principally occupied by agriculture, with significant areas of natural vegetation": "Ag. and vegetation",
"Non-irrigated arable land": "Non-irrigated land",
"Complex cultivation patterns": "Cultivation patterns",
"Fruit trees and berry plantations": "Fruit trees and berry",
}
for key, val in replace_dict.items():
if name is not None:
name = name.replace(key, val)
return name
def ipyplot_benchmark(benchmark_name, n_samples, img_width=None):
"""Plot samples from every tasks of a given benchmark."""
for task in io.task_iterator(io.CCB_DIR / benchmark_name):
print(f"Task: {task.dataset_name}")
dataset = task.get_dataset(split="train")
if isinstance(task.label_type, io.label.Classification):
samples = extract_classification_samples(dataset, n_samples)
else:
indexes = np.random.choice(len(dataset), n_samples, replace=False)
samples = [dataset[idx] for idx in indexes]
band_groups = [("red", "green", "blue")] + [(band_name,) for band_name in samples[0].band_names]
images, band_names, labels, tabs_order = extract_bands_with_labels(samples, band_groups)
if "label" in tabs_order:
tabs_order.pop(tabs_order.index("label"))
tabs_order.insert(0, "label")
if isinstance(task.label_type, io.SegmentationClasses):
label_names = None
else:
label_names = [replace_str(task.label_type.value_to_str(label)) for label in labels]
for i, image in enumerate(images):
if image.shape[2] > 3:
images[i] = pack_hyperspectral(image, 4, 4)
ipyplot.plot_class_tabs(
images=images,
labels=band_names,
custom_texts=label_names,
img_width=img_width,
max_imgs_per_tab=48,
tabs_order=tabs_order,
)
def plot_benchmark(benchmark_name, n_samples, save_dir: Path = Path.home() / "figures", fig_size=None):
"""Plot samples of the benchmark using matplotlib for compact visualization."""
if save_dir is not None:
save_dir = save_dir / benchmark_name
save_dir.mkdir(parents=True, exist_ok=True)
path_list = []
# cherry picked to avoid images that are not representative
seed_dict = {
"forestnet_v1.0": 0, # 0
"eurosat": 4,
"brick_kiln_v1.0": 4, # 1
"so2sat": 0,
"pv4ger_classification": 0,
"geolifeclef-2022": 0,
"bigearthnet": 2,
}
for task in io.task_iterator(io.CCB_DIR / benchmark_name):
if task.dataset_name.startswith("geolifeclef"):
continue
print(f"Task: {task.dataset_name}")
dataset = task.get_dataset(split="train")
rng = np.random.RandomState(seed_dict.get(task.dataset_name, 0))
if isinstance(task.label_type, io.label.Classification):
samples = extract_classification_samples(dataset, n_samples, rng=rng)
else:
samples = [dataset[i] for i in rng.choice(len(dataset), size=n_samples)]
if isinstance(task.label_type, io.SegmentationClasses):
images, band_names, all_labels, unique_band_names = extract_bands_with_labels(samples, [])
label_names = [None] * len(images)
else:
images, labels = extract_images(samples)
label_names = [replace_str(task.label_type.value_to_str(label)) for label in labels]
plot_images(images, label_names, DISPLAY_NAMES[task.dataset_name], fig_size=fig_size)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
if save_dir is not None:
path = save_dir / f"{task.dataset_name}.png"
plt.savefig(path, bbox_inches="tight")
path_list.append(path)
return path_list
def plot_images(images, names, title, fig_size):
"""Plot images using matplotlib for compact visualization."""
fig, axs = plt.subplots(1, len(images), figsize=fig_size)
for image, name, ax in zip(images, names, axs):
if name is not None:
for sub_name in name.split(" &\n"):
ax.plot(np.nan, np.nan, ".", color="k", label=sub_name)
ax.imshow(image)
ax.axis("off")
# ax.set_title(name)
if name is not None:
ax.legend()
# ax.text(5, 5, name, bbox={"facecolor": "white", "pad": 10})
fig.suptitle(title, fontsize=18, y=1.1)
fig.tight_layout()
fig.subplots_adjust(top=0.85)
| 25,916 | 35.918803 | 119 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/chesapeake_land_cover.py | """Chesapeake Land Cover dataset."""
# Chesapeake Bay Land Cover dataset will be automatically downloaded by
# TorchGeo (https://github.com/microsoft/torchgeo)
from pathlib import Path
import numpy as np
from torchgeo.datamodules import ChesapeakeCVPRDataModule
from tqdm import tqdm
from geobench import io
# Note: both of the following datasets need to be downloaded manually
# into the same directory. It will not download successfully using
# download=True as in other datasets from torchgeo. See Github issue:
# https://github.com/microsoft/torchgeo/issues/452#issuecomment-1059469588
# 1. Primary dataset: https://lila.science/datasets/chesapeakelandcover
# (use azcopy)
# 2. Extension: https://zenodo.org/record/5866525#.YlhpH27MJf0
DATASET_NAME = "cvpr_chesapeake_landcover"
SRC_DATASET_DIR = Path(io.src_datasets_dir, DATASET_NAME) # type: ignore
DATASET_DIR = Path(io.datasets_dir, DATASET_NAME) # type: ignore
# See dataset documentation for more details on below:
# https://torchgeo.readthedocs.io/en/latest/api/datasets.html#torchgeo.datasets.ChesapeakeCVPR
SPATIAL_RESOLUTION = 1 # meters
PATCH_SIZE = 256
# Classification labels
LABELS = (
"water",
"tree-canopy-forest",
"low-vegetation-field",
"barren-land",
"impervious-other",
"impervious-roads",
"no-data",
)
LABEL_BAND = io.SegmentationClasses("label", spatial_resolution=SPATIAL_RESOLUTION, n_classes=len(LABELS))
BAND_INFO_LIST = io.make_rgb_bands(SPATIAL_RESOLUTION)
BAND_INFO_LIST.append(io.SpectralBand("NearInfrared", ("nir",), SPATIAL_RESOLUTION, 0.876))
def make_sample(
image: "np.typing.NDArray[np.int_]",
label: "np.typing.NDArray[np.int_]",
sample_name: str,
task_specs: io.TaskSpecifications,
crs,
) -> io.Sample:
"""Create a sample from images and label.
Args:
image: image array to be contained in sample
label: label to be contained in sample
sample_name: name of sample
task_specs: task specifications of this dataset
crs: coordinate reference system of the bands
Returns:
sample
"""
n_bands, _height, _width = image.shape
if (_height, _width) != (PATCH_SIZE, PATCH_SIZE):
image = image[:, :PATCH_SIZE, :PATCH_SIZE]
n_bands, _height, _width = image.shape
transform = None # TODO can't find the GPS coordinates from torch geo.
bands = []
for band_idx in range(n_bands):
band_data = image[band_idx, :, :]
band_info = task_specs.bands_info[band_idx]
band_data = band_data.astype(np.float32)
band = io.Band(
data=band_data,
band_info=band_info,
spatial_resolution=task_specs.spatial_resolution,
transform=transform,
crs=crs,
convert_to_int16=False,
)
bands.append(band)
band_label = io.Band(
data=label, band_info=LABEL_BAND, spatial_resolution=SPATIAL_RESOLUTION, transform=transform, crs=crs
)
return io.Sample(bands, label=band_label, sample_name=sample_name)
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert Chesapeake Land Cover dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
np.random.seed(0) # Set random seed for reproducibility
partition = io.dataset.Partition()
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(PATCH_SIZE, PATCH_SIZE),
n_time_steps=1,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=LABEL_BAND,
# eval_loss=io.SegmentationAccuracy,
spatial_resolution=SPATIAL_RESOLUTION,
)
task_specs.save(dataset_dir, overwrite=True)
states = ["de", "md", "va", "wv", "pa", "ny"]
dm = ChesapeakeCVPRDataModule(
root_dir=SRC_DATASET_DIR,
train_splits=[f"{state}-train" for state in states],
val_splits=[f"{state}-val" for state in states],
test_splits=[f"{state}-test" for state in states],
patches_per_tile=500,
patch_size=PATCH_SIZE,
batch_size=1,
num_workers=0,
class_set=len(LABELS),
)
dm.prepare_data()
dm.setup()
train_dl = dm.train_dataloader()
val_dl = dm.val_dataloader()
test_dl = dm.test_dataloader()
n_samples = 0
for s_idx, split_dl in enumerate([test_dl, val_dl, train_dl]):
for i, dl_sample in enumerate(tqdm(split_dl)):
sample_name = f"id_{n_samples:06d}"
image = np.array(dl_sample["image"])[0]
label = np.array(dl_sample["mask"])[0]
crs = dl_sample["crs"][0]
# It is unclear why, but sometimes this is needed.
if label.shape != (256, 256):
print(f"Cropping label to 256x256 from shape {label.shape} and image from shape {image.shape}. ")
label = label[:256, :256]
image = image[..., :256, :256]
sample = make_sample(image, label, sample_name, task_specs, crs)
sample.write(dataset_dir)
partition.add(("test", "valid", "train")[s_idx], sample_name)
n_samples += 1
if max_count is not None and n_samples >= max_count:
break
if max_count is not None and n_samples >= max_count:
break
partition.save(dataset_dir, "original", as_default=True)
if __name__ == "__main__":
convert()
| 5,530 | 32.11976 | 113 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/util.py | """Utility functions for dataset converters."""
import pyproj
import rasterio
def center_to_transform(lat_center, lon_center, radius_in_meter, img_shape):
"""Convert center point and radius to rasterio transform, assuming lat long coordinates."""
geod = pyproj.Geod(ellps="clrk66")
lon, lat, baz = geod.fwd([lon_center] * 4, [lat_center] * 4, [0, 90, 180, 270], [radius_in_meter] * 4)
north, east, south, west = lat[0], lon[1], lat[2], lon[3]
transform = rasterio.transform.from_bounds(west, south, east, north, *img_shape)
return transform
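# Hedged sanity check (illustrative coordinates): the center pixel of the
# resulting transform should map back close to the requested center point.
def _demo_center_to_transform() -> None:
    transform = center_to_transform(51.0, 7.0, radius_in_meter=16.0, img_shape=(320, 320))
    lon, lat = transform * (160, 160)
    assert abs(lon - 7.0) < 1e-3 and abs(lat - 51.0) < 1e-3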
| 568 | 39.642857 | 106 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/bigearthnet.py | """Big Earth Net dataset."""
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
import rasterio
import torch
from rasterio.enums import Resampling
from torch import Tensor
from torchgeo.datasets import BigEarthNet # noqa: F811
from tqdm import tqdm
from geobench import io
DATASET_NAME = "bigearthnet"
SRC_DATASET_DIR = Path(io.src_datasets_dir, "bigearthnet") # type: ignore
DATASET_DIR = Path(io.datasets_dir, DATASET_NAME) # type: ignore
class GeoBigEarthNet(BigEarthNet):
"""Wrapper for BigEarthNet to get geo information."""
def __init__(
self,
root: str = "data",
split: str = "train",
bands: str = "all",
num_classes: int = 19,
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
download: bool = False,
checksum: bool = False,
) -> None:
"""Initialize a new BigEarthNet dataset instance.
Args:
root: root directory where dataset can be found
split: train/val/test split to load
bands: load Sentinel-1 bands, Sentinel-2, or both. one of {s1, s2, all}
num_classes: number of classes to load in target. one of {19, 43}
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
download: if True, download dataset and store it in the root directory
checksum: if True, check the MD5 of the downloaded files (may be slow)
"""
super().__init__(root, split, bands, num_classes, transforms, download, checksum)
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data and label at that index
"""
image, crs, bounds = self._load_image(index)
label = self._load_target(index)
sample: Dict[str, Tensor] = {"image": image, "label": label, "crs": crs, "bounds": bounds}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def _load_image(self, index: int) -> Tuple[Tensor, Any, Any]:
"""Load a single image.
Args:
index: index to return
Returns:
the raster image tensor, plus the CRS and bounds of the last band read
"""
paths = self._load_paths(index)
images = []
for path in paths:
# Bands are of different spatial resolutions
# Resample to (120, 120)
with rasterio.open(path) as dataset:
array = dataset.read(
indexes=1,
out_shape=self.image_size,
out_dtype="int32",
resampling=Resampling.bilinear,
)
images.append(array)
arrays: "np.typing.NDArray[np.int_]" = np.stack(images, axis=0)
tensor = torch.from_numpy(arrays).float()
return tensor, dataset.crs, dataset.bounds
def make_sample(
images: "np.typing.NDArray[np.int_]", label, sample_name: str, task_specs: io.TaskSpecifications
) -> io.Sample:
"""Create a sample from images and label.
Args:
images: image array to be contained in sample
label: label to be contained in sample
sample_name: name of sample
task_specs: task specifications of this dataset
Returns:
sample
"""
n_bands, _height, _width = images.shape
transform = None # TODO can't find the GPS coordinates from torch geo.
crs = None
bands = []
for band_idx in range(n_bands):
band_data = images[band_idx, :, :]
if task_specs.bands_info is not None:
band_info = task_specs.bands_info[band_idx]
band_data = band_data.astype(np.float32)
band = io.Band(
data=band_data,
band_info=band_info,
spatial_resolution=task_specs.spatial_resolution,
transform=transform,
crs=crs,
convert_to_int16=False,
)
bands.append(band)
return io.Sample(bands, label=label, sample_name=sample_name)
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert BigEarthNet dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
partition = io.dataset.Partition()
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(120, 120),
n_time_steps=1,
bands_info=io.sentinel2_13_bands[0:10] + io.sentinel2_13_bands[-2:],
bands_stats=None, # Will be automatically written with the inspect script
label_type=io.MultiLabelClassification(43, class_names=BigEarthNet.class_sets[43]),
# eval_loss=io.MultilabelAccuracy,
spatial_resolution=10,
)
task_specs.save(dataset_dir, overwrite=True)
n_samples = 0
for split_name in ["train", "val", "test"]:
bigearthnet_dataset = GeoBigEarthNet(
root=SRC_DATASET_DIR,
split=split_name,
bands="s2",
download=False,
transforms=None,
checksum=False,
num_classes=43,
)
for i, tg_sample in enumerate(tqdm(bigearthnet_dataset)):
sample_name = f"id_{n_samples:04d}"
images = np.array(tg_sample["image"])
label = np.array(tg_sample["label"])
sample = make_sample(images, label, sample_name, task_specs)
sample.write(dataset_dir)
partition.add(split_name.replace("val", "valid"), sample_name)
n_samples += 1
if max_count is not None and n_samples >= max_count:
break
if max_count is not None and n_samples >= max_count:
break
partition.save(dataset_dir, "original", as_default=True)
if __name__ == "__main__":
convert()
| 6,034 | 31.101064 | 100 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/geolifeclef.py | """GeoLifeCLEF dataset."""
# pip install kaggle
# set kaggle.json according to https://www.kaggle.com/docs/api
# accept terms and conditions from: https://www.kaggle.com/competitions/geolifeclef-2022-lifeclef-2022-fgvc9/data
# Download dataset using this command
# `kaggle competitions download -c geolifeclef-2022-lifeclef-2022-fgvc9`
#
# Note: by default, this converter uses "observations_sample.csv", which can be copied from geolifeclef-scripts into the observations folder of geolifeclef-2022
from pathlib import Path
from typing import Any, List, Optional
import numpy as np
import pandas as pd
import rasterio
import tifffile
from PIL import Image
from tqdm import tqdm
from geobench import io
DATASET_NAME = "geolifeclef-2022"
SPATIAL_RESOLUTION = 1
PATCH_SIZE = 256
N_LABELS = 100
SRC_DATASET_DIR = io.CCB_DIR / "source" / DATASET_NAME # type: ignore
DATA_PATH = Path(SRC_DATASET_DIR) # type: ignore
DATASET_DIR = io.CCB_DIR / "converted" / DATASET_NAME # type: ignore
# US NAIP, FR aerial based (IGN)
BAND_INFO_LIST: List[Any] = io.make_rgb_bands(spatial_resolution=SPATIAL_RESOLUTION)
NIR_BAND = io.SpectralBand("NIR", ("nir",), SPATIAL_RESOLUTION, wavelength=0.829)
BAND_INFO_LIST.append(NIR_BAND)
BAND_INFO_LIST.append(io.ElevationBand("Altitude", ("elevation",), spatial_resolution=SPATIAL_RESOLUTION))
def make_sample(observation_id, label, lat, lng) -> io.Sample:
"""Create a sample.
Args:
observation_id: observation id whose first digit encodes the region
label: species id used as the classification label
lat: latitude of the patch center
lng: longitude of the patch center
Returns:
sample
"""
observation_id = str(observation_id)
region_id = observation_id[0]
if region_id == "1":
region = "fr"
elif region_id == "2":
region = "us"
else:
raise ValueError("Incorrect 'observation_id' {}, can not extract region id from it".format(observation_id))
subfolder1 = observation_id[-2:]
subfolder2 = observation_id[-4:-2]
filename = Path(SRC_DATASET_DIR) / f"patches-{region}" / subfolder1 / subfolder2 / observation_id
transform_center = rasterio.transform.from_origin(lng, lat, SPATIAL_RESOLUTION, SPATIAL_RESOLUTION)
lon_corner, lat_corner = transform_center * [-PATCH_SIZE // 2, -PATCH_SIZE // 2]
transform = rasterio.transform.from_origin(lon_corner, lat_corner, SPATIAL_RESOLUTION, SPATIAL_RESOLUTION)
crs = "EPSG:4326"
date = None # ?
bands = []
rgb_filename = filename.with_name(filename.stem + "_rgb.jpg")
rgb_patch = Image.open(rgb_filename)
rgb_patch = np.asarray(rgb_patch)
for i in range(3):
band_data = io.Band(
data=rgb_patch[:, :, i],
band_info=BAND_INFO_LIST[i],
spatial_resolution=SPATIAL_RESOLUTION,
transform=transform,
crs=crs,
date=date,
meta_info={"latitude": lat, "longitude": lng},
)
bands.append(band_data)
near_ir_filename = filename.with_name(filename.stem + "_near_ir.jpg")
near_ir_patch = Image.open(near_ir_filename)
near_ir_patch = np.asarray(near_ir_patch)
ir_band_data = io.Band(
data=near_ir_patch,
band_info=BAND_INFO_LIST[3],
spatial_resolution=SPATIAL_RESOLUTION,
transform=transform,
crs=crs,
date=date,
meta_info={"latitude": lat, "longitude": lng},
)
bands.append(ir_band_data)
altitude_filename = filename.with_name(filename.stem + "_altitude.tif")
altitude_patch = tifffile.imread(altitude_filename)
altitude_band_data = io.Band(
data=altitude_patch,
band_info=BAND_INFO_LIST[4],
spatial_resolution=SPATIAL_RESOLUTION,
transform=transform,
crs=crs,
date=date,
meta_info={"latitude": lat, "longitude": lng},
)
bands.append(altitude_band_data)
# landcover_filename = filename.with_name(filename.stem + "_landcover.tif")
# landcover_patch = tifffile.imread(landcover_filename)
# landcover_patch = df_suggested_landcover_alignment.values[landcover_patch]
# landcover_band_data = io.Band(
# data=landcover_patch, band_info=BAND_INFO_LIST[5],
# spatial_resolution=SPATIAL_RESOLUTION, transform=transform, crs=crs, date=date, meta_info={'latitude': lat, 'longitude': lng})
# bands.append(landcover_band_data)
return io.Sample(bands, label=label, sample_name=observation_id)
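# Hedged illustration of the path layout assumed above (made-up id): observation
# id "10561900" starts with "1", so it resolves to the French patches under
# patches-fr/00/19/10561900_rgb.jpg (subfolder1 is the last two digits,
# subfolder2 the two digits before those).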
def convert(max_count: Optional[int] = None, dataset_dir: Path = DATASET_DIR) -> None:
"""Convert GeoLifeCLEF dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
partition = io.Partition()
observations_sample_path = Path(__file__).parent / "geolifeclef_scripts" / "observations_sample.csv"
df = pd.read_csv(observations_sample_path, sep=";", index_col="observation_id")
species_names_path = Path(__file__).parent / "geolifeclef_scripts" / "names.csv"
df_species_names = pd.read_csv(species_names_path, sep=";")
names = list(df_species_names["GBIF_species_name"])
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(PATCH_SIZE, PATCH_SIZE),
n_time_steps=1,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=io.Classification(N_LABELS, class_names=names),
# eval_loss=io.Accuracy,
spatial_resolution=SPATIAL_RESOLUTION,
)
task_specs.save(str(dataset_dir), overwrite=True)
for i, el in enumerate(tqdm(list(df.iterrows()))):
sample_name = f"{el[0]}"
observation_id = el[0]
label = el[1]["species_id"]
latitude = el[1]["latitude"]
longitude = el[1]["longitude"]
split_name = el[1]["subset"]
# due to using 'valid' and not 'val'
if split_name == "val":
split_name = "valid"
# print(f'name={sample_name} oid={observation_id} y={label} lat={latitude} lng={longitude} split={split_name}')
sample = make_sample(observation_id, int(label), latitude, longitude)
sample.write(str(dataset_dir))
partition.add(split_name, sample_name)
# temporary for creating small datasets for development purpose
if max_count is not None and i + 1 >= max_count:
break
partition.resplit_iid(split_names=("valid", "test"), ratios=(0.5, 0.5))
partition.save(str(dataset_dir), "original", as_default=True)
if __name__ == "__main__":
convert()
| 6,532 | 34.313514 | 170 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/__init__.py | """GeobenchDataset converter init."""
| 38 | 18.5 | 37 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/neon_tree.py | """Neon Tree dataset."""
# Download using zenodo_get (or manual download: https://zenodo.org/record/4746605#.Yd7mtlhKgeb)
#
# For training tiles:
# $ pip install zenodo_get
# $ zenodo_get 5593238
#
# For Evaluation set:
# $ git clone https://github.com/weecology/NeonTreeEvaluation.git
#
# For running this code:
# $ pip install xmltodict
import csv
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from warnings import warn
import numpy as np
import rasterio
import xmltodict
from tqdm import tqdm
from geobench import io
from geobench.benchmark.rasterize_detection import rasterize_box
SEGMENTATION = True
if SEGMENTATION:
DATASET_NAME = "NeonTree_segmentation"
else:
DATASET_NAME = "NeonTree_detection"
SRC_DATASET_DIR = io.CCB_DIR / "source" / "NeonTree" # type: ignore
# ZENODO_DATASET_DIR = Path(io.src_datasets_dir, DATASET_NAME + "_zenodo")
ZENODO_DATASET_DIR = SRC_DATASET_DIR / "_zenodo" # type: ignore
DATASET_DIR = io.CCB_DIR / "converted" / DATASET_NAME
if SEGMENTATION:
label_type = io.SegmentationClasses("label", spatial_resolution=0.1, n_classes=2, class_names=["no tree", "tree"]) # type: ignore
else:
label_type = io.Detection() # type: ignore
def read_xml(xml_path) -> List[Dict[str, int]]:
"""Parse the xml annotation file.
Only the bounding box is extracted; all other fields contain constant information except:
* truncated: 30891 False, 152 True
* difficult: 31012 False, 31 True
which doesn't seem useful.
Args:
xml_path: path to xml file
Returns:
bounding box annotations
"""
with open(xml_path, "r") as fd:
xml = fd.read()
info = xmltodict.parse(xml)
objects = info["annotation"]["object"]
if not isinstance(objects, list):
objects = [objects]
boxes = []
for obj in objects:  # avoid shadowing the builtin object
box = {key: int(val) for key, val in obj["bndbox"].items()}
boxes.append(box)
return boxes
def load_tif(tif_path) -> Tuple["np.typing.NDArray[np.int_]", Any, Any, Any]:
"""Load tif file.
Args:
tif_path: path to tif file
Returns:
tif image data array
"""
with rasterio.open(tif_path) as fd:
data: "np.typing.NDArray[np.int_]" = fd.read()
crs: Any = fd.crs
transform: Any = fd.transform
no_data: Any = fd.nodata
return np.moveaxis(data, 0, 2), crs, transform, no_data
def to_csv(info_list: List[Tuple[Union[str, bool], ...]], dst_dir: str) -> None:
"""Save info to csv.
Args:
info_list: info to save
dst_dir: path to directory where to save csv
"""
with open(Path(dst_dir, "info.csv"), "w") as fd:
writer = csv.writer(fd)
writer.writerows(info_list)
def find_missing(dir_list: List[Path], file_set) -> None:
"""Find missing files.
Args:
dir_list: list of paths to directories
file_set: set of current files
"""
missing_list = []
other_files = []
for dir in dir_list:
for file in dir.iterdir():
if file.name.endswith(".tif"):
if file not in file_set:
missing_list.append(file)
else:
other_files.append(file)
print("Unused files from zenodo (training files):")
for file in missing_list:
print(file)
def _extract_tag(file_name: str) -> Union[None, str]:
"""Extract tag with regex.
Args:
file_name: file name
Returns:
tag if found
"""
tags = re.findall("[A-Z]{4}", file_name)
if len(tags) == 0:
tag = None
elif len(tags) == 1:
tag = tags[0]
else:
print("more than one tag:", tags)
tag = tags[0]
return tag
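# Hedged example (illustrative file name): the four-letter NEON site code is
# pulled out with a regex.
#
#     _extract_tag("2019_SJER_3_251000_4106000_image")  # -> "SJER"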
def convert_dataset(src_dataset_dir: str, zenodo_dataset_dir: str, dataset_dir: str, max_count: Optional[int]) -> None:
"""Convert dataset.
Args:
src_dataset_dir: source dataset directory
zenodo_dataset_dir: directory to zenodo dataset
dataset_dir: directory where to convert dataset to
max_count: maximum number of samples
"""
sample_count = 0
info_list: List[Tuple[Union[str, bool], ...]] = []
file_set: Set[Path] = set()
partition = io.Partition()
path_list = list(Path(src_dataset_dir, "annotations").iterdir())
for label_path in tqdm(path_list):
if label_path.suffix == ".xml":
name = label_path.stem
tag = _extract_tag(name)
boxes = read_xml(label_path)
rgb_path = Path(src_dataset_dir, "evaluation", "RGB", f"{name}.tif")
hs_path = Path(src_dataset_dir, "evaluation", "Hyperspectral", f"{name}_hyperspectral.tif")
chm_path = Path(src_dataset_dir, "evaluation", "CHM", f"{name}_CHM.tif")
rgb_path_z = Path(zenodo_dataset_dir, f"{name}.tif")
hs_path_z = Path(zenodo_dataset_dir, f"{name}_hyperspectral.tif")
chm_path_z = Path(zenodo_dataset_dir, f"{name}_CHM.tif")
all_paths = (rgb_path, hs_path, chm_path, rgb_path_z, hs_path_z, chm_path_z)
exists = [p.exists() for p in all_paths]
file_set.update(all_paths)
# shapes = []
# for p in all_paths:
# if p.exists():
# shapes.append(str(load_tif(p)[0].shape))
# else:
# shapes.append("None")
if np.all(exists[:3]):
split = "test"
sample_list = make_sample(name, str(rgb_path), str(chm_path), str(hs_path), boxes, check_shapes=True)
elif np.all(exists[3:]):
split = "train"
sample_list = make_sample(
name, str(rgb_path_z), str(chm_path_z), str(hs_path_z), boxes, check_shapes=True, slice=True
)
else:
split = "unk"
sample_list = []
info = (str(name), str(tag), str(len(boxes)), str(split)) + tuple(exists)
info_list.append(info)
for sample in sample_list:
partition.add(split, sample.sample_name)
sample.write(dataset_dir)
sample_count += 1
if max_count is not None and sample_count >= max_count:
break
if max_count is not None and sample_count >= max_count:
break
partition.resplit_iid(split_names=("valid", "test"), ratios=(0.5, 0.5))
partition.save(dataset_dir, "original", as_default=True)
to_csv(info_list, str(dataset_dir))
find_missing([Path(zenodo_dataset_dir)], file_set)
BAND_INFO_LIST: List[Any] = io.make_rgb_bands(0.1)
BAND_INFO_LIST.append(io.ElevationBand("Canopy Height Model", alt_names=("lidar", "CHM"), spatial_resolution=0.1))
BAND_INFO_LIST.append(io.HyperSpectralBands("Neon", n_bands=369, spatial_resolution=1))
def extract_boxes(boxes, y_offset, x_offset, area_threshold=10) -> List[Dict[str, int]]:
"""Extract bounding boxes.
Args:
boxes: list of bounding boxes
y_offset: y offset for box
x_offset: x offset for box
area_threshold: minimum area of bounding box to extract
Return:
extracted bounding boxes
"""
new_boxes = []
def clip(box, key, offset):
box[key] = int(np.clip(box[key] + offset, 0, 399))
for box_ in boxes:
box = box_.copy()
clip(box, "xmin", x_offset)
clip(box, "ymin", y_offset)
clip(box, "xmax", x_offset)
clip(box, "ymax", y_offset)
area = (box["xmax"] - box["xmin"]) * (box["ymax"] - box["ymin"])
if area >= area_threshold:
new_boxes.append(box)
return new_boxes
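# Hedged sketch: offsets shift the box, coordinates are clipped to the 400 x 400
# patch, and boxes smaller than area_threshold after clipping are dropped.
def _demo_extract_boxes() -> None:
    boxes = [{"xmin": 10, "ymin": 10, "xmax": 20, "ymax": 20}]
    shifted = extract_boxes(boxes, y_offset=-5, x_offset=-5)
    assert shifted == [{"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15}]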
def extract_slices(rgb_data, chm_data, hs_data, boxes, slice_shape):
"""Extract image patch slices.
Args:
rgb_data: RGB imagery data
chm_data: canopy height model data
hs_data: hyperspectral data
boxes: bounding boxes
slice_shape: desired shape of slice
Returns:
sliced data
"""
# TODO slice boxes
def get_patch(data, start_x, start_y, scale=1):
start_x, start_y, size_x, size_y = tuple(
np.round(np.array([start_x, start_y, slice_shape[0], slice_shape[1]]) * scale).astype(int)
)
return data[start_x : start_x + size_x, start_y : start_y + size_y, :]
shape = np.array(rgb_data.shape[:2])
slice_shape = np.asarray(slice_shape)
n_tiles = np.ceil(shape / slice_shape).astype(int)
stride = np.floor((shape - slice_shape) / (n_tiles - 1)).astype(int)
n_x, n_y = tuple(n_tiles)
stride_x, stride_y = tuple(stride)
data_list = []
for j in range(n_y):
for i in range(n_x):
rgb_patch = get_patch(rgb_data, stride_x * i, stride_y * j, scale=1)
chm_patch = get_patch(chm_data, stride_x * i, stride_y * j, scale=0.1)
hs_patch = get_patch(hs_data, stride_x * i, stride_y * j, scale=0.1)
new_boxes = extract_boxes(boxes, -stride_x * i, -stride_y * j)
data_list.append((rgb_patch, chm_patch, hs_patch, new_boxes, f"_{i:02d}_{j:02d}"))
return data_list
def make_sample(
name: str, rgb_path: str, chm_path: str, hs_path: str, boxes, check_shapes: bool = True, slice: bool = False
) -> List[io.Sample]:
"""Create a sample.
Args:
name: name of sample
rgb_path: path to rgb data
chm_path: path to canopy height model data
hs_path: path to hyperspectral data
boxes: set of bounding boxes
check_shapes: whether or not to check shapes before making sample
slice: whether or not to slice sample
Returns:
sample
"""
rgb_data, crs, rgb_transform, rgb_nodata = load_tif(rgb_path)
chm_data, chm_crs, chm_transform, chm_nodata = load_tif(chm_path)
hs_data, _, hs_transform, hs_nodata = load_tif(hs_path)
assert crs == chm_crs
if hs_data.shape[2] == 426:
hs_data = hs_data[:, :, :369] # TODO fix to the right set of bands
# TODO fix Temporary hack for the nodata
chm_data[chm_data == chm_nodata] = 0
hs_data[hs_data == hs_nodata] = 0
if slice:
data_list = extract_slices(rgb_data, chm_data, hs_data, boxes, slice_shape=(400, 400))
else:
data_list = [(rgb_data, chm_data, hs_data, boxes, "")]
sample_list: List[io.Sample] = []
for rgb_data, chm_data, hs_data, new_boxes, suffix in data_list:
for tag, data in (("rgb", rgb_data), ("chm", chm_data), ("hs", hs_data)):
if np.any(data < 0):
print(f"negative values in {tag}.")
if check_shapes:
shapes = (rgb_data.shape, chm_data.shape, hs_data.shape)
target_shapes = ((400, 400, 3), (40, 40, 1), (40, 40, 369))
if shapes != target_shapes:
warn(f"skipping {name}, shapes (rgb, chm, hyperspectral) = {shapes} != {target_shapes}")
return sample_list
bands = []
for i in range(3):
band = io.Band(
data=rgb_data[:, :, i],
band_info=BAND_INFO_LIST[i],
spatial_resolution=0.1,
transform=rgb_transform,
crs=crs,
)
bands.append(band)
bands.append(
io.Band(chm_data, band_info=BAND_INFO_LIST[3], spatial_resolution=1, transform=chm_transform, crs=crs)
)
bands.append(
io.Band(hs_data, band_info=BAND_INFO_LIST[4], spatial_resolution=1, transform=hs_transform, crs=crs)
)
if SEGMENTATION:
label_data = rasterize_box(boxes=new_boxes, img_shape=rgb_data.shape[:2], scale=0.6)
label = io.Band(
data=label_data, band_info=label_type, spatial_resolution=0.1, transform=rgb_transform, crs=crs
)
else:
label = new_boxes
sample_list.append(io.Sample(bands, label=label, sample_name=name + suffix))
return sample_list
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert Neon Tree dataset dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(400, 400),
n_time_steps=1,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=label_type,
# eval_loss=io.Accuracy(), # TODO what loss will we use?
spatial_resolution=0.1,
)
task_specs.save(dataset_dir, overwrite=True)
convert_dataset(str(SRC_DATASET_DIR), str(ZENODO_DATASET_DIR), str(dataset_dir), max_count=max_count)
if __name__ == "__main__":
convert()
| 12,875 | 31.351759 | 134 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/nz_cattle_detection.py | """NZ cattle detection dataset."""
# Downloaded from "https://zenodo.org/record/5908869"
# Notes to the dataset authors:
# * coordinates are lon-lat (not lat-lon)
# * specify that the coordinates refer to the image center.
# * can we change "Kapiti_Coast" to "Kapiti-Coast"?
import datetime
from pathlib import Path
from typing import List, Union
import numpy as np
import rasterio
from PIL import Image
from tqdm import tqdm
from geobench import io
from geobench.benchmark.rasterize_detection import point_to_boxes, rasterize_box
SEGMENTATION = True
if SEGMENTATION:
DATASET_NAME = "nz_cattle_segmentation"
else:
DATASET_NAME = "nz_cattle_detection"
SRC_DATASET_DIR = Path(io.src_datasets_dir, "nz_cattle") # type: ignore
DATASET_DIR = Path(io.datasets_dir, DATASET_NAME) # type: ignore
BAND_INFO_LIST = io.make_rgb_bands(0.1)
if SEGMENTATION:
label_type = io.SegmentationClasses( # type: ignore
"label", spatial_resolution=0.1, n_classes=2, class_names=["no cattle", "cattle"]
)
else:
label_type = io.PointAnnotation() # type: ignore
def parse_file_name(name):
"""Parse file name and extract information.
Args:
name: filename
Returns:
location, date, transform and crs information
"""
name = name.replace("Kapiti_Coast", "Kapiti-Coast")
_index, location, year, lon_lat = name.split("_")[:4]
lon_center, lat_center = [float(val) for val in lon_lat.split(",")]
year = int(year[1:-1].split("-")[-1])
date = datetime.date(year=year, month=1, day=1)
transform_center = rasterio.transform.from_origin(lon_center, lat_center, 0.1, 0.1)
lon_corner, lat_corner = transform_center * [-250, -250]
transform = rasterio.transform.from_origin(lon_corner, lat_corner, 0.1, 0.1)
crs = rasterio.crs.CRS.from_epsg(4326)
return location, date, transform, crs
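# Hypothetical example (file-name format inferred from the parsing above):
# parse_file_name("0001_Kapiti-Coast_(2015-2016)_174.91,-40.85") would yield
# location="Kapiti-Coast", date=datetime.date(2016, 1, 1), and a transform whose
# origin is shifted 250 pixels up and left of the (lon, lat) image center.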
def load_sample(img_path: Path) -> io.Sample:
"""Create sample from a given image path.
Args:
img_path: path to image
Return:
created sample
"""
label_path = img_path.with_suffix(".png.mask.0.txt")
with Image.open(img_path) as im:
data = np.array(im)[:, :, :3]
location, date, transform, crs = parse_file_name(img_path.stem)
coords = []
with open(label_path, "r") as fd:
for line in fd:
coord = [int(val) for val in line.split(",")]
coords.append(coord)
bands = []
for i in range(3):
band_data = io.Band(
data=data[:, :, i],
band_info=BAND_INFO_LIST[i],
spatial_resolution=0.1,
transform=transform,
crs=crs,
date=date,
meta_info={"location": location},
)
bands.append(band_data)
if SEGMENTATION:
label_data = rasterize_box(boxes=point_to_boxes(points=coords, radius=6), img_shape=data.shape[:2])
label = io.Band(
data=label_data,
band_info=label_type,
spatial_resolution=0.1,
transform=transform,
crs=crs,
date=date,
meta_info={"location": location},
)
    else:
        label = coords
    return io.Sample(bands, label=label, sample_name=img_path.stem)
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert Nz Cattle detection dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(500, 500),
n_time_steps=1,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=label_type,
# eval_loss=io.SegmentationAccuracy(), # TODO decide on the loss
spatial_resolution=0.1,
)
task_specs.save(dataset_dir, overwrite=True)
partition = io.Partition()
path_list = list(Path(SRC_DATASET_DIR, "cow_images").iterdir())
sample_count = 0
partition = io.Partition() # default partition: everything in train
for file in tqdm(path_list):
if file.suffix == ".png":
sample = load_sample(img_path=file)
sample.write(dataset_dir)
partition.add("train", sample.sample_name)
sample_count += 1
if max_count is not None and sample_count >= max_count:
break
partition.resplit_iid(split_names=("train", "valid", "test"), ratios=(0.8, 0.1, 0.1))
partition.save(dataset_dir, "iid", as_default=True)
if __name__ == "__main__":
convert()
| 4,644 | 28.398734 | 107 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/cv4a_kenya_crop_type.py | """CV4A Kenya Crop Type dataset."""
import datetime
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import rasterio
import torch
from torch import Tensor
from torchgeo.datasets import CV4AKenyaCropType
from tqdm import tqdm
from geobench import io
# Deprecated:
# we need to rewrite this script so that it properly splits into train / test
# and extracts georeference information. torchgeo is not an option.
# Notes
# * torchgeo doesn't seem to expose geo-coordinates through a general interface
# * should we use the radiant mlhub api_key as a constant?
DATASET_NAME = "CV4AKenyaCropType"
SRC_DATASET_DIR = io.src_datasets_dir # type: ignore
DATASET_DIR = Path(io.datasets_dir, DATASET_NAME) # type: ignore
DATES = [
datetime.datetime.strptime(date, "%Y%m%d").date()
for date in [
"20190606",
"20190701",
"20190706",
"20190711",
"20190721",
"20190805",
"20190815",
"20190825",
"20190909",
"20190919",
"20190924",
"20191004",
"20191103",
]
]
max_band_value = {
"06 - Vegetation Red Edge": 1.4976,
"02 - Blue": 1.7024,
"03 - Green": 1.6,
"12 - SWIR": 1.2458,
"05 - Vegetation Red Edge": 1.5987,
"04 - Red": 1.5144,
"01 - Coastal aerosol": 1.7096,
"07 - Vegetation Red Edge": 1.4803,
"11 - SWIR": 1.0489,
"09 - Water vapour": 1.6481,
"08A - Vegetation Red Edge": 1.4244,
"08 - NIR": 1.4592,
}
BAND_INFO_LIST: List[Any] = io.sentinel2_13_bands[:]
dropped_band = BAND_INFO_LIST.pop(10)
assert dropped_band.name == "10 - SWIR - Cirrus"
BAND_INFO_LIST.append(io.CloudProbability(alt_names=("CPL", "CLD")))
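# After dropping the cirrus band, BAND_INFO_LIST holds 12 Sentinel-2 bands; the
# cloud-probability layer appended above brings it back to 13 entries, matching
# the 13-element band_names tuple (B01..B12 + CLD) of GeoCV4AKenyaCropType below.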
LABEL_BAND = io.SegmentationClasses("label", spatial_resolution=10, n_classes=8)
class GeoCV4AKenyaCropType(CV4AKenyaCropType):
"""Geo wrapper around crop type dataset."""
band_names = (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B8A",
"B09",
"B11",
"B12",
"CLD",
)
rgb_bands = ["B04", "B03", "B02"]
def __init__(
self,
root: str = "data",
chip_size: int = 256,
stride: int = 128,
bands: Tuple[str, ...] = band_names,
download: bool = False,
api_key: Optional[str] = None,
checksum: bool = False,
verbose: bool = False,
) -> None:
"""Initialize a new CV4A Kenya Crop Type Dataset instance.
Args:
root: root directory where dataset can be found
chip_size: size of chips
stride: spacing between chips, if less than chip_size, then there
will be overlap between chips
bands: the subset of bands to load
download: if True, download dataset and store it in the root directory
api_key: a RadiantEarth MLHub API key to use for downloading the dataset
checksum: if True, check the MD5 of the downloaded files (may be slow)
verbose: if True, print messages when new tiles are loaded
Raises:
RuntimeError: if ``download=False`` but dataset is missing or checksum fails
"""
super().__init__(root, chip_size, stride, bands, None, download, api_key, checksum, verbose)
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data, labels, field ids, and metadata at that index
"""
tile_index, y, x = self.chips_metadata[index]
tile_name = self.tile_names[tile_index]
        img, transform, crs, bounds = self._load_all_image_tiles(tile_name, self.bands)
labels, field_ids = self._load_label_tile(tile_name)
img = img[:, :, y : y + self.chip_size, x : x + self.chip_size]
labels = labels[y : y + self.chip_size, x : x + self.chip_size]
field_ids = field_ids[y : y + self.chip_size, x : x + self.chip_size]
sample = {
"image": img,
"mask": labels,
"field_ids": field_ids,
"tile_index": torch.tensor(tile_index),
"x": torch.tensor(x),
"y": torch.tensor(y),
"transform": transform,
"crs": crs,
"bounds": bounds,
}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
    def _load_all_image_tiles(self, tile_name: str, bands: Tuple[str, ...] = band_names) -> Tuple[Tensor, Any, Any, Any]:
        """Load all the imagery (across time) for a single _tile_.
        Optionally allows for subsetting of the bands that are loaded.
        Args:
            tile_name: name of tile to load
            bands: tuple of bands to load
        Returns:
            imagery of shape (13, number of bands, 3035, 2016) where 13 is the number of
            points in time, 3035 is the tile height, and 2016 is the tile width,
            along with the rasterio transform, crs, and bounds of the tile
        Raises:
            AssertionError: if ``tile_name`` is invalid
        """
assert tile_name in self.tile_names
if self.verbose:
print(f"Loading all imagery for {tile_name}")
img = torch.zeros(
len(self.dates),
len(bands),
self.tile_height,
self.tile_width,
dtype=torch.float32,
)
        for date_index, date in enumerate(self.dates):
            single_scene, crs, transform, bounds = self._load_single_image_tile(tile_name, date, bands)
            img[date_index] = single_scene
        return img, transform, crs, bounds
    def _load_single_image_tile(self, tile_name: str, date: str, bands: Tuple[str, ...]) -> Tuple[Tensor, Any, Any, Any]:
        """Load the imagery for a single tile for a single date.
        Optionally allows for subsetting of the bands that are loaded.
        Args:
            tile_name: name of tile to load
            date: date of tile to load
            bands: bands to load
        Returns:
            tensor containing a single image tile, along with its crs, transform, and bounds
        Raises:
            AssertionError: if ``tile_name`` or ``date`` is invalid
        """
assert tile_name in self.tile_names
assert date in self.dates
if self.verbose:
print(f"Loading imagery for {tile_name} at {date}")
img = torch.zeros(len(bands), self.tile_height, self.tile_width, dtype=torch.float32)
        for band_index, band_name in enumerate(bands):
filepath = os.path.join(
self.root,
"ref_african_crops_kenya_02_source",
f"{tile_name}_{date}",
f"{band_name}.tif",
)
with rasterio.open(filepath) as src:
                transform = src.transform  # same transform for every band
crs = src.crs
array = src.read().astype(np.float32)
img[band_index] = torch.from_numpy(array)
roi = src.bounds
return img, crs, transform, roi
def make_sample(
images: "np.typing.NDArray[np.int_]", mask: "np.typing.NDArray[np.int_]", sample_name: str
) -> io.Sample:
"""Create a sample from images and label.
Args:
images: image array to be contained in sample
mask: label to be contained in sample
sample_name: name of sample
Returns:
sample
"""
n_dates, n_bands, _height, _width = images.shape
transform = None # TODO can't find the GPS coordinates from torch geo.
crs = None
bands = []
for date_idx in range(n_dates):
for band_idx in range(n_bands):
band_data = images[date_idx, band_idx, :, :]
band_info = BAND_INFO_LIST[band_idx]
if band_info.name in max_band_value:
band_data = band_data / max_band_value[band_info.name] * 10000 # type: ignore
band = io.Band(
data=band_data,
band_info=band_info,
date=DATES[date_idx],
spatial_resolution=10,
transform=transform,
crs=crs,
# convert_to_int16=False,
)
bands.append(band)
label = io.Band(data=mask, band_info=LABEL_BAND, spatial_resolution=10, transform=transform, crs=crs)
return io.Sample(bands, label=label, sample_name=sample_name)
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert CV4A Kenya crop type dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
cv4a_dataset = GeoCV4AKenyaCropType(
root=SRC_DATASET_DIR,
download=False,
checksum=True,
api_key="e46c4efbca1274862accc0f1616762c9c72791e00523980eea3db3c48acd106c",
chip_size=128,
verbose=True,
)
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(128, 128),
n_time_steps=13,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=LABEL_BAND,
        eval_loss=io.SegmentationAccuracy,  # TODO probably not the final eval loss. To be discussed.
spatial_resolution=10,
)
task_specs.save(dataset_dir)
# trying to understand train / test split.
set_map = {}
trn, tst = cv4a_dataset.get_splits()
for id in trn:
set_map[id] = 0
for id in tst:
set_map[id] = 1
set_map[0] = 0
partition = io.Partition()
j = 0
for i, tg_sample in enumerate(tqdm(cv4a_dataset)):
if np.all(np.array(tg_sample["field_ids"]) == 0):
continue
tile_id, x_start, y_start = cv4a_dataset.chips_metadata[i]
sample_name = f"tile={tile_id}_x={x_start:04d}_y={y_start:04d}"
# uids = np.unique(tg_sample["field_ids"])
images = np.array(tg_sample["image"])
mask = np.array(tg_sample["mask"])
# set_count = np.bincount([set_map[id] for id in uids])
sample = make_sample(images, mask, sample_name)
sample.write(dataset_dir)
partition.add("train", sample_name) # by default everything goes in train
j += 1
if max_count is not None and j >= max_count:
break
partition.save(dataset_dir, "nopartition", as_default=True)
if __name__ == "__main__":
convert(10)
| 10,523 | 29.416185 | 106 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/seasonet.py | """SeasoNet dataset."""
import datetime
from pathlib import Path
from typing import Any, List
import numpy as np
import pandas as pd
import rasterio
from tqdm import tqdm
from geobench import io
# change dimensions to be H, W, C
# Paths
DATASET_NAME = "seasonet"
SRC_DATASET_DIR = Path(io.src_datasets_dir, DATASET_NAME) # type: ignore
DATASET_DIR = Path(io.CCB_DIR, "converted", DATASET_NAME)  # type: ignore
LABELS = [
"background",
"Continuous urban fabric",
"Discontinuous urban fabric",
"Industrial or commercial units",
"Road and rail networks and associated land",
"Port areas",
"Airports",
"Mineral extraction sites",
"Dump sites",
"Construction sites",
"Green urban areas",
"Sport and leisure facilities",
"Non-irrigated arable land",
"Vineyards",
"Fruit trees and berry plantations",
"Pastures",
"Broad-leaved forest",
"Coniferous forest",
"Mixed forest",
"Natural grasslands",
"Moors and heathland",
"Transitional woodland/shrub",
"Beaches, dunes, sands",
"Bare rock",
"Sparsely vegetated areas",
"Inland marshes",
"Peat bogs",
"Salt marshes",
"Intertidal flats",
"Water courses",
"Water bodies",
"Coastal lagoons",
"Estuaries",
"Sea and ocean",
]
BAND_INFO_LIST: List[Any] = io.sentinel2_13_bands[:]
dropped_band = BAND_INFO_LIST.pop(10)
assert dropped_band.name == "10 - SWIR - Cirrus"
SPATIAL_RESOLUTION = 10
LABEL_BAND = io.SegmentationClasses(
"label", spatial_resolution=SPATIAL_RESOLUTION, n_classes=len(LABELS), class_names=LABELS
)
HEIGHT = 120
WIDTH = 120
# there are multiple seasons and snow layer available
# specify from which you want to sample
SEASONS = ["Fall"]
split_paths = list((SRC_DATASET_DIR / "splits").glob("*.csv"))
SPLIT_DICT = {}
for path in split_paths:
    split = path.stem
if split == "val":
split = "valid"
df = pd.read_csv(path, header=None)
for id in df.loc[:, 0].tolist():
SPLIT_DICT[id] = split
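# Each split CSV is assumed to hold one integer sample id per row in its first
# column; SPLIT_DICT maps those ids to "train" / "valid" / "test".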
def load_bands(path, band_info_list):
"""Load bands from tif files.
Args:
path: path to tif file
        band_info_list: band_info objects in the same order as the bands stored in the file
Returns:
dictionary mapping band info to band
"""
with rasterio.open(path) as src:
data = src.read()
band_dict = {}
for i, band_info in enumerate(band_info_list):
band = io.Band(
data=data[i, :, :],
band_info=band_info,
date=None,
spatial_resolution=src.res[0],
transform=src.transform,
crs=src.crs,
convert_to_int16=False,
)
band_dict[band_info] = band
return band_dict
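# Minimal usage sketch (path and band selection are illustrative):
# band_dict = load_bands(sample_dir / f"{id}_10m_RGB.tif",
#                        [BAND_INFO_LIST[3], BAND_INFO_LIST[2], BAND_INFO_LIST[1]])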
def load_label_as_band(label_path):
"""Load the label as band.
Args:
label_path: path to label tif file
Returns:
io.Band of label
"""
with rasterio.open(label_path) as src:
label = io.Band(
data=src.read().transpose((1, 2, 0)),
band_info=LABEL_BAND,
spatial_resolution=src.res[0],
transform=src.transform,
crs=src.crs,
)
return label
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert SeasoNet dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(HEIGHT, WIDTH),
n_time_steps=1,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=LABEL_BAND,
        # eval_loss=io.SegmentationAccuracy, # TODO probably not the final eval loss. To be discussed.
spatial_resolution=SPATIAL_RESOLUTION,
)
partition = io.Partition()
task_specs.save(dataset_dir, overwrite=True)
# load the metafile from which to load samples
meta_df = pd.read_csv(SRC_DATASET_DIR / "meta.csv")
meta_df = meta_df[meta_df["Season"].isin(SEASONS)]
# only use one grid
meta_df = meta_df[meta_df["Grid"] == 1]
# only consider cloud free and non snow-images
meta_df = meta_df[(meta_df["Clouds"] == 0.0) & (meta_df["Snow"] == 0.0)]
if max_count is None:
max_count = len(meta_df)
# sample max_count number of samples from df
meta_df = meta_df.sample(n=max_count, random_state=1).reset_index(drop=True)
# iterate over df to load samples
for idx, row in tqdm(list(meta_df.iterrows())):
sample_dir = SRC_DATASET_DIR / row.Path
id = sample_dir.name
band_dict = {}
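        # SeasoNet stores bands in separate files by resolution: 10 m (RGB + IR),
        # 20 m (red-edge, narrow NIR and SWIR) and 60 m (coastal aerosol + water vapour).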
rgb_band_info = [BAND_INFO_LIST[3], BAND_INFO_LIST[2], BAND_INFO_LIST[1]]
band_dict.update(load_bands(sample_dir / (id + "_10m_RGB.tif"), rgb_band_info))
band_dict.update(load_bands(sample_dir / (id + "_10m_IR.tif"), [BAND_INFO_LIST[7]]))
vegetation_swir_info = [
BAND_INFO_LIST[4],
BAND_INFO_LIST[5],
BAND_INFO_LIST[6],
BAND_INFO_LIST[8],
BAND_INFO_LIST[10],
BAND_INFO_LIST[11],
]
band_dict.update(load_bands(sample_dir / (id + "_20m.tif"), vegetation_swir_info))
water_info = [
BAND_INFO_LIST[0],
BAND_INFO_LIST[9],
]
band_dict.update(load_bands(sample_dir / (id + "_60m.tif"), water_info))
ordered_bands = [band_dict[band_info] for band_info in BAND_INFO_LIST]
label = load_label_as_band(sample_dir / (id + "_labels.tif"))
sample = io.Sample(ordered_bands, label=label, sample_name=id)
sample.write(dataset_dir)
partition.add(SPLIT_DICT[int(id.split("_")[-1])], id)
partition.save(dataset_dir, "default")
if __name__ == "__main__":
convert()
| 6,373 | 27.328889 | 144 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/benin_smallholder_cashews.py | """Benin Smallholder Cashew dataset."""
# The Smallholder Cashew dataset will be downloaded by torchgeo (https://github.com/microsoft/torchgeo)
#
# 1) This requires Radiant MLHub package and API token
# pip install radiant_mlhub
# 2) Sign up for a MLHub account here: https://mlhub.earth/
# 3) Type this in your terminal:
# mlhub configure
# and enter your API key.
#
# More info on the dataset: https://mlhub.earth/10.34911/rdnt.hfv20i
import datetime
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import rasterio
import torch
from rasterio.crs import CRS
from torch import Tensor
from torchgeo.datasets import BeninSmallHolderCashews
from tqdm import tqdm
from geobench import io
# Classification labels
LABELS = (
"no data",
"well-managed plantation",
"poorly-managed plantation",
"non-plantation",
"residential",
"background",
"uncertain",
)
DATES: List[datetime.date] = [
datetime.datetime.strptime(date, "%Y-%m-%d").date()
for date in [
"2019-11-05",
"2019-11-10",
"2019-11-15",
"2019-11-20",
"2019-11-30",
"2019-12-05",
"2019-12-10",
"2019-12-15",
"2019-12-20",
"2019-12-25",
"2019-12-30",
"2020-01-04",
"2020-01-09",
"2020-01-14",
"2020-01-19",
"2020-01-24",
"2020-01-29",
"2020-02-08",
"2020-02-13",
"2020-02-18",
"2020-02-23",
"2020-02-28",
"2020-03-04",
"2020-03-09",
"2020-03-14",
"2020-03-19",
"2020-03-24",
"2020-03-29",
"2020-04-03",
"2020-04-08",
"2020-04-13",
"2020-04-18",
"2020-04-23",
"2020-04-28",
"2020-05-03",
"2020-05-08",
"2020-05-13",
"2020-05-18",
"2020-05-23",
"2020-05-28",
"2020-06-02",
"2020-06-07",
"2020-06-12",
"2020-06-17",
"2020-06-22",
"2020-06-27",
"2020-07-02",
"2020-07-07",
"2020-07-12",
"2020-07-17",
"2020-07-22",
"2020-07-27",
"2020-08-01",
"2020-08-06",
"2020-08-11",
"2020-08-16",
"2020-08-21",
"2020-08-26",
"2020-08-31",
"2020-09-05",
"2020-09-10",
"2020-09-15",
"2020-09-20",
"2020-09-25",
"2020-09-30",
"2020-10-10",
"2020-10-15",
"2020-10-20",
"2020-10-25",
"2020-10-30",
]
]
noclouds_25 = [
2,
3,
4,
5,
6,
7,
8,
9,
10,
12,
13,
15,
16,
17,
19,
20,
22,
23,
27,
28,
30,
33,
37,
38,
69,
] # 25 dates with the least clouds
BAND_INFO_LIST: List[Any] = io.sentinel2_13_bands[:]
dropped_band = BAND_INFO_LIST.pop(10)
assert dropped_band.name == "10 - SWIR - Cirrus"
BAND_INFO_LIST.append(io.CloudProbability(alt_names=("CPL", "CLD"), spatial_resolution=10))
SPATIAL_RESOLUTION = 0.5 # meters, to be confirmed
N_TIMESTEPS = 70
LABEL_BAND = io.SegmentationClasses("label", spatial_resolution=SPATIAL_RESOLUTION, n_classes=len(LABELS))
GROUP_BY_TIMESTEP = False
NOCLOUDS = True
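# With GROUP_BY_TIMESTEP=False each retained date becomes its own sample; with
# GROUP_BY_TIMESTEP=True all retained dates are packed into a single sample.
# NOCLOUDS=True keeps only the 25 least-cloudy dates listed in noclouds_25.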
# Paths
DATASET_NAME = "smallholder_cashew"
SRC_DATASET_DIR = Path(io.src_datasets_dir, DATASET_NAME) # type: ignore
DATASET_DIR = Path(io.datasets_dir, DATASET_NAME) # type: ignore
class GeoBeninCashew(BeninSmallHolderCashews):
"""Geo Wrapper to extract geo information from dataste."""
all_bands = (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B8A",
"B09",
"B11",
"B12",
"CLD",
)
rgb_bands = ("B04", "B03", "B02")
def __init__(
self,
root: str = "data",
chip_size: int = 256,
stride: int = 128,
bands: Tuple[str, ...] = all_bands,
download: bool = False,
api_key: Optional[str] = None,
checksum: bool = False,
verbose: bool = False,
) -> None:
"""Initialize a new Benin Smallholder Cashew Plantations Dataset instance.
Args:
root: root directory where dataset can be found
chip_size: size of chips
stride: spacing between chips, if less than chip_size, then there
will be overlap between chips
bands: the subset of bands to load
download: if True, download dataset and store it in the root directory
api_key: a RadiantEarth MLHub API key to use for downloading the dataset
checksum: if True, check the MD5 of the downloaded files (may be slow)
verbose: if True, print messages when new tiles are loaded
Raises:
RuntimeError: if ``download=False`` but dataset is missing or checksum fails
"""
super().__init__(root, chip_size, stride, bands, None, download, api_key, checksum, verbose)
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
a dict containing image, mask, transform, crs, and metadata at index.
"""
y, x = self.chips_metadata[index]
img, transform, crs, bounds = self._load_all_imagery(self.bands)
labels = self._load_mask(transform)
img = img[:, :, y : y + self.chip_size, x : x + self.chip_size]
labels = labels[y : y + self.chip_size, x : x + self.chip_size]
sample = {
"image": img,
"mask": labels,
"x": torch.tensor(x),
"y": torch.tensor(y),
"transform": transform,
"crs": crs,
"bounds": bounds,
}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
    def _load_single_scene(self, date: str, bands: Tuple[str, ...]) -> Tuple[Tensor, rasterio.Affine, CRS, Any]:
        """Load the imagery for a single date.
        Optionally allows for subsetting of the bands that are loaded.
        Args:
            date: date of the imagery to load
            bands: bands to load
        Returns:
            Tensor containing a single image tile, the rasterio affine transform
            mapping pixel coordinates to geo coordinates, the coordinate
            reference system of the transform, and the bounds of the tile.
        Raises:
            AssertionError: if ``date`` is invalid
        """
assert date in self.dates
if self.verbose:
print(f"Loading imagery at {date}")
img = torch.zeros(len(bands), self.tile_height, self.tile_width, dtype=torch.float32)
        for band_index, band_name in enumerate(bands):
filepath = os.path.join(
self.root,
"ts_cashew_benin_source",
f"ts_cashew_benin_source_00_{date}",
f"{band_name}.tif",
)
with rasterio.open(filepath) as src:
                transform = src.transform  # same transform for every band
crs = src.crs
array = src.read().astype(np.float32)
img[band_index] = torch.from_numpy(array)
roi = src.bounds
return img, transform, crs, roi
    def _load_all_imagery(self, bands: Tuple[str, ...] = all_bands) -> Tuple[Tensor, rasterio.Affine, CRS, Any]:
        """Load all the imagery (across time) for the dataset.
        Optionally allows for subsetting of the bands that are loaded.
        Args:
            bands: tuple of bands to load
        Returns:
            imagery of shape (70, number of bands, 1186, 1122) where 70 is the number
            of points in time, 1186 is the tile height, and 1122 is the tile width,
            the rasterio affine transform mapping pixel coordinates to geo coordinates,
            the coordinate reference system of the transform, and the bounds of the tile
        """
if self.verbose:
print("Loading all imagery")
img = torch.zeros(
len(self.dates),
len(bands),
self.tile_height,
self.tile_width,
dtype=torch.float32,
)
for date_index, date in enumerate(self.dates):
            single_scene, transform, crs, bounds = self._load_single_scene(date, bands)
img[date_index] = single_scene
return img, transform, crs, bounds
def get_sample_name(total_samples) -> str:
"""Return the name of the samples.
Args:
total_sample:
Returns:
sample name
"""
return f"sample_{total_samples}"
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert Benin Smallholder Cashews dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
print("Loading dataset from torchgeo")
cashew = GeoBeninCashew(root=SRC_DATASET_DIR, download=True, checksum=True)
if GROUP_BY_TIMESTEP:
n_time_steps = len(noclouds_25) if NOCLOUDS else N_TIMESTEPS
else:
n_time_steps = 1
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(256, 256),
n_time_steps=n_time_steps,
bands_info=BAND_INFO_LIST,
bands_stats=None, # Will be automatically written with the inspect script
label_type=LABEL_BAND,
        # eval_loss=io.SegmentationAccuracy, # TODO probably not the final eval loss. To be discussed.
# either 50cm or 40cm, Airbus Pleiades 50cm, https://radiantearth.blob.core.windows.net/mlhub/technoserve-cashew-benin/Documentation.pdf
spatial_resolution=SPATIAL_RESOLUTION,
)
partition = io.Partition()
task_specs.save(dataset_dir, overwrite=True)
print("Saving timesteps as separate bands")
total_samples = 0
for tg_sample in tqdm(cashew):
images = tg_sample["image"].numpy()
mask = tg_sample["mask"].numpy()
n_timesteps, n_bands, _height, _width = images.shape
label = io.Band(
data=mask, band_info=LABEL_BAND, spatial_resolution=SPATIAL_RESOLUTION, transform=None, crs=None
)
split = np.random.choice(("train", "valid", "test"), p=(0.8, 0.1, 0.1))
grouped_bands = []
for date_idx in range(n_timesteps):
current_bands = []
if NOCLOUDS and date_idx not in noclouds_25:
continue
for band_idx in range(n_bands):
band_data = images[date_idx, band_idx, :, :]
band_info = BAND_INFO_LIST[band_idx]
band = io.Band(
data=band_data,
band_info=band_info,
date=DATES[date_idx],
spatial_resolution=SPATIAL_RESOLUTION,
transform=tg_sample["transform"], # TODO can't find the GPS coordinates from torch geo.
crs=tg_sample["crs"],
convert_to_int16=False,
)
current_bands.append(band)
grouped_bands.append(band)
if not GROUP_BY_TIMESTEP:
sample = io.Sample(current_bands, label=label, sample_name=get_sample_name(total_samples))
sample.write(dataset_dir)
partition.add(split, get_sample_name(total_samples))
total_samples += 1
if max_count is not None and total_samples >= max_count:
break
if GROUP_BY_TIMESTEP:
sample = io.Sample(grouped_bands, label=label, sample_name=get_sample_name(total_samples))
sample.write(dataset_dir)
partition.add(split, get_sample_name(total_samples))
total_samples += 1
if max_count is not None and total_samples >= max_count:
break
# partition.resplit_iid(split_names=("train", "valid", "test"), ratios=(0.8, 0.1, 0.1))
partition.save(dataset_dir, "default")
print(f"Done. GROUP_BY_TIMESTEP={GROUP_BY_TIMESTEP}, total_samples={total_samples}")
if __name__ == "__main__":
convert()
| 12,245 | 28.651332 | 144 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/forestnet.py | """ForestNet dataset."""
# Download the dataset using this link: http://download.cs.stanford.edu/deep/ForestNetDataset.zip
# (Available at this webpage: https://stanfordmlgroup.github.io/projects/forestnet/)
# Unzip the directory, then either place contents in source/forestnet_v1.0
# or create a symlink.
import datetime
import pickle
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw
from tqdm import tqdm
from geobench import io
from geobench.benchmark.dataset_converters import util
DATASET_NAME = "forestnet_v1.0"
SRC_DATASET_DIR = io.CCB_DIR / "source" / DATASET_NAME # type: ignore
DATASET_DIR = io.CCB_DIR / "converted" / DATASET_NAME # type: ignore
SPATIAL_RESOLUTION = 15
PATCH_SIZE = 332
LABELS = [
"Oil palm plantation",
"Timber plantation",
"Other large-scale plantations",
"Grassland shrubland",
"Small-scale agriculture",
"Small-scale mixed plantation",
"Small-scale oil palm plantation",
"Mining",
"Fish pond",
"Logging",
"Secondary forest",
"Other",
]
def get_band_data(img, channel_index, band_idx, date, resolution, transform, crs, meta_info) -> io.Band:
"""Create a Band.
    Args:
        img: image array of shape (height, width, channels)
        channel_index: index of the channel to read from img
        band_idx: index of the band in io.landsat8_9_bands
        date: acquisition date of the image
        resolution: spatial resolution in meters
        transform: rasterio transform mapping pixel to geo coordinates
        crs: coordinate reference system
        meta_info: dictionary of extra metadata attached to the band
Returns:
Band
"""
band_data = io.Band(
data=img[:, :, channel_index],
band_info=io.landsat8_9_bands[band_idx],
date=date,
spatial_resolution=resolution,
transform=transform,
crs=crs,
meta_info=meta_info,
)
return band_data
def draw_img_roi(draw, shape, label: int) -> None:
"""Draw image region of interest.
    Args:
        draw: PIL ImageDraw object to draw on
        shape: shapely Polygon or MultiPolygon delimiting the region
        label: integer value used to outline and fill the region
"""
shape_type = shape.geom_type
if shape_type == "Polygon":
coords = np.array(shape.exterior.coords)
draw.polygon([tuple(coord) for coord in coords], outline=label, fill=label)
else:
for poly in shape.geoms:
coords = np.array(poly.exterior.coords)
draw.polygon([tuple(coord) for coord in coords], outline=label, fill=label)
def overlay_mask(img, mask):
"""Overlay mask on top of image.
Args:
img:
mask:
"""
faded_img = img.copy()
faded_img.putalpha(192)
overlaid_img = Image.new("RGB", img.size, (255, 255, 255))
overlaid_img.paste(faded_img, mask=faded_img.split()[3])
img = np.array(img)
overlaid_img = np.array(overlaid_img)
overlaid_img[np.where(mask)] = img[np.where(mask)]
return overlaid_img
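# Illustrative effect: pixels inside the mask keep their original values, while
# everything outside is blended towards white (alpha 192/255 over a white background).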
def load_sample(example_dir: Path, label: str, year: int) -> io.Sample:
"""Create a sample.
Args:
example_dir: directory to raw data
label: label
year: year
Returns:
sample
"""
# Get lat center and lon center from img path
lat_center, lon_center = map(float, example_dir.name.split("_"))
    radius_in_meter = (PATCH_SIZE / 2) * SPATIAL_RESOLUTION
crs = "EPSG:4326"
    transform = util.center_to_transform(lat_center, lon_center, radius_in_meter, (PATCH_SIZE, PATCH_SIZE))
# Load the forest loss region to mask the image
forest_loss_region = example_dir / "forest_loss_region.pkl"
with forest_loss_region.open("rb") as f:
forest_loss_polygon = pickle.load(f)
mask = Image.new("L", (PATCH_SIZE, PATCH_SIZE), 0)
draw_img_roi(ImageDraw.Draw(mask), forest_loss_polygon, 1)
mask = np.tile(np.array(mask), (3, 1, 1)).transpose((1, 2, 0))
# Load the visible + infrared images and add them as bands
images_dir = example_dir / "images"
visible_dir = images_dir / "visible"
bands = []
seen_years = set()
for visible_image_path in visible_dir.iterdir():
infrared_path = Path(str(visible_image_path).replace("visible", "infrared").replace("png", "npy"))
is_composite = visible_image_path.stem == "composite"
if not is_composite:
str_date, clouds = visible_image_path.stem.split("_cloud_")
date = datetime.datetime.strptime(str_date, "%Y_%m_%d").date()
n_cloud_px = int(clouds)
img_year = date.year
if img_year in seen_years:
# Skip images from the same year
# To get one image per year
continue
seen_years.add(img_year)
visible_img = Image.open(visible_image_path).convert("RGB")
infrared_img = Image.fromarray(np.load(infrared_path).astype(np.uint8))
# Overlay loss region
visible_img = overlay_mask(visible_img, mask)
infrared_img = overlay_mask(infrared_img, mask)
if is_composite:
composite_visible_img = visible_img
composite_infrared_img = infrared_img
continue
meta_info = {"n_cloud_pixels": n_cloud_px, "is_composite": False, "forest_loss_region": forest_loss_polygon.wkt}
# Visible
for i, band_idx in enumerate([3, 2, 1]):
band_data = get_band_data(visible_img, i, band_idx, date, SPATIAL_RESOLUTION, transform, crs, meta_info)
bands.append(band_data)
# Infrared
for i, band_idx in enumerate([4, 5, 6]):
band_data = get_band_data(infrared_img, i, band_idx, date, SPATIAL_RESOLUTION, transform, crs, meta_info)
bands.append(band_data)
# Impute missing years with composite
year = max(year, 2012)
for year_succ in range(year + 1, year + 5):
if year_succ not in seen_years:
meta_info = {"n_cloud_pixels": None, "is_composite": True, "forest_loss_region": forest_loss_polygon.wkt}
str_date = f"{year_succ}_01_01"
date = datetime.datetime.strptime(str_date, "%Y_%m_%d").date()
# Visible
for i, band_idx in enumerate([3, 2, 1]):
band_data = get_band_data(
composite_visible_img, i, band_idx, date, SPATIAL_RESOLUTION, transform, crs, meta_info
)
bands.append(band_data)
# Infrared
for i, band_idx in enumerate([4, 5, 6]):
band_data = get_band_data(
composite_infrared_img, i, band_idx, date, SPATIAL_RESOLUTION, transform, crs, meta_info
)
bands.append(band_data)
label_int = LABELS.index(label)
# How to add per-example metadata?
# TODO: Add the year of the forest loss event
# TODO: Load the per pixel auxiliary files and add them as bands (*.npy)
# TODO: Load the per image auxiliary files and add them as metadata? (*.json)
# TODO: Load all files in NCEP and add them as metadata (ncep/*)
# aux_dir = example_dir / "auxiliary"
return io.Sample(bands, label=label_int, sample_name=example_dir.name)
def convert(max_count=None, dataset_dir=DATASET_DIR) -> None:
"""Convert Forestnet dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
bands_info = io.landsat8_9_bands[3:0:-1] + io.landsat8_9_bands[4:7]
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(PATCH_SIZE, PATCH_SIZE),
n_time_steps=1, # multiple time steps are decomposed into multiple samples
bands_info=bands_info,
bands_stats=None, # Will be automatically written with inspect script
label_type=io.Classification(len(LABELS), LABELS),
        # eval_loss=io.Accuracy, # TODO probably not the final eval loss. To be discussed.
spatial_resolution=SPATIAL_RESOLUTION,
)
task_specs.save(dataset_dir, overwrite=True)
partition = io.Partition()
convert_all(partition, dataset_dir, max_count)
partition.save(dataset_dir, "original", as_default=True)
def convert_all(partition, dataset_dir, max_count):
"""Enclosing code to breaking 3 for loops with a single return statement."""
sample_count = 0
for split in ["train", "val", "test"]:
df = pd.read_csv(SRC_DATASET_DIR / f"{split}.csv")
if split == "val":
split = "valid"
for _, row in tqdm(df.iterrows(), total=df.shape[0]):
example_dir = SRC_DATASET_DIR / row["example_path"]
sample = load_sample(example_dir, row["label"], row["year"])
for sample in decompose_time(sample):
partition.add(split, sample.sample_name)
sample.write(dataset_dir)
sample_count += 1
if max_count is not None and sample_count >= max_count:
return
def decompose_time(sample: io.Sample):
"""Split time of one sample into different samples through a generator."""
band_dict = defaultdict(list)
for band in sample.bands:
band_dict[band.date].append(band)
for date, bands in band_dict.items():
if date is not None:
yield io.Sample(bands, label=sample.label, sample_name=f"{sample.sample_name}_{date.strftime('%Y_%m_%d')}")
else:
yield io.Sample(bands, label=sample.label, sample_name=f"{sample.sample_name}")
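# Each yielded sample keeps only the bands of a single acquisition date and is
# renamed accordingly (e.g. "<sample_name>_2019_06_14"); undated bands, if any,
# are yielded under the original sample name.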
if __name__ == "__main__":
convert()
| 9,302 | 33.712687 | 120 | py |
geo-bench | geo-bench-main/geobench/benchmark/dataset_converters/so2sat.py | """So2Sat dataset."""
# So2Sat will be automatically downloaded by TorchGeo (https://github.com/microsoft/torchgeo)
import os
from pathlib import Path
import numpy as np
from torchgeo.datasets import So2Sat
from tqdm import tqdm
from geobench import io
from geobench.io.dataset import Sample
from geobench.io.task import TaskSpecifications
DATASET_NAME = "so2sat"
SRC_DATASET_DIR = io.CCB_DIR / "source" / DATASET_NAME # type: ignore
DATASET_DIR = io.CCB_DIR / "converted" / DATASET_NAME # type: ignore
def make_sample(
images: "np.typing.NDArray[np.int_]", label: int, sample_name: str, task_specs: TaskSpecifications
) -> Sample:
"""Create a sample from images and label.
Args:
images: image array to be contained in sample
label: label to be contained in sample
sample_name: name of sample
task_specs: task specifications of this datasets
Returns:
sample
"""
n_bands, _height, _width = images.shape
transform = None # TODO can't find the GPS coordinates from torch geo.
crs = None
bands = []
for band_idx in range(n_bands):
band_data = images[band_idx, :, :]
band_info = task_specs.bands_info[band_idx]
band_data = band_data.astype(np.float32)
band = io.Band(
data=band_data,
band_info=band_info,
spatial_resolution=task_specs.spatial_resolution,
transform=transform,
crs=crs,
convert_to_int16=False,
)
bands.append(band)
return io.Sample(bands, label=label, sample_name=sample_name)
def convert(max_count: int = None, dataset_dir: Path = DATASET_DIR) -> None:
"""Convert So2Sat dataset.
Args:
max_count: maximum number of samples
dataset_dir: path to dataset directory
"""
dataset_dir.mkdir(exist_ok=True, parents=True)
partition = io.dataset.Partition()
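    # The 18 channels are the 8 Sentinel-1 bands plus 10 Sentinel-2 bands
    # (B02-B8A, B11 and B12); B01, B09 and B10 are absent from So2Sat.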
task_specs = io.TaskSpecifications(
dataset_name=DATASET_NAME,
patch_size=(32, 32),
n_time_steps=1,
bands_info=io.sentinel1_8_bands + io.sentinel2_13_bands[1:9] + io.sentinel2_13_bands[-2:], # type: ignore
bands_stats=None, # Will be automatically written with the inspect script
label_type=io.Classification(17, class_names=So2Sat.classes),
# eval_loss=io.Accuracy,
spatial_resolution=10,
)
task_specs.save(str(dataset_dir), overwrite=True)
n_samples = 0
for split_name in ["train", "validation", "test"]:
so2sat_dataset = So2Sat(root=SRC_DATASET_DIR, split=split_name, transforms=None, checksum=True)
for tg_sample in tqdm(so2sat_dataset):
sample_name = f"id_{n_samples:04d}"
images = np.array(tg_sample["image"])
label = tg_sample["label"]
sample = make_sample(images, int(label), sample_name, task_specs)
sample.write(str(dataset_dir))
partition.add(split_name.replace("validation", "valid"), sample_name)
n_samples += 1
if max_count is not None and n_samples >= max_count:
break
if max_count is not None and n_samples >= max_count:
break
partition.save(str(dataset_dir), "original", as_default=True)
if __name__ == "__main__":
convert()
| 3,310 | 30.533333 | 114 | py |
geo-bench | geo-bench-main/geobench/torch_toolbox/dataset.py | """GeobenchDataset DataModule."""
from pathlib import Path
from typing import Sequence, Tuple
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from geobench import io
class DataModule(pl.LightningDataModule):
"""Data Module.
    Define a
    `PyTorch Lightning datamodule <https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html>`_
    that provides dataloaders from task_specs.
"""
def __init__(
self,
task_specs: io.TaskSpecifications,
benchmark_dir: str,
partition_name: str,
batch_size: int,
num_workers: int,
val_batch_size: int = None,
train_transform=None,
eval_transform=None,
collate_fn=None,
band_names: Sequence[str] = ("red", "green", "blue"),
format: str = "hdf5",
) -> None:
"""Initialize new instance of DataModule .
Args:
task_specs: TaskSpecifications object to call get_dataset.
benchmark_dir: path to benchmark directory that contains datasets
partition_name: name of partition to load
batch_size: The size of the mini-batch.
num_workers: The number of parallel workers for loading samples from the hard-drive.
            val_batch_size: The size of the batch for the validation set and test set. If None, will use batch_size.
            train_transform: Callable transforming a Sample for training. Executed on a worker and the output will be provided to collate_fn.
            eval_transform: Callable transforming a Sample for evaluation (valid and test splits).
            collate_fn: A callable passed to the DataLoader. Maps a list of Sample to a dictionary of stacked torch tensors.
            band_names: multi spectral bands to select
            format: 'hdf5' or 'tif'
"""
super().__init__()
self.task_specs = task_specs
self.benchmark_dir = benchmark_dir
self.partition_name = partition_name
self.batch_size = batch_size
self.val_batch_size = val_batch_size or batch_size
self.num_workers = num_workers
self.train_transform = train_transform
self.eval_transform = eval_transform
self.collate_fn = collate_fn
self.band_names = band_names
self.format = format
def train_dataloader(self) -> DataLoader:
"""Create the train dataloader."""
return DataLoader(
self.task_specs.get_dataset(
split="train",
partition_name=self.partition_name,
transform=self.train_transform,
band_names=self.band_names,
format=self.format,
benchmark_dir=self.benchmark_dir,
),
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
    def val_dataloader(self) -> Tuple[DataLoader, DataLoader]:
        """Create the validation dataloaders (one for the valid split and one for the test split)."""
return (
DataLoader(
self.task_specs.get_dataset(
split="valid",
partition_name=self.partition_name,
transform=self.eval_transform,
band_names=self.band_names,
format=self.format,
benchmark_dir=Path(self.benchmark_dir),
),
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
),
DataLoader(
self.task_specs.get_dataset(
split="test",
partition_name=self.partition_name,
transform=self.eval_transform,
band_names=self.band_names,
format=self.format,
benchmark_dir=Path(self.benchmark_dir),
),
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
),
)
def test_dataloader(self) -> DataLoader:
"""Create the test dataloader."""
return DataLoader(
self.task_specs.get_dataset(
split="test",
partition_name=self.partition_name,
transform=self.eval_transform,
band_names=self.band_names,
format=self.format,
benchmark_dir=self.benchmark_dir,
),
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
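# Minimal usage sketch (illustrative; the benchmark directory and the task_specs
# object are assumptions, not defined in this module):
#
#   dm = DataModule(
#       task_specs,  # an io.TaskSpecifications instance
#       benchmark_dir="path/to/converted_benchmark",
#       partition_name="default",
#       batch_size=32,
#       num_workers=4,
#   )
#   train_loader = dm.train_dataloader()
#   valid_loader, test_loader = dm.val_dataloader()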
| 4,635 | 35.503937 | 123 | py |
geo-bench | geo-bench-main/geobench/torch_toolbox/__init__.py | """Torch toolbox."""
| 21 | 10 | 20 | py |
geo-bench | geo-bench-main/tests/io/test_dataset.py | import tempfile
import numpy as np
import pytest
import rasterio
from geobench import io
from geobench.io.bandstats import produce_band_stats
def random_band(shape=(16, 16), band_name="test_band", alt_band_names=("alt_name",)):
data = np.random.randint(1, 1000, shape, dtype=np.int16).astype(float)
data *= 2.1
if len(shape) == 3 and shape[2] > 1:
band_info = io.MultiBand(band_name, alt_names=alt_band_names, spatial_resolution=20, n_bands=shape[2])
else:
band_info = io.SpectralBand(band_name, alt_names=alt_band_names, spatial_resolution=20, wavelength=0.1)
transform = rasterio.transform.from_bounds(1, 2, 3, 3, 4, 5)
return io.Band(data, band_info, 10, transform=transform, crs="EPSG:4326")
def random_sample(n_bands=3, name="test_sample"):
    bands = [random_band(band_name=f"{i:2d}", alt_band_names=(f"alt_{i:2d}",)) for i in range(n_bands)]
return io.Sample(bands, np.random.randint(2), name)
def test_pack_4d_dense():
bands = [
random_band((3, 4), "band_1", ("alt_band_1",)),
random_band((3, 4), "band_2", ("alt_band_2",)),
random_band((6, 8), "band_3", ("alt_band_3",)),
]
sample = io.Sample(bands, np.random.randint(2), "test_sample")
image, dates, band_names = sample.pack_to_4d(resample=True, band_names=("band_1", "band_2", "band_3"))
image_, _ = sample.pack_to_3d(resample=True, band_names=("band_1", "band_2", "band_3"))
np.testing.assert_array_equal(image[0], image_)
assert image.shape == (1, 6, 8, 3)
assert dates == [None]
assert tuple(band_names) == ("band_1", "band_2", "band_3")
image, dates, band_names = sample.pack_to_4d(band_names=("band_1", "band_2"))
assert image.shape == (1, 3, 4, 2)
assert dates == [None]
assert tuple(band_names) == ("band_1", "band_2")
def test_crop_from_ratio():
band = random_band(shape=(10, 10))
old_data = band.data
band.crop_from_ratio((0.1, 0.1), (0.8, 0.8))
assert band.data.shape == (8, 8)
np.testing.assert_equal(old_data[1:9, 1:9], band.data)
def test_pack_4d_multi_band():
bands = [
random_band((3, 4, 5), "band_1", ("alt_band_1",)),
random_band((3, 4), "band_2", ("alt_band_2",)),
random_band((6, 8), "band_3", ("alt_band_3",)),
]
sample = io.Sample(bands, np.random.randint(2), "test_sample")
image, dates, band_names = sample.pack_to_4d(resample=True)
assert dates == [None]
assert image.shape == (1, 6, 8, 7)
assert tuple(band_names) == ("band_1",) * 5 + ("band_2", "band_3")
def test_write_read():
with tempfile.TemporaryDirectory() as dataset_dir:
sample = random_sample()
sample.write(dataset_dir)
band_names = [band.band_info.name for band in sample.bands]
# define task_spec for dataset
bands_info = [
io.SpectralBand(
name=band.band_info.name,
alt_names=(band.band_info.alt_names,),
spatial_resolution=band.band_info.spatial_resolution,
)
for band in sample.bands
]
task_specs = io.TaskSpecifications(
dataset_name="test",
benchmark_name="test_bench",
patch_size=(16, 16),
spatial_resolution=1.0,
bands_info=bands_info,
)
task_specs.save(dataset_dir, overwrite=True)
partition = io.Partition()
partition.add("train", sample.sample_name)
partition.save(directory=dataset_dir, partition_name="default")
ds = io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="default")
sample_ = list(ds.iter_dataset(1))[0]
assert len(sample.bands) == len(sample_.bands)
# TODO need to review test here
for band in sample.bands:
assert len(list(filter(lambda band_: band.band_info == band_.band_info, sample_.bands))) > 0
# assert len(list(filter(lambda band_: band.crs == band_.crs, sample_.bands))) > 0
def assert_same_sample(sample, sample_):
assert sample.sample_name == sample_.sample_name
assert len(sample.bands) == len(sample_.bands)
for band in sample.bands:
len(list(filter(lambda band_: band.band_info == band_.band_info, sample_.bands))) > 0
def test_dataset_partition():
# Create fake dataset
with tempfile.TemporaryDirectory() as dataset_dir:
sample1 = random_sample(name="sample1")
sample1.write(dataset_dir)
sample2 = random_sample(name="sample2")
sample2.write(dataset_dir)
sample3 = random_sample(name="sample3")
sample3.write(dataset_dir)
band_names = [band.band_info.name for band in sample1.bands]
bands_info = [
io.SpectralBand(
name=band.band_info.name,
alt_names=(band.band_info.alt_names,),
spatial_resolution=band.band_info.spatial_resolution,
)
for band in sample1.bands
]
task_specs = io.TaskSpecifications(
dataset_name="test",
benchmark_name="test_bench",
patch_size=(16, 16),
spatial_resolution=1.0,
bands_info=bands_info,
)
task_specs.save(dataset_dir, overwrite=True)
# Create default partition
partition = io.Partition()
partition.add("train", sample1.sample_name)
partition.add("valid", sample2.sample_name)
partition.add("valid", sample3.sample_name)
partition.save(directory=dataset_dir, partition_name="default")
# Create funky partition
partition = io.Partition()
partition.add("valid", sample1.sample_name)
partition.add("test", sample2.sample_name)
partition.add("train", sample3.sample_name)
partition.save(directory=dataset_dir, partition_name="funky")
# Test 1: load partition default, no split
ds = io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="default")
assert set(ds.list_partitions()) == set(["funky", "default"])
assert ds.active_partition_name == "default" # use default normally
assert set(ds.list_splits()) == set(["train", "valid", "test"])
assert ds.split is None
assert len(ds) == 3
# Ordering is not guaranteed. Do we want to enforce that? The following can fail
# assert_same_sample(ds[0], sample1)
# assert_same_sample(ds[1], sample2)
# assert_same_sample(ds[2], sample3)
ds.set_split("train")
assert ds.split == "train"
assert_same_sample(ds[0], sample1)
assert len(ds) == 1
ds.set_split("valid")
assert ds.split == "valid"
# Try strict ordering
try:
assert_same_sample(ds[0], sample2)
assert_same_sample(ds[1], sample3)
except Exception:
assert_same_sample(ds[0], sample3)
assert_same_sample(ds[1], sample2)
assert len(ds) == 2
ds.set_split("test")
assert ds.split == "test"
assert len(ds) == 0
with pytest.raises(IndexError): # default:test is empty
ds[0]
ds = io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="funky")
assert set(ds.list_partitions()) == set(["funky", "default"])
assert ds.active_partition_name == "funky" # use default normally
assert set(ds.list_splits()) == set(["train", "valid", "test"])
assert len(ds) == 3
ds.set_split("train")
assert ds.split == "train"
assert_same_sample(ds[0], sample3)
assert len(ds) == 1
ds.set_split("valid")
assert ds.split == "valid"
assert_same_sample(ds[0], sample1)
assert len(ds) == 1
ds.set_split("test")
assert ds.split == "test"
assert_same_sample(ds[0], sample2)
assert len(ds) == 1
with pytest.raises(IndexError): # default:test is out of bounds
ds[2]
def test_dataset_withnopartition():
with tempfile.TemporaryDirectory() as dataset_dir:
sample1 = random_sample(name="sample1")
sample1.write(dataset_dir)
sample2 = random_sample(name="sample2")
sample2.write(dataset_dir)
sample3 = random_sample(name="sample3")
sample3.write(dataset_dir)
band_names = [band.band_info.name for band in sample1.bands]
        with pytest.raises(ValueError):  # raises ValueError because no partition exists
_ = io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="default")
def custom_band(value, shape=(4, 4), band_name="test_band"):
data = np.empty(shape)
data.fill(value)
if len(shape) == 3 and shape[2] > 1:
        band_info = io.MultiBand(band_name, alt_names=("tb",), spatial_resolution=20, n_bands=shape[2])
    else:
        band_info = io.SpectralBand(band_name, alt_names=("tb",), spatial_resolution=20, wavelength=0.1)
return io.Band(data, band_info, 10)
def custom_sample(base_value, n_bands=3, name="test_sample"):
bands = [custom_band(value=base_value + float(i), band_name=f"Band {i}") for i in (100, 200, 300)]
return io.Sample(bands, base_value, name)
def test_dataset_statistics():
with tempfile.TemporaryDirectory() as dataset_dir:
sample1 = custom_sample(base_value=1, name="sample_001")
sample1.write(dataset_dir)
sample2 = custom_sample(base_value=2, name="sample_002")
sample2.write(dataset_dir)
sample3 = custom_sample(base_value=3, name="sample_003")
sample3.write(dataset_dir)
band_names = [band.band_info.name for band in sample1.bands]
bands_info = [
io.SpectralBand(
name=band.band_info.name,
alt_names=(band.band_info.alt_names,),
spatial_resolution=band.band_info.spatial_resolution,
)
for band in sample1.bands
]
task_specs = io.TaskSpecifications(
dataset_name="test",
benchmark_name="test_bench",
patch_size=(16, 16),
spatial_resolution=1.0,
bands_info=bands_info,
)
task_specs.save(dataset_dir, overwrite=True)
# Default partition, only train
partition = io.Partition()
partition.add("train", sample1.sample_name)
partition.add("train", sample2.sample_name)
partition.add("train", sample3.sample_name)
partition.save(directory=dataset_dir, partition_name="default")
# Compute statistics : this will create all_bandstats.json
produce_band_stats(
io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="default"),
values_per_image=None,
samples=None,
)
# Reload dataset with statistics
ds2 = io.GeobenchDataset(dataset_dir, band_names=band_names, partition_name="default")
statistics = ds2.band_stats
assert set(statistics.keys()) == set(["Band 100", "Band 200", "Band 300", "label"])
assert np.equal(statistics["Band 100"].min, 101)
assert np.equal(statistics["Band 100"].max, 103)
assert np.equal(statistics["Band 100"].median, 102)
assert np.equal(statistics["Band 100"].mean, 102)
assert np.equal(statistics["Band 100"].percentile_1, 101)
assert np.equal(statistics["Band 100"].percentile_99, 103)
assert np.equal(statistics["Band 200"].min, 201)
assert np.equal(statistics["Band 200"].max, 203)
assert np.equal(statistics["Band 200"].median, 202)
assert np.equal(statistics["Band 200"].mean, 202)
assert np.equal(statistics["Band 200"].percentile_1, 201)
assert np.equal(statistics["Band 200"].percentile_99, 203)
assert np.equal(statistics["label"].min, 1)
assert np.equal(statistics["label"].max, 3)
assert np.equal(statistics["label"].median, 2)
assert np.equal(statistics["label"].mean, 2)
print("Done")
def test_class_id():
from geobench import io
from geobench.io import dataset
assert isinstance(dataset.sentinel2_13_bands[0], io.SpectralBand)
assert isinstance(io.sentinel2_13_bands[0], dataset.SpectralBand)
if __name__ == "__main__":
# test_pack_4d_dense()
# test_pack_4d_multi_band()
test_write_read()
# test_dataset_partition()
# test_dataset_withnopartition()
# test_dataset_statistics()
# test_class_id()
# test_crop_from_ratio()
| 12,539 | 35.242775 | 111 | py |
geo-bench | geo-bench-main/tests/experiment/__init__.py | 0 | 0 | 0 | py |
|
geo-bench | geo-bench-main/tests/benchmark/create_benchmark_test.py | from collections import defaultdict
from typing import Dict, List
import numpy as np
from geobench import io
from geobench.benchmark.create_benchmark import resample, resample_from_stats
def make_rand_partition(n=1000):
sample_names = [f"{i:04}" for i in range(n)]
splits = np.random.choice(["train", "valid", "test"], size=n, p=[0.8, 0.1, 0.1], replace=True)
class_probs = [0.7, 0.2, 0.02, 0.08]
labels = np.random.choice(list(range(len(class_probs))), size=n, p=class_probs, replace=True)
partition_dict = defaultdict(list)
label_map = defaultdict(list)
reverse_label_map = {}
eye = np.eye(len(class_probs))
label_stats = {}
for split, sample_name, label in zip(splits, sample_names, labels):
partition_dict[split].append(sample_name)
label_map[label].append(sample_name)
reverse_label_map[sample_name] = label
label_stats[sample_name] = eye[label] # converts to one hot
    assert_no_overlap(partition_dict)
return io.Partition(partition_dict=partition_dict), label_map, reverse_label_map, label_stats
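# make_rand_partition returns: a Partition with random ~80/10/10 splits, a
# label -> sample_names map, a sample_name -> label map, and per-sample one-hot
# label vectors used as the label_stats input of resample_from_stats.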
def assert_no_overlap(partition_dict: Dict[str, List[str]]):
sample_set = set()
total_count = 0
for sample_names in partition_dict.values():
sample_set.update(sample_names)
total_count += len(sample_names)
assert total_count == len(sample_set), f"{total_count} != {len(sample_set)}"
def test_resample():
partition, label_map, reverse_label_map, _ = make_rand_partition(n=1000)
max_sizes = {"train": 100, "valid": 20, "test": 25}
min_class_sizes = {"train": 10, "valid": 1, "test": 2}
partition = resample(
partition,
label_map,
max_sizes=max_sizes,
min_class_sizes=min_class_sizes,
)
verify_partition(partition, max_sizes, min_class_sizes, reverse_label_map)
def verify_partition(partition, max_sizes, min_class_sizes=None, reverse_label_map=None):
partition_dict = partition.partition_dict
    assert_no_overlap(partition_dict)
split_label_map = {}
for split, max_size in max_sizes.items():
split_label_map[split] = defaultdict(list)
sample_names = partition_dict[split]
assert len(sample_names) <= max_size
for sample_name in sample_names:
label = reverse_label_map[sample_name]
split_label_map[split][label].append(sample_name)
for split, min_class_size in min_class_sizes.items():
for label, sample_names in split_label_map[split].items():
assert len(sample_names) >= min_class_size
for split, label_map in split_label_map.items():
print(split)
for label, sample_names in label_map.items():
print(f" class {label:2d}: {len(sample_names)}.")
def test_resample_from_stats():
partition, _, reverse_label_map, label_stats = make_rand_partition(n=10000)
max_sizes = {"train": 100, "valid": 20, "test": 25}
min_class_sizes = {"train": 10, "valid": 1, "test": 1}
new_partition, prob_dict = resample_from_stats(
partition, label_stats=label_stats, max_sizes=max_sizes, return_prob=True
)
verify_partition(new_partition, max_sizes, min_class_sizes, reverse_label_map)
if __name__ == "__main__":
test_resample()
# test_resample_from_stats()
| 3,325 | 33.645833 | 98 | py |
geo-bench | geo-bench-main/tests/benchmark/__init__.py | 0 | 0 | 0 | py |
|
geo-bench | geo-bench-main/tests/dataset_converters/test_util.py | import numpy as np
from geobench.benchmark.dataset_converters import util
def transform_to_center(transform, img_shape):
lon_lat = transform * (np.array(img_shape) / 2.0)
return lon_lat[::-1]
def test_center_to_transform():
point_lat_lon = 45.630001, -73.519997
img_shape = 100, 100
spatial_resolution = 10
radius_in_meter = spatial_resolution * img_shape[0] / 2
transfrorm = util.center_to_transform(*point_lat_lon, radius_in_meter, img_shape)
point_lat_lon_ = transform_to_center(transfrorm, np.array(img_shape) * 1.0)
assert np.allclose(point_lat_lon, point_lat_lon_)
if __name__ == "__main__":
test_center_to_transform()
| 675 | 25 | 85 | py |
geo-bench | geo-bench-main/tests/dataset_converters/test_converters.py | import tempfile
from pathlib import Path
import pytest
from geobench import io
def converter_tester(converter):
assert "convert" in dir(converter)
assert "DATASET_NAME" in dir(converter)
assert "DATASET_DIR" in dir(converter)
with tempfile.TemporaryDirectory() as datasets_dir:
dataset_dir = Path(datasets_dir, converter.DATASET_NAME)
converter.convert(max_count=5, dataset_dir=Path(dataset_dir))
dataset = io.GeobenchDataset(dataset_dir, band_names=["red", "green", "blue"], partition_name="default")
samples = list(dataset.iter_dataset(5))
assert len(dataset) == 5, f"returned dataset of length {len(dataset)}"
io.check_dataset_integrity(dataset, samples=samples)
# True when the source datasets directory is absent; the converter tests below
# are skipped in that case because they need the raw source data.
SRC_DIR_EXISTS = not Path(io.src_datasets_dir).exists()
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_brick_kiln():
from geobench.benchmark.dataset_converters import brick_kiln
converter_tester(brick_kiln)
# @pytest.mark.converter
# @pytest.mark.slow
# @pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
# def test_cv4a_kenya_cropy_type():
# from geobench.benchmark.dataset_converters import cv4a_kenya_crop_type
# converter_tester(cv4a_kenya_crop_type)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_eurosat():
from geobench.benchmark.dataset_converters import eurosat
converter_tester(eurosat)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_neon_tree():
from geobench.benchmark.dataset_converters import neon_tree
converter_tester(neon_tree)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_smallholder_cashews():
from geobench.benchmark.dataset_converters import benin_smallholder_cashews
converter_tester(benin_smallholder_cashews)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_so2sat():
from geobench.benchmark.dataset_converters import so2sat
converter_tester(so2sat)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_geolifeclef():
from geobench.benchmark.dataset_converters import geolifeclef
converter_tester(geolifeclef)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_nz_cattle_detection():
from geobench.benchmark.dataset_converters import nz_cattle_detection
converter_tester(nz_cattle_detection)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_xview2():
from geobench.benchmark.dataset_converters import xview2
converter_tester(xview2)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_pv4ger():
from geobench.benchmark.dataset_converters import pv4ger
converter_tester(pv4ger)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_chesapeake():
from geobench.benchmark.dataset_converters import chesapeake_land_cover
converter_tester(chesapeake_land_cover)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_forestnet():
from geobench.benchmark.dataset_converters import forestnet
converter_tester(forestnet)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_bigearthnet():
from geobench.benchmark.dataset_converters import bigearthnet
converter_tester(bigearthnet)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_south_africa_crop_type():
from geobench.benchmark.dataset_converters import crop_type_south_africa
converter_tester(crop_type_south_africa)
@pytest.mark.converter
@pytest.mark.slow
@pytest.mark.skipif(SRC_DIR_EXISTS, reason="Requires presence of the source datasets.")
def test_seasonet():
from geobench.benchmark.dataset_converters import seasonet
converter_tester(seasonet)
if __name__ == "__main__":
# test_brick_kiln()
# test_cv4a_kenya_cropy_type()
test_eurosat()
# test_neon_tree()
# # test_smallholder_cashews()
# test_so2sat()
# test_nz_cattle_detection()
# test_xview2()
# test_bigearthnet()
# test_south_africa_crop_type()
# test_chesapeake()
# test_forestnet()
# test_pv4ger()
# test_seasonet()
| 5,090 | 28.258621 | 112 | py |
PeriodSuite | PeriodSuite-master/arb_matrix_cereal_wrap.py |
from sage.all import *
class ARBMatrixCerealWrap:
"""
A wrapper class to enable serialization of complex arb matrix objects. The original arb matrix
can be constructed via the `arb_matrix` method.
"""
def __init__(self, arb_mat):
self.nrows = arb_mat.nrows()
self.ncols = arb_mat.ncols()
self.arb_entries = [ [x.mid(), x.diameter()] for x in arb_mat.list()]
self.base_ring = arb_mat.base_ring()
def ball_field_elem(self, x):
return self.base_ring(x[0]).add_error(x[1])
def arb_matrix(self):
return matrix(self.base_ring, self.nrows , self.ncols,
map(self.ball_field_elem, self.arb_entries))
def list(self):
return self.arb_entries
def entries_as_arbs(self):
raw_out_data = self.arb_entries
return [self.base_ring(x[0]).add_error(x[1]) for x in raw_out_data]
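
# A minimal round-trip sketch (assumption: executed inside Sage, where
# `matrix` and `ComplexBallField` come from the `sage.all` star import above;
# the 2x2 matrix and 100-bit precision are arbitrary illustrative choices).
if __name__ == "__main__":
    import pickle
    CBF = ComplexBallField(100)
    M = matrix(CBF, 2, 2, [CBF(1).add_error(1e-20), CBF(0), CBF(0), CBF(2)])
    wrapped = ARBMatrixCerealWrap(M)
    # arb matrices themselves do not pickle; the wrapper does.
    restored = pickle.loads(pickle.dumps(wrapped))
    assert restored.list() == wrapped.list()
    # restored.arb_matrix() rebuilds the original ball matrix when needed.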
| 906 | 31.392857 | 98 | py |
PeriodSuite | PeriodSuite-master/__init__.py | 0 | 0 | 0 | py |
|
PeriodSuite | PeriodSuite-master/parse_suite_data.py | ######
# Utilities to parse data from suite files. Typical format is:
"""
ode=D^2 + 3*t/(t^2 - 4)*D + 3/4/(t^2 - 4)
init=[
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ],
[ 0, 0, 0, 0, 1/4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
]
path=[ 0, 1 ]
label=(1,2)
loop_position=-1
singular_locus=[1]
"""
def parse_suite_file(filename):
"""
Parses the output of a period suite file. Returns a dictionary of the form
    {identifier: value}. Each value is a raw string, intended to be evaluated
    later by the caller.
"""
id_dic = {}
with open(filename) as F:
for line in F:
s = line.rstrip().split('=')
            # A line containing '=' starts a new identifier; a bare line
            # continues the previous identifier's value.
            if len(s) > 1:
some_id = s[0].strip()
id_dic[some_id] = s[1]
else:
id_dic[some_id] += s[0]
return id_dic
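
# Usage sketch for the parser above (the path is hypothetical). Values are
# returned as raw strings on purpose; evaluation is left to the caller.
if __name__ == "__main__":
    ids = parse_suite_file("suite/ode_data")
    print(ids["ode"])   # e.g. "D^2 + 3*t/(t^2 - 4)*D + 3/4/(t^2 - 4)"
    print(ids["path"])  # e.g. "[ 0, 1 ]"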
| 905 | 24.885714 | 78 | py |
sent_debias | sent_debias-master/download_glue_data.py | ''' Script for downloading all GLUE data.
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
'''
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4',
"SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
"MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc',
"QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5',
"STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5',
"MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce',
"SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df',
"QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601',
"RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb',
"WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf',
"diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'}
MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
def download_and_extract(task, data_dir):
print("Downloading and extracting %s..." % task)
data_file = "%s.zip" % task
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(data_dir)
os.remove(data_file)
print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(data_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
with open(mrpc_train_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \
open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with open(mrpc_test_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
def download_diagnostic(data_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
os.mkdir(os.path.join(data_dir, "diagnostic"))
data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def get_tasks(task_names):
task_names = task_names.split(',')
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
assert task_name in TASKS, "Task %s not found!" % task_name
tasks.append(task_name)
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
type=str, default='QNLI,CoLA,SST')
    parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt',
type=str, default='')
args = parser.parse_args(arguments)
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if task == 'MRPC':
format_mrpc(args.data_dir, args.path_to_mrpc)
elif task == 'diagnostic':
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
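
# Example invocations (sketch; directory paths are illustrative):
#   python download_glue_data.py --data_dir glue_data --tasks all
#   python download_glue_data.py --tasks MRPC --path_to_mrpc /tmp/MRPC
# The second form reuses a locally extracted MRPC copy instead of downloading it.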
| 8,236 | 57.007042 | 623 | py |
sent_debias | sent_debias-master/debias-BERT/setup.py | """
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py and setup.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi allennlp
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from io import open
from setuptools import find_packages, setup
setup(
name="pytorch_pretrained_bert",
version="0.6.2",
author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors, Open AI team Authors",
author_email="[email protected]",
description="PyTorch version of Google AI BERT model with script to load Google pre-trained models",
keywords='BERT NLP deep learning google',
license='Apache',
url="https://github.com/huggingface/pytorch-pretrained-BERT",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['torch>=0.4.1',
'numpy',
'boto3',
'requests',
'tqdm',
'regex'],
entry_points={
'console_scripts': [
"pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main",
]
},
# python_requires='>=3.5.0',
tests_require=['pytest'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| 2,677 | 37.257143 | 104 | py |
sent_debias | sent_debias-master/debias-BERT/hubconf.py | from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import (
BertModel,
BertForNextSentencePrediction,
BertForMaskedLM,
BertForMultipleChoice,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
)
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex']
# A lot of models share the same param doc. Use a decorator
# to save typing
bert_docstring = """
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining
instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow
checkpoint
cache_dir: an optional path to a folder in which the pre-trained models
will be cached.
        state_dict: an optional state dictionary
(collections.OrderedDict object) to use instead of Google
pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
def _append_from_pretrained_docstring(docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + docstr
return fn
return docstring_decorator
def bertTokenizer(*args, **kwargs):
"""
Instantiate a BertTokenizer from a pre-trained/customized vocab file
Args:
pretrained_model_name_or_path: Path to pretrained model archive
or one of pre-trained vocab configs below.
* bert-base-uncased
* bert-large-uncased
* bert-base-cased
* bert-large-cased
* bert-base-multilingual-uncased
* bert-base-multilingual-cased
* bert-base-chinese
Keyword args:
cache_dir: an optional path to a specific directory to download and cache
the pre-trained model weights.
Default: None
do_lower_case: Whether to lower case the input.
Only has an effect when do_wordpiece_only=False
Default: True
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
Default: True
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
Default: None
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
Default: ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
Example:
>>> sentence = 'Hello, World!'
>>> tokenizer = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
>>> toks = tokenizer.tokenize(sentence)
['Hello', '##,', 'World', '##!']
>>> ids = tokenizer.convert_tokens_to_ids(toks)
[8667, 28136, 1291, 28125]
"""
tokenizer = BertTokenizer.from_pretrained(*args, **kwargs)
return tokenizer
@_append_from_pretrained_docstring(bert_docstring)
def bertModel(*args, **kwargs):
"""
BertModel is the basic BERT Transformer model with a layer of summed token,
position and sequence embeddings followed by a series of identical
self-attention blocks (12 for BERT-base, 24 for BERT-large).
"""
model = BertModel.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForNextSentencePrediction(*args, **kwargs):
"""
BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence
classification head.
"""
model = BertForNextSentencePrediction.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForPreTraining(*args, **kwargs):
"""
BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads
- the masked language modeling head, and
- the next sentence classification head.
"""
model = BertForPreTraining.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForMaskedLM(*args, **kwargs):
"""
BertForMaskedLM includes the BertModel Transformer followed by the
(possibly) pre-trained masked language modeling head.
"""
model = BertForMaskedLM.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForSequenceClassification(*args, **kwargs):
"""
BertForSequenceClassification is a fine-tuning model that includes
BertModel and a sequence-level (sequence or pair of sequences) classifier
on top of the BertModel.
The sequence-level classifier is a linear layer that takes as input the
last hidden state of the first character in the input sequence
(see Figures 3a and 3b in the BERT paper).
"""
model = BertForSequenceClassification.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForMultipleChoice(*args, **kwargs):
"""
BertForMultipleChoice is a fine-tuning model that includes BertModel and a
linear layer on top of the BertModel.
"""
model = BertForMultipleChoice.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForQuestionAnswering(*args, **kwargs):
"""
BertForQuestionAnswering is a fine-tuning model that includes BertModel
with a token-level classifiers on top of the full sequence of last hidden
states.
"""
model = BertForQuestionAnswering.from_pretrained(*args, **kwargs)
return model
@_append_from_pretrained_docstring(bert_docstring)
def bertForTokenClassification(*args, **kwargs):
"""
BertForTokenClassification is a fine-tuning model that includes BertModel
and a token-level classifier on top of the BertModel.
The token-level classifier is a linear layer that takes as input the last
hidden state of the sequence.
"""
model = BertForTokenClassification.from_pretrained(*args, **kwargs)
return model
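
# A minimal loading sketch via torch.hub (assumption: called against a
# checkout/branch that exposes this hubconf; the repo string mirrors the
# example in the bertTokenizer docstring and is illustrative):
#
#   import torch
#   model = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf',
#                          'bertForSequenceClassification',
#                          'bert-base-uncased', num_labels=2)
#   model.eval()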
| 7,403 | 38.382979 | 163 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active with warmup_linear)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Decreases learning rate from 1. to 0. over the remaining `1 - warmup` fraction of steps following a cosine curve.
    `cycles` (default 0.5) sets the number of full cosine periods traversed after warmup; the default gives a single decay from 1. to 0.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    If `cycles` (default=1.) is different from the default, the learning rate follows `cycles` successive
    cosine decays from 1. to 0., with a hard restart at each cycle boundary.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
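
# Worked example (sketch): with warmup=0.1 and t_total=100, WarmupLinearSchedule
# ramps the multiplier from 0 to 1 over the first 10 steps and then decays it
# linearly back to 0:
#
#   sched = WarmupLinearSchedule(warmup=0.1, t_total=100)
#   sched.get_lr(5)    # 0.5  (halfway through warmup)
#   sched.get_lr(10)   # 1.0  (warmup complete)
#   sched.get_lr(55)   # 0.5  (halfway through the linear decay)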
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
                # No bias correction (compare OpenAIAdam, which keeps the standard Adam update):
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
return loss
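
# Typical construction sketch (assumptions: `model` is any torch.nn.Module and
# `num_train_steps` is computed by the caller; the two-group split that exempts
# biases and LayerNorm weights from weight decay is a common BERT fine-tuning
# convention, not something this class requires):
#
#   param_optimizer = list(model.named_parameters())
#   no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
#   grouped_parameters = [
#       {'params': [p for n, p in param_optimizer
#                   if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
#       {'params': [p for n, p in param_optimizer
#                   if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
#   optimizer = BertAdam(grouped_parameters, lr=2e-5,
#                        warmup=0.1, t_total=num_train_steps)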
| 13,028 | 42 | 139 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/optimization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
from .optimization import SCHEDULES, _LRSchedule, WarmupCosineWithWarmupRestartsSchedule, \
WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule, WarmupLinearSchedule, WarmupConstantSchedule
logger = logging.getLogger(__name__)
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['e'])
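                # Unlike BertAdam in optimization.py, OpenAIAdam keeps the
                # standard Adam bias correction: dividing by (1 - beta**step)
                # compensates for the zero-initialized moment estimates early
                # in training.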
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
p.data.add_(-lr_scheduled * group['weight_decay'], p.data)
return loss
| 5,517 | 42.109375 | 134 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
"convert_tf_checkpoint_to_pytorch",
"convert_openai_checkpoint",
"convert_transfo_xl_checkpoint",
"convert_gpt2_checkpoint",
]:
print(
"Should be used as one of: \n"
">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
else:
if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_openai_checkpoint":
from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
OPENAI_GPT_CONFIG = sys.argv[4]
else:
OPENAI_GPT_CONFIG = ""
convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
OPENAI_GPT_CONFIG,
PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_transfo_xl_checkpoint":
try:
from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if 'ckpt' in sys.argv[2].lower():
TF_CHECKPOINT = sys.argv[2]
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = sys.argv[2]
TF_CHECKPOINT = ""
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
else:
try:
from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
TF_CHECKPOINT = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
main()
| 4,393 | 51.309524 | 145 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
GPT2Config,
GPT2Model,
load_tf_weights_in_gpt2)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config(gpt2_config_file)
model = GPT2Model(config)
# Load weights from numpy
load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpt2_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--gpt2_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
args.gpt2_config_file,
args.pytorch_dump_folder_path)
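
# Example invocation (sketch; paths are illustrative):
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output_dir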
| 3,017 | 40.342466 | 111 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 3,106 | 41.561644 | 118 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
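
# Usage sketch (downloads the vocabulary on first use; the sentence is
# illustrative):
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tokens = tokenizer.tokenize("unaffable, isn't it?")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   assert tokenizer.convert_ids_to_tokens(ids) == tokens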
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
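# --- Illustrative usage sketch (not part of the original file) ---
# Demonstrates the greedy longest-match-first loop above on a toy,
# made-up vocabulary; it is only defined here, never called at import time.
def _wordpiece_demo():
    toy_vocab = {"un", "##aff", "##able", "runn", "##ing", "[UNK]"}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    assert tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
    # "xyz" has no vocabulary match at position 0, so the whole word
    # collapses to the unknown token.
    assert tokenizer.tokenize("xyz") == ["[UNK]"]
    return tokenizer.tokenize("running")  # -> ["runn", "##ing"]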
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 17,124 | 40.165865 | 133 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
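# --- Illustrative usage sketch (not part of the original file) ---
# Round-trips a BertConfig through the JSON serialization helpers defined
# above; only defined here, never called at import time.
def _bert_config_roundtrip_demo():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12)
    clone = BertConfig.from_dict(json.loads(config.to_json_string()))
    assert clone.hidden_size == config.hidden_size
    return clone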
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
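# --- Illustrative numerical check (not part of the original file) ---
# Verifies that BertLayerNorm (the pure-PyTorch fallback above; apex's
# fused version shares the same interface) yields per-position activations
# with ~zero mean and ~unit variance before the affine transform. Only
# defined here, never called at import time.
def _layer_norm_demo():
    ln = BertLayerNorm(8)
    x = torch.randn(2, 4, 8) * 5.0 + 3.0
    y = ln(x)
    # weight=1 and bias=0 at init, so y is the normalized tensor itself.
    return y.mean(-1), y.var(-1, unbiased=False)  # ~0 and ~1 per position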
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
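# --- Illustrative shape walk-through (not part of the original file) ---
# Traces the tensor shapes through BertSelfAttention.forward above for a
# tiny config; an additive mask of zeros means "attend everywhere". Only
# defined here, never called at import time.
def _self_attention_shape_demo():
    config = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                        num_hidden_layers=1, num_attention_heads=4,
                        intermediate_size=32)
    attn = BertSelfAttention(config)
    hidden = torch.randn(2, 5, 16)          # [batch, seq_len, hidden]
    mask = torch.zeros(2, 1, 1, 5)          # additive mask, 0 = keep
    out = attn(hidden, mask)
    assert out.shape == (2, 5, 16)          # same shape as the input
    return out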
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
print("Calling from_pretrained.")
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
if not os.path.exists(config_file):
# Backward compatibility with old naming format
config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu')
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
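# --- Illustrative usage sketch (not part of the original file) ---
# Typical entry point for the loader above. It needs network access or a
# populated cache, so it is only defined here, never called at import time.
def _from_pretrained_demo():
    # Downloads/caches the archive, reads bert_config.json, then loads the
    # PyTorch state dict with the gamma/beta -> weight/bias renaming above.
    model = BertModel.from_pretrained('bert-base-uncased')
    return model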
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
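# --- Illustrative numerical check (not part of the original file) ---
# Reproduces the additive-mask trick from BertModel.forward above: a 0/1
# padding mask becomes 0.0 for kept positions and -10000.0 for padding,
# which the softmax then treats as effectively removed. Only defined here,
# never called at import time.
def _extended_attention_mask_demo():
    attention_mask = torch.LongTensor([[1, 1, 0]])             # last token is padding
    extended = attention_mask.unsqueeze(1).unsqueeze(2).float()
    extended = (1.0 - extended) * -10000.0                     # -> [[[[0., 0., -10000.]]]]
    probs = nn.Softmax(dim=-1)(torch.zeros(1, 1, 1, 3) + extended)
    return probs  # ~[0.5, 0.5, 0.0]: the masked position gets ~no attention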
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
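# --- Illustrative numerical check (not part of the original file) ---
# Shows how the -1 labels are skipped by the loss in
# BertForMaskedLM.forward above: only the single masked position
# contributes to the average. Only defined here, never called at import time.
def _masked_lm_loss_demo():
    vocab_size = 10
    logits = torch.randn(1, 4, vocab_size)                 # [batch, seq, vocab]
    labels = torch.LongTensor([[-1, 7, -1, -1]])           # only position 1 is scored
    loss_fct = CrossEntropyLoss(ignore_index=-1)
    loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
    # Identical to the cross entropy of position 1 alone.
    return loss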
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary. Items in the batch should begin with the special "[CLS]" token (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels, normalize=False, tune_bert=True):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
self.normalize = normalize
logger.info("Normalize={}".format(normalize))
self.tune_bert = tune_bert
logger.info("Fine-tune bert={}".format(tune_bert))
logger.info("dp={} hidden size={}".format(config.hidden_dropout_prob, config.hidden_size))
# self.remove_bias = remove_bias
# logger.info("Removing bias={}".format(remove_bias))
# self.bias_dir = bias_dir
# if (remove_bias):
# logger.info("Bias direction first 10 elements={}".format(bias_dir[:10]))
def drop_bias(self, u, v):
return u - torch.ger(torch.matmul(u, v), v) / v.dot(v)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
encode_only=False, word_level=False, remove_bias=False, bias_dir=None):
encoded_layers, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, output_all_encoded_layers=False)
# pooled_output: batch_size x embed_size
# Detach from here
if (word_level):
embeddings = encoded_layers # [batch size x seq length x hidden size]
seq_length = embeddings.shape[1]
if (self.normalize):
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=-1)
# remove bias
if (remove_bias):
for t in range(seq_length):
embeddings[:, t] = self.drop_bias(embeddings[:, t], bias_dir)
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=-1)
# # average over time steps
# embeddings = torch.mean(embeddings, dim=1) # [batch size x hidden size]
else:
embeddings = pooled_output
if (self.normalize):
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=-1)
if (remove_bias):
embeddings = self.drop_bias(embeddings, bias_dir)
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=-1)
        # detach() is not in-place; reassign so gradients actually stop here.
        if (not self.tune_bert): embeddings = embeddings.detach()
if (encode_only): return embeddings
# Gradient starts from here
# irene: TODO
embeddings = self.dropout(embeddings)
logits = self.classifier(embeddings)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, encode_only=False,
# remove_bias=False, bias_dir=None):
# _, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,
# attention_mask=attention_mask, output_all_encoded_layers=False)
# # pooled_output: batch_size x embed_size
# # Detach from here
# if (self.normalize):
# pooled_output = torch.nn.functional.normalize(pooled_output, p=2, dim=1)
# if (remove_bias):
# pooled_output = self.drop_bias(pooled_output, bias_dir)
# pooled_output = torch.nn.functional.normalize(pooled_output, p=2, dim=1)
# if (not self.tune_bert): pooled_output.detach()
# if (encode_only): return pooled_output
# # Gradient starts from here
# pooled_output = self.dropout(pooled_output)
# logits = self.classifier(pooled_output)
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# return loss
# else:
# return logits
# class SequenceClassificationHeader(BertPretrainedModel):
# def __init__(self, config):
# super(SequenceClassificationHeader, self).__init__(config, num_labels)
# self.config = config
# self.num_labels = num_labels
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, num_labels)
# self.apply(self.init_bert_weights)
# def forward(self, embeddings, labels=None):
# pooled_output = self.dropout(embeddings)
# logits = self.classifier(pooled_output)
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# return loss
# else:
# return logits
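# --- Illustrative numerical check (not part of the original file) ---
# BertForSequenceClassification.drop_bias above removes the component of u
# along the bias direction v: u - (u.v / v.v) * v. This sketch repeats the
# computation on random tensors and verifies the result is orthogonal to v;
# it is only defined here, never called at import time.
def _drop_bias_demo():
    u = torch.randn(3, 8)                                  # a batch of embeddings
    v = torch.randn(8)                                     # a bias direction
    debiased = u - torch.ger(torch.matmul(u, v), v) / v.dot(v)
    return torch.matmul(debiased, v)                       # ~zeros: nothing left along v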
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the position tensors may carry an extra trailing dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
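# Minimal smoke test for the QA head above; the ids are toy values rather than
# real tokenizer output, and `config` is a BertConfig supplied by the caller.
def _check_qa_shapes(config):
    model = BertForQuestionAnswering(config)
    model.eval()
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    with torch.no_grad():
        start_logits, end_logits = model(input_ids)
    # Both outputs are [batch_size, sequence_length].
    assert start_logits.shape == input_ids.shape
    assert end_logits.shape == input_ids.shape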
def test_class_header():
    # Build a config with bert-base dimensions locally so the test needs no
    # checkpoint. All pretrained bert-base models share this config:
    # {
    #   "attention_probs_dropout_prob": 0.1,
    #   "hidden_act": "gelu",
    #   "hidden_dropout_prob": 0.1,
    #   "hidden_size": 768,
    #   "initializer_range": 0.02,
    #   "intermediate_size": 3072,
    #   "max_position_embeddings": 512,
    #   "num_attention_heads": 12,
    #   "num_hidden_layers": 12,
    #   "type_vocab_size": 2,
    #   "vocab_size": 30522
    # }
    config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12,
                        intermediate_size=3072)
    model = BertForSequenceClassification(config, num_labels=2)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    logits = model(input_ids)
    assert logits.shape == (2, 2)
| 57,304 | 42.086466 | 162 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json"}
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'w' or l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'wpe' or l[0] == 'wte':
pointer = getattr(pointer, l[0])
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class GPT2Config(object):
"""Configuration class to store the configuration of a `GPT2Model`.
"""
def __init__(
self,
vocab_size_or_config_json_file=50257,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
):
"""Constructs GPT2Config.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `GPT2Config` from a Python dictionary of parameters."""
config = GPT2Config(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
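# Round-trip sketch for the serialization helpers above; the path is an
# arbitrary scratch location, not one the library expects.
def _config_round_trip(path='/tmp/gpt2_config.json'):
    config = GPT2Config()  # defaults correspond to the released 117M GPT-2
    config.to_json_file(path)
    reloaded = GPT2Config.from_json_file(path)
    assert config.to_dict() == reloaded.to_dict()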
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
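# Conv1D is a linear layer with transposed weight storage (nx -> nf), kept so
# the weights line up with the original TF checkpoints. A quick equivalence
# check with toy shapes:
def _conv1d_matches_linear():
    conv = Conv1D(nf=6, nx=4)
    x = torch.randn(2, 3, 4)   # (batch, seq, nx)
    out = conv(x)
    assert out.shape == (2, 3, 6)
    # Same computation written as a dense matmul plus bias.
    assert torch.allclose(out, x @ conv.weight + conv.bias, atol=1e-6)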
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns-nd:ns, :ns]
w = w * b - 1e4 * (1 - b)
w = nn.Softmax(dim=-1)(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a, present
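# Sketch of the layer_past/present cache contract above: feeding the returned
# `present` back in lets the next call attend over all previous tokens while
# only computing projections for the new one. `config` is a GPT2Config
# supplied by the caller.
def _attention_cache_sketch(config):
    attn = Attention(config.n_embd, config.n_ctx, config, scale=True)
    x_prefix = torch.randn(1, 3, config.n_embd)   # first three positions
    _, present = attn(x_prefix)
    x_next = torch.randn(1, 1, config.n_embd)     # one new position
    out, _ = attn(x_next, layer_past=present)
    assert out.shape == (1, 1, config.n_embd)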
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
a, present = self.attn(self.ln_1(x), layer_past=layer_past)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x, present
class GPT2LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
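# The head shares storage with the token embedding matrix (weight tying); for
# a GPT2LMHeadModel (defined below) the two weights are the same Parameter:
def _check_weight_tying(model):
    assert model.lm_head.decoder.weight is model.transformer.wte.weight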
class GPT2MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(GPT2MultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
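# What the gather above does, concretely: for each (example, choice) pair it
# selects the hidden vector at the time step named by mc_token_ids. Toy
# shapes throughout.
def _mc_gather_sketch():
    bsz, num_choices, seq_len, hidden = 2, 3, 5, 8
    hidden_states = torch.randn(bsz, num_choices, seq_len, hidden)
    mc_token_ids = torch.LongTensor([[4, 4, 4], [2, 2, 2]])  # last real token
    idx = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden)
    picked = hidden_states.gather(2, idx).squeeze(2)
    assert picked.shape == (bsz, num_choices, hidden)
    assert torch.equal(picked[0, 1], hidden_states[0, 1, 4])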
class GPT2PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(GPT2PreTrainedModel, self).__init__()
if not isinstance(config, GPT2Config):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def set_tied(self):
pass
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
):
"""
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `gpt2`
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. a TensorFlow checkpoint with trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific GPT class
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = GPT2Config.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_gpt2(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Make sure we are still sharing the output and input embeddings after loading weights
model.set_tied()
return model
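# Typical entry point; this downloads and caches the released weights on
# first use (network access and a writable cache directory are assumed).
def _load_pretrained_sketch():
    model = GPT2LMHeadModel.from_pretrained('gpt2')  # defined later in this file
    model.eval()
    return model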
class GPT2Model(GPT2PreTrainedModel):
"""OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs a tuple consisting of:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
(or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2Model(config)
hidden_states, presents = model(input_ids)
```
"""
def __init__(self, config):
super(GPT2Model, self).__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.apply(self.init_weights)
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
presents = []
for block, layer_past in zip(self.h, past):
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape), presents
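# Incremental decoding sketch: the `presents` returned for a prefix are fed
# back as `past`, so the follow-up call only processes the newest token.
# `model` is a GPT2Model; `prefix_ids` is [batch, prefix_len] and `next_ids`
# is [batch, 1].
def _incremental_forward_sketch(model, prefix_ids, next_ids):
    _, past = model(prefix_ids)
    hidden, past = model(next_ids, past=past)
    return hidden, past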
class GPT2LMHeadModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size].
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else a tuple:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
(or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2LMHeadModel(config)
lm_logits, presents = model(input_ids)
```
"""
def __init__(self, config):
super(GPT2LMHeadModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.apply(self.init_weights)
def set_tied(self):
""" Make sure we are sharing the embeddings
"""
self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[:, :-1].contiguous()
shift_labels = lm_labels[:, 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
return lm_logits, presents
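# Greedy decoding sketch built on the forward above; sampling, temperature
# and stopping criteria are omitted for brevity.
def _greedy_generate(model, input_ids, steps=10):
    model.eval()
    generated, past = input_ids, None
    with torch.no_grad():
        for _ in range(steps):
            step_input = generated if past is None else generated[:, -1:]
            logits, past = model(step_input, past=past)
            next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
            generated = torch.cat((generated, next_token), dim=1)
    return generated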
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, config.vocab_size[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., config.vocab_size].
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
mc_token_ids = torch.LongTensor([[2, 1]]) # (bsz, number of choice)
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2DoubleHeadsModel(config)
lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config):
super(GPT2DoubleHeadsModel, self).__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.multiple_choice_head = GPT2MultipleChoiceHead(config)
self.apply(self.init_weights)
def set_tied(self):
""" Make sure we are sharing the embeddings
"""
self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
shift_logits = lm_logits[:, :-1].contiguous()
shift_labels = lm_labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1,
shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits, presents
| 31,784 | 43.641854 | 146 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
print("Loading weights...")
names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'w':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}
class OpenAIGPTConfig(object):
"""Configuration class to store the configuration of a `OpenAIGPTModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file=40478,
n_special=0,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
):
"""Constructs OpenAIGPTConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file.
n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
afn: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
layer_norm_epsilon: epsilon to use in the layer norm layers
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `OpenAIGPTConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x):
a = self.attn(x)
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
return h
class OpenAIGPTLMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(OpenAIGPTLMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
class OpenAIGPTMultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTMultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
# self.multiple_choice_token = multiple_choice_token
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(OpenAIGPTPreTrainedModel, self).__init__()
if not isinstance(config, OpenAIGPTConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
):
"""
Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `openai-gpt`
- a path or url to a pretrained model archive containing:
. `openai_gpt_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. a series of NumPy files containing OpenAI TensorFlow trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = OpenAIGPTConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
# This step also make sure we are still sharing the output and input embeddings after loading weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").
OpenAI GPT use a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
Outputs:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
(or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTModel(config)
hidden_states = model(input_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTModel, self).__init__(config)
num_tokens = config.vocab_size + config.n_special
self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.apply(self.init_weights)
# nn.init.normal_(self.embed.weight, std=0.02)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.tokens_embed.to(old_embed.weight.device)
self.init_weights(self.tokens_embed)
# Copy word embeddings from the previous weights
self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
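# Sketch of growing the vocabulary before fine-tuning with special tokens
# such as [SEP]/[CLS]; `model` is assumed to be an OpenAIGPTModel. The two
# new rows are then addressed by indices config.vocab_size and
# config.vocab_size + 1.
def _add_special_tokens_sketch(model):
    model.set_num_special_tokens(2)
    assert model.tokens_embed.num_embeddings == model.config.total_tokens_embeddings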
def forward(self, input_ids, position_ids=None, token_type_ids=None):
if position_ids is None:
# This was used when we had a single embedding matrix for position and token embeddings
# start = self.config.vocab_size + self.config.n_special
# end = start + input_ids.size(-1)
# position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
# Add the position information to the input embeddings
# h = e.sum(dim=2)
hidden_states = inputs_embeds + position_embeds + token_type_embeds
for block in self.h:
hidden_states = block(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").
OpenAI GPT use a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size].
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
(or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTLMHeadModel(config)
lm_logits = model(input_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTLMHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
return lm_logits
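# Minimal training-step sketch for the LM head above; ids are toy values and
# `optimizer` is any torch optimizer over model.parameters(). Labels equal
# the inputs because the forward shifts them internally for next-token
# prediction.
def _lm_train_step_sketch(model, optimizer):
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    loss = model(input_ids, lm_labels=input_ids.clone())
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()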
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
OpenAI GPT use a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
The embeddings are ordered as follows in the token embeddings matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, total_tokens_embeddings[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings - 1]. All labels set to -1 are ignored (masked);
            the loss is only computed for labels in [0, ..., total_tokens_embeddings - 1]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
Example usage:
```python
# Already been converted into BPE token ids
        input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
        mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)
        config = modeling_openai.OpenAIGPTConfig()
        model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
        lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config):
super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits
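# Hedged usage sketch (illustrative, not from the original file): shows the
# [batch, num_choices, seq_len] layout OpenAIGPTDoubleHeadsModel expects and
# the two logits it returns when no labels are supplied.
def _demo_double_heads_usage():
    config = OpenAIGPTConfig()
    model = OpenAIGPTDoubleHeadsModel(config)
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (1 batch, 2 choices, 3 tokens)
    mc_token_ids = torch.LongTensor([[2, 1]])                   # classify from these token positions
    lm_logits, mc_logits = model(input_ids, mc_token_ids)
    return lm_logits, mc_logits  # shapes: (1, 2, 3, total_tokens_embeddings) and (1, 2)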
| 38,218 | 45.438639 | 152 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import torch
import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from pytorch_pretrained_bert.modeling_transfo_xl import (CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
VOCAB_NAME)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
transfo_xl_config_file,
pytorch_dump_folder_path,
transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop('vocab', None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the folder to store the PyTorch model or dataset/vocab.")
parser.add_argument("--tf_checkpoint_path",
default = "",
type = str,
help = "An optional path to a TensorFlow checkpoint path to be converted.")
parser.add_argument("--transfo_xl_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--transfo_xl_dataset_file",
default = "",
type = str,
help = "An optional dataset file to be converted in a vocabulary.")
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file)
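# Hedged programmatic example (all paths hypothetical): the same conversion
# the CLI above performs, driven directly from Python.
def _demo_conversion():
    convert_transfo_xl_checkpoint_to_pytorch(
        tf_checkpoint_path="/tmp/transfo_xl_tf/model.ckpt",  # assumed checkpoint location
        transfo_xl_config_file="",                           # "" falls back to TransfoXLConfig() defaults
        pytorch_dump_folder_path="/tmp/transfo_xl_pt",
        transfo_xl_dataset_file="")                          # "" skips the corpus conversion branch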
| 5,671 | 47.478632 | 121 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
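# Hedged illustration (URL is an example.com placeholder): the cached filename
# is deterministic in the URL, and the optional ETag hash is appended after a
# period so different server-side versions cache to different local files.
def _demo_url_to_filename():
    base = url_to_filename("https://example.com/model.bin")
    versioned = url_to_filename("https://example.com/model.bin", etag='"v1"')
    assert versioned.startswith(base + ".")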
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
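# Hedged usage sketch (URL illustrative): cached_path is the single entry
# point callers use; remote files are downloaded once into
# PYTORCH_PRETRAINED_BERT_CACHE, local paths are checked and passed through.
def _demo_cached_path():
    local_copy = cached_path("https://example.com/vocab.txt")  # download + cache on first call
    assert cached_path(local_copy) == local_copy               # existing local path is returned as-is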
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 9,117 | 32.645756 | 112 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_json_file(bert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--tf_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--bert_config_file",
default = None,
type = str,
required = True,
help = "The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--pytorch_dump_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.bert_config_file,
args.pytorch_dump_path)
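# Hedged programmatic example (all paths hypothetical): equivalent to the CLI
# invocation the argparse block above wires up.
def _demo_conversion():
    convert_tf_checkpoint_to_pytorch(
        tf_checkpoint_path="/tmp/bert_tf/bert_model.ckpt",
        bert_config_file="/tmp/bert_tf/bert_config.json",
        pytorch_dump_path="/tmp/bert_pt/pytorch_model.bin")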
| 2,593 | 37.716418 | 101 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/__init__.py | __version__ = "0.6.2"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert)
from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt)
from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from .modeling_gpt2 import (GPT2Config, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2)
from .optimization import BertAdam
from .optimization_openai import OpenAIAdam
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, WEIGHTS_NAME, CONFIG_NAME
| 1,313 | 51.56 | 93 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/tokenization_gpt2.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
try:
from functools import lru_cache
except ImportError:
# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
def lru_cache():
return lambda func: func
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'gpt2': 1024,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
@lru_cache()
def bytes_to_unicode():
"""
    Returns a dict mapping utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
_chr = unichr if sys.version_info[0] == 2 else chr
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [_chr(n) for n in cs]
return dict(zip(bs, cs))
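# Hedged illustration of the table built above: printable bytes map to
# themselves, while bytes the BPE code would otherwise choke on (e.g. the
# space byte 0x20) are remapped to unused code points above 255.
def _demo_bytes_to_unicode():
    table = bytes_to_unicode()
    assert len(table) == 256
    assert table[ord("A")] == "A"
    assert table[ord(" ")] == "Ġ"  # chr(0x120); the familiar 'Ġ' prefix in GPT-2 vocabularies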
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
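# Hedged micro-example: these adjacent-symbol pairs are what the merge
# ranking in GPT2Tokenizer.bpe() below is computed over.
def _demo_get_pairs():
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}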
class GPT2Tokenizer(object):
"""
GPT-2 BPE tokenizer. Peculiarities:
- Byte-level BPE
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
        Instantiate a GPT2Tokenizer from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file, merges_file))
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
logger.info("loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
def __init__(self, vocab_file, merges_file, errors='replace', special_tokens=None, max_len=None):
self.max_len = max_len if max_len is not None else int(1e12)
self.encoder = json.load(open(vocab_file))
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_data]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
self.special_tokens = {}
self.special_tokens_decoder = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
logger.info("Special tokens {}".format(self.special_tokens))
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` does not occur in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def tokenize(self, text):
""" Tokenize a string. """
bpe_tokens = []
for token in re.findall(self.pat, text):
if sys.version_info[0] == 2:
token = ''.join(self.byte_encoder[ord(b)] for b in token)
else:
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
index = len(self.encoder)
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(special_tokens_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file, merge_file, special_tokens_file
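# Hedged usage sketch ("gpt2" resolves through the archive maps above; the
# text is arbitrary): byte-level BPE makes encode/decode an exact round trip,
# whitespace included.
def _demo_gpt2_roundtrip():
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    ids = tokenizer.encode("Hello world")
    assert tokenizer.decode(ids) == "Hello world"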
| 13,230 | 42.380328 | 116 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/debug.py | print("Hello")
| 16 | 4.666667 | 14 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
        We use a map here to keep the PyTorch model as identical as possible to the original (kimiyoung/transformer-xl) PyTorch model.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
class TransfoXLConfig(object):
"""Configuration class to store the configuration of a `TransfoXLModel`.
"""
def __init__(self,
vocab_size_or_config_json_file=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
tgt_len=128,
ext_len=0,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
tie_weight=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02):
"""Constructs TransfoXLConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `TransfoXLModel` or a configuration json file.
cutoffs: cutoffs for the adaptive softmax
d_model: Dimensionality of the model's hidden states.
d_embed: Dimensionality of the embeddings
d_head: Dimensionality of the model's heads.
            div_val: divisor value for adaptive input and softmax
pre_lnorm: apply LayerNorm to the input instead of the output
d_inner: Inner dimension in FF
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
tgt_len: number of tokens to predict
ext_len: length of the extended context
            mem_len: length of the retained memory (previous hidden states)
same_length: use the same attn length for all tokens
proj_share_all_but_first: True to share all but first projs, False not to share.
attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
clamp_len: use the same pos embeddings after clamp_len
sample_softmax: number of samples in sampled softmax
adaptive: use adaptive softmax
tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
dropatt: The dropout ratio for the attention probabilities.
untie_r: untie relative position biases
init: parameter initializer to use
init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: projection parameters initialized by N(0, proj_init_std)
init_std: parameters initialized by N(0, init_std)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.n_token = vocab_size_or_config_json_file
self.cutoffs = []
self.cutoffs.extend(cutoffs)
self.tie_weight = tie_weight
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `TransfoXLConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
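# Hedged illustration of the two construction paths supported above: direct
# hyperparameters (the 267735 default matches WikiText-103) or a JSON round trip.
def _demo_config():
    config = TransfoXLConfig(vocab_size_or_config_json_file=267735)
    assert config.n_token == 267735
    config2 = TransfoXLConfig.from_dict(json.loads(config.to_json_string()))
    assert config2.d_model == config.d_model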
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:,None,:].expand(-1, bsz, -1)
else:
return pos_emb[:,None,:]
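# Hedged shape check: Transformer-XL feeds positions counting down (klen-1 .. 0)
# and consumes sinusoidal embeddings laid out as [klen, bsz, demb].
def _demo_positional_embedding():
    pe = PositionalEmbedding(demb=8)
    pos_seq = torch.arange(3.0, -1.0, -1.0)  # positions 3, 2, 1, 0
    assert pe(pos_seq).shape == (4, 1, 8)
    assert pe(pos_seq, bsz=2).shape == (4, 2, 8)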
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False, r_r_bias=None, r_w_bias=None):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
r_r_bias=None, r_w_bias=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m,:m] = torch.triu(mask[:m,:m])
mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:,:,None,None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
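# Hedged worked example of the _rel_shift trick above: padding one zero
# column, viewing as [klen+1, qlen, ...] and dropping the first row shifts
# row i left by (qlen - 1 - i), converting absolute (i, j) attention indices
# into relative offsets. The shifted-in tail values are garbage that the
# causal attention mask removes afterwards.
def _demo_rel_shift():
    attn = RelMultiHeadAttn(n_head=1, d_model=4, d_head=4, dropout=0.0)
    x = torch.arange(6.0).view(2, 3, 1, 1)  # qlen=2, klen=3
    shifted = attn._rel_shift(x).view(2, 3)
    assert shifted.tolist() == [[1.0, 2.0, 0.0], [3.0, 4.0, 5.0]]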
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None,:,:,None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:,:,:,None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
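# Hedged sizing illustration for AdaptiveEmbedding above: with div_val=4 each
# successive (rarer) vocabulary cluster gets a 4x narrower embedding that is
# projected back up to d_proj, which is where the memory savings come from.
def _demo_adaptive_embedding():
    emb = AdaptiveEmbedding(n_token=267735, d_embed=1024, d_proj=1024,
                            cutoffs=[20000, 40000, 200000], div_val=4)
    assert [layer.embedding_dim for layer in emb.emb_layers] == [1024, 256, 64, 16]
    out = emb(torch.LongTensor([[0, 1, 267733, 267734]]))  # two ids per touched cluster
    assert out.shape == (1, 4, 1024)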
class TransfoXLPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(TransfoXLPreTrainedModel, self).__init__()
if not isinstance(config, TransfoXLConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weight(self, weight):
if self.config.init == 'uniform':
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == 'normal':
nn.init.normal_(weight, 0.0, self.config.init_std)
def init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
self.init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
self.init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
self.init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
self.init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
self.init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
self.init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
self.init_bias(m.r_bias)
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `transfo-xl-wt103`
                - a path or url to a pretrained model archive containing:
                    . `transfo_xl_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
                - a path or url to a pretrained model archive containing:
                    . `transfo_xl_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific Transformer-XL subclass
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = TransfoXLConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
start_prefix = 'transformer.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# Make sure we are still sharing the input and output embeddings
if hasattr(model, 'tie_weights'):
model.tie_weights()
return model
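# Illustrative sketch (not part of the original file): typical use of the
# loader above. 'transfo-xl-wt103' is assumed to be a key of
# PRETRAINED_MODEL_ARCHIVE_MAP; a local directory containing
# `transfo_xl_config.json` and `pytorch_model.bin` works as well:
#   model = TransfoXLModel.from_pretrained('transfo-xl-wt103', cache_dir='/tmp/txl')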
class TransfoXLModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted by decreasing frequency.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
        `mems`: optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
A tuple of (last_hidden_state, new_mems)
`last_hidden_state`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLModel(config)
last_hidden_state, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
last_hidden_state, new_mems = model(input_ids_next, new_mems)
```
"""
def __init__(self, config):
super(TransfoXLModel, self).__init__(config)
self.n_token = config.n_token
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
div_val=config.div_val)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type == 1: # learnable embeddings
for i in range(config.n_layer):
self.layers.append(
RelLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type in [2, 3]: # absolute embeddings
for i in range(config.n_layer):
self.layers.append(
DecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.apply(self.init_weights)
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, data):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
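        # For example (illustrative numbers, not from the original comment):
        # with mlen=4 cached steps, qlen=3 new steps, ext_len=0 and mem_len=5,
        # end_idx = 4 + max(0, 3 - 0) = 7 and beg_idx = max(0, 7 - 5) = 2, so
        # steps [2, 7) of the concatenated [mems; hids] timeline are kept.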
with torch.no_grad():
new_mems = []
            end_idx = mlen + max(0, qlen - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, qlen, mlen)  # pass (qlen, mlen) in the order the signature expects
return core_out, new_mems
def forward(self, input_ids, mems=None):
""" Params:
input_ids :: [bsz, len]
                mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Returns:
tuple (last_hidden, new_mems) where:
new_mems: list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
last_hidden: output of the last layer:
shape :: [bsz, len, self.config.d_model]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
input_ids = input_ids.transpose(0, 1).contiguous()
if mems is None:
mems = self.init_mems(input_ids)
last_hidden, new_mems = self._forward(input_ids, mems=mems)
# We transpose back here to shape [bsz, len, hidden_dim]
last_hidden = last_hidden.transpose(0, 1).contiguous()
return (last_hidden, new_mems)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
This model add an (adaptive) softmax head on top of the TransfoXLModel
    Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify positioning embeddings indices
    - the tokens in the vocabulary have to be sorted by decreasing frequency.
Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the target token indices selected in the range [0, self.config.n_token[
`mems`: an optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
        A tuple of (softmax_output, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is None:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
            else:
                Negative log likelihood of shape [batch_size, sequence_length]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
    model = TransfoXLLMHeadModel(config)
    log_probs, new_mems = model(input_ids)
    # Another time on input_ids_next using the memory:
    log_probs, new_mems = model(input_ids_next, mems=new_mems)
```
"""
def __init__(self, config):
super(TransfoXLLMHeadModel, self).__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
# use sampled softmax
if config.sample_softmax > 0:
self.out_layer = nn.Linear(config.d_model, config.n_token)
self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
config.cutoffs, div_val=config.div_val)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, data):
return self.transformer.init_mems(data)
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
                    softmax_output: output of the (adaptive) softmax:
                        if target is None:
                            log probabilities of tokens, shape :: [bsz, len, n_tokens]
                        else:
                            Negative log likelihood of shape :: [bsz, len]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems)
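# Illustrative sketch (not part of the original file): the two calling
# conventions of TransfoXLLMHeadModel.forward above. `model`, `input_ids` and
# `target` are assumed to exist (e.g. model built via from_pretrained):
#   log_probs, mems = model(input_ids)                       # [bsz, len, n_token]
#   nll, mems = model(input_ids, target=target, mems=mems)   # [bsz, len]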
| 58,920 | 41.450288 | 131 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import unicodedata
import torch
import numpy as np
from .file_utils import cached_path
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
VOCAB_NAME = 'vocab.bin'
PRETRAINED_CORPUS_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(object):
"""
Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a TransfoXLTokenizer.
        Download and cache the vocabulary file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
if os.path.isdir(pretrained_model_name_or_path):
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
else:
vocab_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
vocab_dict = torch.load(resolved_vocab_file)
for key, value in vocab_dict.items():
tokenizer.__dict__[key] = value
return tokenizer
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
self.never_split = never_split
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
            raise ValueError('No <unk> token in vocabulary')
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        else:
            vocab_file = vocab_path
torch.save(self.__dict__, vocab_file)
return vocab_file
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
# assert '<eos>' not in sym
if hasattr(self, 'unk_idx'):
return self.sym2idx.get(sym, self.unk_idx)
# Backward compatibility with pre-trained models
elif '<unk>' in self.sym2idx:
return self.sym2idx['<unk>']
elif '<UNK>' in self.sym2idx:
return self.sym2idx['<UNK>']
else:
raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
def convert_ids_to_tokens(self, indices):
"""Converts a sequence of indices in symbols using the vocab."""
return [self.get_sym(idx) for idx in indices]
def convert_tokens_to_ids(self, symbols):
"""Converts a sequence of symbols into ids using the vocab."""
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
def decode(self, indices, exclude=None):
"""Converts a sequence of indices in a string."""
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
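# Illustrative sketch (not part of the original file): building a small
# vocabulary from in-memory sentences with the tokenizer above and
# round-tripping a line through it. All strings are made up for the example.
def _demo_transfo_xl_tokenizer():
    tok = TransfoXLTokenizer(special=['<unk>', '<eos>'])
    sents = [tok.tokenize(line, add_eos=True)
             for line in ["hello world", "hello there"]]
    tok.count_sents(sents)
    tok.build_vocab()
    ids = tok.convert_tokens_to_ids(tok.tokenize("hello world", add_eos=True))
    return tok.decode(ids)  # -> "hello world <eos>"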
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
self.n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, self.n_step * bsz)
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().to(device)
# Number of mini-batches
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
def get_batch(self, i, bptt=None):
if bptt is None: bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx]
target = self.data[i+1:i+1+seq_len]
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
return data_out, target_out, seq_len
def get_fixlen_iter(self, start=0):
for i in range(start, self.data.size(0) - 1, self.bptt):
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
data, target, seq_len = self.get_batch(i, bptt)
i += seq_len
yield data, target, seq_len
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
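# Illustrative sketch (not part of the original file): batch shapes produced by
# the ordered iterator above, using a dummy arange tensor as the corpus.
def _demo_lm_ordered_iterator():
    stream = torch.arange(100)                # dummy pre-encoded token stream
    it = LMOrderedIterator(stream, bsz=4, bptt=8)
    data, target, seq_len = it.get_batch(0)
    # data/target :: [bsz, bptt]; target is data shifted by one time step
    assert data.size() == (4, 8) and target.size() == (4, 8) and seq_len == 8
    return data, target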
class LMShuffledIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
else np.array(range(len(self.data)))
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
streams[i][:n_new]
target[n_filled:n_filled+n_new, i] = \
streams[i][1:n_new+1]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
yield data_out, target_out, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
shuffle=False):
self.paths = paths
self.vocab = vocab
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class TransfoXLCorpus(object):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a pre-processed corpus.
"""
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
# redirect to the cache, if necessary
try:
resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Corpus '{}' was not found in corpus list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
corpus_file))
return None
if resolved_corpus_file == corpus_file:
logger.info("loading corpus file {}".format(corpus_file))
else:
logger.info("loading corpus file {} from cache at {}".format(
corpus_file, resolved_corpus_file))
# Instantiate tokenizer.
corpus = cls(*inputs, **kwargs)
corpus_dict = torch.load(resolved_corpus_file)
for key, value in corpus_dict.items():
corpus.__dict__[key] = value
corpus.vocab = vocab
if corpus.train is not None:
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
if corpus.valid is not None:
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
if corpus.test is not None:
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
return corpus
def __init__(self, *args, **kwargs):
self.vocab = TransfoXLTokenizer(*args, **kwargs)
self.dataset = None
self.train = None
self.valid = None
self.test = None
def build_corpus(self, path, dataset):
self.dataset = dataset
if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
self.vocab.count_file(os.path.join(path, 'train.txt'))
self.vocab.count_file(os.path.join(path, 'valid.txt'))
self.vocab.count_file(os.path.join(path, 'test.txt'))
elif self.dataset == 'wt103':
self.vocab.count_file(os.path.join(path, 'train.txt'))
elif self.dataset == 'lm1b':
train_path_pattern = os.path.join(
path, '1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled', 'news.en-*')
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ['ptb', 'wt2', 'wt103']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True)
elif self.dataset in ['enwik8', 'text8']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
elif self.dataset == 'lm1b':
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == 'train':
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == 'lm1b':
kwargs['shuffle'] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ['valid', 'test']:
data = self.valid if split == 'valid' else self.test
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == 'lm1b':
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, 'cache.pt')
fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        print('Loading cached dataset...')
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
else:
print('Producing dataset {}...'.format(dataset))
kwargs = {}
if dataset in ['wt103', 'wt2']:
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = False
elif dataset == 'ptb':
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = True
elif dataset == 'lm1b':
kwargs['special'] = []
kwargs['lower_case'] = False
kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
elif dataset in ['enwik8', 'text8']:
pass
corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
torch.save(corpus, fn)
return corpus
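# Illustrative sketch (not part of the original file): the on-disk layout
# get_lm_corpus above expects. The directory path is a made-up example:
#   data/wikitext-103/{train.txt, valid.txt, test.txt}
#   corpus = get_lm_corpus('data/wikitext-103', 'wt103')
#   train_iter = corpus.get_iterator('train', bsz=32, bptt=128)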
| 22,060 | 36.582624 | 110 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/tokenization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
from io import open
from tqdm import tqdm
from .file_utils import cached_path
from .tokenization import BasicTokenizer
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'openai-gpt': 512,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def text_standardize(text):
"""
fixes some issues the spacy tokenizer had on books corpus
also does some whitespace standardization
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip()
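# Illustrative sketch (not part of the original file): what the two helpers
# above produce on toy inputs.
def _demo_bpe_helpers():
    # a word is a tuple of symbols; adjacent symbols become candidate merges
    pairs = get_pairs(('l', 'o', 'w</w>'))  # {('l', 'o'), ('o', 'w</w>')}
    # unicode dashes/ellipses are normalized and punctuation is spaced out
    text = text_standardize('wait\u2026 really?!')  # -> 'wait... really ? !'
    return pairs, text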
class OpenAIGPTTokenizer(object):
"""
BPE tokenizer. Peculiarities:
- lower case all inputs
- uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, fallback to BERT's BasicTokenizer if not.
- argument special_tokens and function set_special_tokens:
can be used to add additional symbols (ex: "__classify__") to a vocabulary.
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
        Instantiate an OpenAIGPTTokenizer from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
special_tokens_file = None
else:
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
if not os.path.exists(special_tokens_file):
special_tokens_file = None
else:
logger.info("loading special tokens file {}".format(special_tokens_file))
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file, merges_file))
return None
if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
logger.info("loading vocabulary file {}".format(vocab_file))
logger.info("loading merges file {}".format(merges_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
logger.info("loading merges file {} from cache at {}".format(
merges_file, resolved_merges_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
if special_tokens_file and 'special_tokens' not in kwargs:
special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
else:
special_tokens = kwargs.pop('special_tokens', [])
tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
return tokenizer
def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None):
try:
import ftfy
import spacy
self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
self.fix_text = ftfy.fix_text
except ImportError:
logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True,
never_split=special_tokens if special_tokens is not None else [])
self.fix_text = None
self.max_len = max_len if max_len is not None else int(1e12)
self.encoder = json.load(open(vocab_file, encoding="utf-8"))
self.decoder = {v:k for k,v in self.encoder.items()}
merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.special_tokens = {}
self.special_tokens_decoder = {}
self.set_special_tokens(special_tokens)
def __len__(self):
return len(self.encoder) + len(self.special_tokens)
def set_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
if self.fix_text is None:
# Using BERT's BasicTokenizer: we can update the tokenizer
self.nlp.never_split = special_tokens
logger.info("Special tokens {}".format(self.special_tokens))
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
def tokenize(self, text):
""" Tokenize a string. """
split_tokens = []
if self.fix_text is None:
# Using BERT's BasicTokenizer
text = self.nlp.tokenize(text)
for token in text:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
else:
# Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
text = self.nlp(text_standardize(self.fix_text(text)))
for token in text:
split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
return split_tokens
def convert_tokens_to_ids(self, tokens):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
if tokens in self.special_tokens:
return self.special_tokens[tokens]
else:
return self.encoder.get(tokens, 0)
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
ids.append(self.encoder.get(token, 0))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this OpenAI GPT model ({} > {}). Running this"
" sequence through the model will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in BPE tokens using the vocab."""
tokens = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
tokens.append(self.decoder[i])
return tokens
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""Converts a sequence of ids in a string."""
tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)
out_string = ''.join(tokens).replace('</w>', ' ').strip()
if clean_up_tokenization_spaces:
out_string = out_string.replace('<unk>', '')
            out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
                ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
                ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary and merge files to a directory."""
if not os.path.isdir(vocab_path):
logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
return
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
merge_file = os.path.join(vocab_path, MERGES_NAME)
special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write(u'#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file))
index = token_index
writer.write(' '.join(bpe_tokens) + u'\n')
index += 1
index = len(self.encoder)
with open(special_tokens_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(special_tokens_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file, merge_file, special_tokens_file
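# Illustrative sketch (not part of the original file): a typical round trip
# with the tokenizer above. 'openai-gpt' is the archive key defined at the top
# of this file; ftfy/spacy are optional (BasicTokenizer is the fallback):
#   tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
#   ids = tokenizer.encode("hello world")
#   text = tokenizer.decode(ids)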
| 13,929 | 43.363057 | 129 | py |
sent_debias | sent_debias-master/debias-BERT/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_emb_i))
)
self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target=None, keep_order=False):
'''
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
                if target is None:
                    out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
                else:
                    out :: [len*bsz] Negative log likelihood
            We could replace this implementation by the native PyTorch one
            if theirs had an option to set bias on all clusters in the native one.
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
'''
if target is not None:
target = target.view(-1)
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
if target is not None:
output = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
output = F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if target is None:
out = hidden.new_empty((head_logit.size(0), self.n_token))
else:
out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if target is not None:
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = hidden.index_select(0, indices_i)
else:
hidden_i = hidden
if i == 0:
if target is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
if target is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
out[:, l_idx:r_idx] = logprob_i
if target is not None:
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0, indices_i, -logprob_i)
else:
out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def log_prob(self, hidden):
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, n\_classes)`
"""
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
return F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
out = hidden.new_empty((head_logit.size(0), self.n_token))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # mirror forward(): cluster i uses head column cutoffs[0] + i - 1
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
return out
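# Illustrative sketch (not part of the original file): the adaptive softmax
# above in both modes, with made-up toy sizes. Projection parameters are
# created uninitialized, so we initialize them before use.
def _demo_projected_adaptive_log_softmax():
    n_token, d_embed, d_proj = 1000, 32, 32
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                       cutoffs=[100, 500], div_val=2)
    for p in crit.parameters():
        if p.dim() > 1:
            nn.init.normal_(p, 0.0, 0.02)
    hidden = torch.randn(6, d_proj)                   # [len*bsz, d_proj]
    target = torch.tensor([1, 2, 150, 151, 600, 601])
    nll = crit(hidden, target)    # [len*bsz] negative log likelihood
    log_probs = crit(hidden)      # [len*bsz, n_token] log probabilities
    assert nll.size() == (6,) and log_probs.size() == (6, 1000)
    return nll, log_probs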
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
log_indices = torch.arange(1., range_max+2., 1.).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
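
def _demo_log_uniform_sampler(range_max=10000, n_sample=64):
    # Hedged usage sketch (added for illustration): after unique(), the
    # number of negatives m varies per call with m <= 2 * n_sample, which
    # is exactly the caveat in the class docstring above.
    sampler = LogUniformSampler(range_max, n_sample)
    labels = torch.randint(0, range_max, (3, 4))
    true_lp, samp_lp, negs = sampler.sample(labels)
    return true_lp.shape, samp_lp.shape, negs.shape  # (3, 4), (m,), (m,)
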
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum('ijk,ijk->ij',
[true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
[sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
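
def _sampled_softmax_loss(logits):
    # Hedged sketch (added for illustration): sample_logits places the true
    # class in column 0 of the last axis, so the cross-entropy target for
    # every position is simply 0.
    b1, b2, _ = logits.size()
    target = logits.new_zeros(b1 * b2, dtype=torch.long)
    return F.cross_entropy(logits.view(b1 * b2, -1), target)
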
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
S, B = 3, 4
n_vocab = 10000
n_sample = 5
H = 32
labels = torch.LongTensor(S, B).random_(0, n_vocab)
# sampler = LogUniformSampler(n_vocab, unique=False)
# new_labels, sample, sample_prob = sampler.sample(n_sample, labels)
sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True)
# true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)
# print('true_probs', true_probs.numpy().tolist())
# print('samp_probs', samp_probs.numpy().tolist())
# print('neg_samples', neg_samples.numpy().tolist())
# print('sum', torch.sum(sampler.dist).item())
# assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()
embedding = nn.Embedding(n_vocab, H)
bias = torch.zeros(n_vocab)
inputs = torch.Tensor(S, B, H).normal_()
    # sample_logits takes the sampler (n_sample is read from it) and returns
    # only the logits; the true class sits at index 0 of the last axis.
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| 16,114 | 38.987593 | 132 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/tsne_plot.py | import numpy as np
from sklearn.manifold import TSNE
import plotly.offline as plt
import plotly.graph_objs as go
def example():
X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    # perplexity must be < n_samples (only 4 points here)
    X_embedded = TSNE(n_components=2, perplexity=2).fit_transform(X)
print(X_embedded.shape)
print(X_embedded)
import matplotlib.pyplot as plt
import seaborn as sns
palette = sns.color_palette("bright", 4)
    sns.scatterplot(x=X_embedded[:, 0], y=X_embedded[:, 1], legend='full', palette=palette)
plt.show()
def tsne_plot(word_vectors):
    # optionally reduce dimensionality with PCA before running t-SNE
words = list(word_vectors.keys())
X = np.array([word_vectors[word] for word in words])
    # clamp perplexity, which must be < n_samples, for small vocabularies
    X_embedded = TSNE(n_components=2, perplexity=min(30, len(words) - 1)).fit_transform(X)  # Nx2
fig = go.Figure(data=go.Scatter(x=X_embedded[:,0],
y=X_embedded[:,1],
mode='markers+text',
text=words,
textposition='bottom center',
hoverinfo="text")) # hover text goes here
fig.update_layout(title='Evaluation Words')
fig.write_image("fig1.png")
word_vectors = {"dog": np.array([1, 0, 0, 0]),
"cat": np.array([0.9, 0.1, 0, 0]),
"tree": np.array([0, 0, 1, 0]),
"human": np.array([1.5, 0.05, 0.03, 0.02])}
tsne_plot(word_vectors)
| 1,158 | 27.268293 | 82 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/run_classifier.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
# standard library
import argparse
import csv
import logging
import os
import random
import sys
import pickle
import pdb
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.decomposition import PCA
# first party
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertModel, BertConfig
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from def_sent_utils import get_def_pairs
from eval_utils import isInSet
from my_debiaswe import my_we
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class DualInputFeatures(object):
"""A single set of dual features of data."""
def __init__(self, input_ids_a, input_ids_b, mask_a, mask_b, segments_a, segments_b):
self.input_ids_a = input_ids_a
self.input_ids_b = input_ids_b
self.mask_a = mask_a
self.mask_b = mask_b
self.segments_a = segments_a
self.segments_b = segments_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, tokens, input_ids, input_mask, segment_ids, label_id):
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class BertEncoder(object):
def __init__(self, model, device):
self.device = device
self.bert = model
def encode(self, input_ids, token_type_ids=None, attention_mask=None, word_level=False):
self.bert.eval()
embeddings = self.bert(input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, word_level=word_level,
remove_bias=False, bias_dir=None, encode_only=True)
return embeddings
def extract_embeddings(bert_encoder, tokenizer, examples, max_seq_length, device,
label_list, output_mode, norm, word_level=False):
'''Encode examples into BERT embeddings in batches.'''
features = convert_examples_to_dualfeatures(
examples, label_list, max_seq_length, tokenizer, output_mode)
all_inputs_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
all_mask_a = torch.tensor([f.mask_a for f in features], dtype=torch.long)
all_segments_a = torch.tensor([f.segments_a for f in features], dtype=torch.long)
data = TensorDataset(all_inputs_a, all_mask_a, all_segments_a)
dataloader = DataLoader(data, batch_size=32, shuffle=False)
all_embeddings = []
for step, batch in enumerate(tqdm(dataloader)):
inputs_a, mask_a, segments_a = batch
if (device != None):
inputs_a = inputs_a.to(device)
mask_a = mask_a.to(device)
segments_a = segments_a.to(device)
embeddings = bert_encoder.encode(input_ids=inputs_a, token_type_ids=segments_a, attention_mask=mask_a, word_level=False)
embeddings = embeddings.cpu().detach().numpy()
all_embeddings.append(embeddings)
all_embeddings = np.concatenate(all_embeddings, axis=0)
return all_embeddings
def extract_embeddings_pair(bert_encoder, tokenizer, examples, max_seq_length, device,
load, task, label_list, output_mode, norm, word_level=False):
'''Encode paired examples into BERT embeddings in batches.
Used in the computation of gender bias direction.
Save computed embeddings under saved_embs/.
'''
emb_loc_a = 'saved_embs/num%d_a_%s.pkl' % (len(examples), task)
emb_loc_b = 'saved_embs/num%d_b_%s.pkl' % (len(examples), task)
if os.path.isfile(emb_loc_a) and os.path.isfile(emb_loc_b) and load:
with open(emb_loc_a, 'rb') as f:
all_embeddings_a = pickle.load(f)
with open(emb_loc_b, 'rb') as f:
all_embeddings_b = pickle.load(f)
print ('preprocessed embeddings loaded from:', emb_loc_a, emb_loc_b)
else:
features = convert_examples_to_dualfeatures(
examples, label_list, max_seq_length, tokenizer, output_mode)
all_inputs_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
all_mask_a = torch.tensor([f.mask_a for f in features], dtype=torch.long)
all_segments_a = torch.tensor([f.segments_a for f in features], dtype=torch.long)
all_inputs_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
all_mask_b = torch.tensor([f.mask_b for f in features], dtype=torch.long)
all_segments_b = torch.tensor([f.segments_b for f in features], dtype=torch.long)
data = TensorDataset(all_inputs_a, all_inputs_b, all_mask_a, all_mask_b, all_segments_a, all_segments_b)
dataloader = DataLoader(data, batch_size=32, shuffle=False)
all_embeddings_a = []
all_embeddings_b = []
for step, batch in enumerate(tqdm(dataloader)):
inputs_a, inputs_b, mask_a, mask_b, segments_a, segments_b = batch
if (device != None):
inputs_a = inputs_a.to(device)
mask_a = mask_a.to(device)
segments_a = segments_a.to(device)
inputs_b = inputs_b.to(device)
mask_b = mask_b.to(device)
segments_b = segments_b.to(device)
embeddings_a = bert_encoder.encode(input_ids=inputs_a, token_type_ids=segments_a, attention_mask=mask_a, word_level=False)
embeddings_b = bert_encoder.encode(input_ids=inputs_b, token_type_ids=segments_b, attention_mask=mask_b, word_level=False)
embeddings_a /= torch.norm(embeddings_a, dim=-1, keepdim=True)
embeddings_b /= torch.norm(embeddings_b, dim=-1, keepdim=True)
if not torch.isnan(embeddings_a).any() and not torch.isnan(embeddings_b).any():
embeddings_a = embeddings_a.cpu().detach().numpy()
embeddings_b = embeddings_b.cpu().detach().numpy()
all_embeddings_a.append(embeddings_a)
all_embeddings_b.append(embeddings_b)
all_embeddings_a = np.concatenate(all_embeddings_a, axis=0)
all_embeddings_b = np.concatenate(all_embeddings_b, axis=0)
with open(emb_loc_a, 'wb') as f:
pickle.dump(all_embeddings_a, f)
with open(emb_loc_b, 'wb') as f:
pickle.dump(all_embeddings_b, f)
print ('preprocessed embeddings saved to:', emb_loc_a, emb_loc_b)
means = (all_embeddings_a + all_embeddings_b) / 2.0
all_embeddings_a -= means
all_embeddings_b -= means
all_embeddings = np.concatenate([all_embeddings_a, all_embeddings_b], axis=0)
return all_embeddings
def doPCA(matrix, num_components=10):
pca = PCA(n_components=num_components, svd_solver="auto")
    pca.fit(matrix)  # results can vary across runs; pass random_state for reproducibility
return pca
def get_def_examples(def_pairs):
'''Construct definitional examples from definitional pairs.'''
def_examples = []
for group_id in def_pairs:
def_group = def_pairs[group_id]
f_sents = def_group['f']
m_sents = def_group['m']
for sent_id, (sent_a, sent_b) in enumerate(zip(f_sents, m_sents)):
def_examples.append(InputExample(guid='{}-{}'.format(group_id, sent_id),
text_a=sent_a, text_b=sent_b, label=None))
return def_examples
def compute_gender_dir(device, tokenizer, bert_encoder, def_pairs, max_seq_length, k, load, task, word_level=False, keepdims=False):
'''Compute gender bias direction from definitional sentence pairs.'''
def_examples = get_def_examples(def_pairs) # 1D list where 2i and 2i+1 are a pair
all_embeddings = extract_embeddings_pair(bert_encoder, tokenizer, def_examples, max_seq_length, device, load, task,
label_list=None, output_mode=None, norm=True, word_level=word_level)
gender_dir = doPCA(all_embeddings).components_[:k]
if (not keepdims):
gender_dir = np.mean(gender_dir, axis=0)
logger.info("gender direction={} {} {}".format(gender_dir.shape,
type(gender_dir), gender_dir[:10]))
return gender_dir
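
def _remove_bias_component(emb, gender_dir):
    # Hedged sketch (added for illustration, assuming the averaged 1-D
    # direction returned by compute_gender_dir with keepdims=False):
    # debiasing subtracts the projection of `emb` onto the unit bias
    # direction, which my_we.dropspace generalizes to a k-dim subspace.
    v = gender_dir / np.linalg.norm(gender_dir)
    return emb - np.dot(emb, v) * v
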
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode):
"""Loads a data file into a list of input features."""
'''
output_mode: classification or regression
'''
if (label_list != None):
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert(len(input_ids) == max_seq_length)
assert(len(input_mask) == max_seq_length)
assert(len(segment_ids) == max_seq_length)
if (label_list != None):
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
else:
label_id = None
features.append(
InputFeatures(tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
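# Illustration (hedged; the exact ids depend on the checkpoint vocabulary):
# for max_seq_length=8 and the single sentence "the dog", the feature built
# above would look like
#   tokens:      ["[CLS]", "the", "dog", "[SEP]"]
#   input_ids:   [101, 1996, 3899, 102, 0, 0, 0, 0]
#   input_mask:  [1, 1, 1, 1, 0, 0, 0, 0]
#   segment_ids: [0, 0, 0, 0, 0, 0, 0, 0]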
def convert_examples_to_dualfeatures(examples, label_list, max_seq_length, tokenizer, output_mode):
"""Loads a data file into a list of dual input features."""
'''
output_mode: classification or regression
'''
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
# truncate length
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens_a = ["[CLS]"] + tokens_a + ["[SEP]"]
segments_a = [0] * len(tokens_a)
input_ids_a = tokenizer.convert_tokens_to_ids(tokens_a)
mask_a = [1] * len(input_ids_a)
padding_a = [0] * (max_seq_length - len(input_ids_a))
input_ids_a += padding_a
mask_a += padding_a
segments_a += padding_a
assert(len(input_ids_a) == max_seq_length)
assert(len(mask_a) == max_seq_length)
assert(len(segments_a) == max_seq_length)
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if len(tokens_b) > max_seq_length - 2:
tokens_b = tokens_b[:(max_seq_length - 2)]
tokens_b = ["[CLS]"] + tokens_b + ["[SEP]"]
segments_b = [0] * len(tokens_b)
input_ids_b = tokenizer.convert_tokens_to_ids(tokens_b)
mask_b = [1] * len(input_ids_b)
padding_b = [0] * (max_seq_length - len(input_ids_b))
input_ids_b += padding_b
mask_b += padding_b
segments_b += padding_b
assert(len(input_ids_b) == max_seq_length)
assert(len(mask_b) == max_seq_length)
assert(len(segments_b) == max_seq_length)
else:
input_ids_b = None
mask_b = None
segments_b = None
features.append(
DualInputFeatures(input_ids_a=input_ids_a,
input_ids_b=input_ids_b,
mask_a=mask_a,
mask_b=mask_b,
segments_a=segments_a,
segments_b=segments_b))
return features
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def parse_args():
'''Parse command line arguments.'''
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default="bert-base-uncased", type=str,
choices = ["bert-base-uncased", "bert-large-uncased", "bert-base-cased",
"bert-large-cased", "bert-base-multilingual-uncased", "bert-base-multilingual-cased",
"bert-base-chinese"],
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--resume_model_path",
type=str,
default="",
help="Whether to resume from a model.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--normalize",
action='store_true',
help="Set this flag if you want embeddings normalized.")
parser.add_argument("--tune_bert",
action='store_true',
help="Set this flag if you want to fine-tune bert model.")
parser.add_argument("--debias",
action='store_true',
help="Set this flag if you want embeddings debiased.")
parser.add_argument("--no_save",
action='store_true',
help="Set this flag if you don't want to save any results.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=2e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument("--def_pairs_name", default="all", type=str,
help="Name of definitional sentence pairs.")
parser.add_argument("--num_dimension", "-k", type=int, default=1,
help="dimensionality of bias subspace")
args = parser.parse_args()
if (args.output_dir == None):
args.output_dir = os.path.join('results', args.task_name, args.bert_model)
print("output_dir={}".format(args.output_dir))
if (args.do_lower_case and 'uncased' not in args.bert_model):
raise ValueError("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False.")
if (not args.do_lower_case and 'uncased' in args.bert_model):
raise ValueError("The pre-trained model you are loading is an uncased model but you have not set "
"`do_lower_case` to True.")
return args
def get_tokenizer_encoder(args, device=None):
'''Return BERT tokenizer and encoder based on args. Used in eval_bias.py.'''
print("get tokenizer from {}".format(args.model_path))
tokenizer = BertTokenizer.from_pretrained(args.model_path, do_lower_case=args.do_lower_case)
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model_weights_path = args.model_path
model = BertForSequenceClassification.from_pretrained(model_weights_path,
cache_dir=cache_dir,
num_labels=2,
normalize=args.normalize,
tune_bert=args.tune_bert)
if (device != None): model.to(device)
bert_encoder = BertEncoder(model, device)
return tokenizer, bert_encoder
def get_encodings(args, encs, tokenizer, bert_encoder, gender_space, device,
word_level=False, specific_set=None):
'''Extract BERT embeddings from encodings dictionary.
Perform the debiasing step if debias is specified in args.
'''
if (word_level): assert(specific_set != None)
logger.info("Get encodings")
logger.info("Debias={}".format(args.debias))
examples_dict = dict()
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
texts = encs[key]['examples']
category = encs[key]['category'].lower()
examples = []
encs[key]['text_ids'] = dict()
for i, text in enumerate(texts):
examples.append(InputExample(guid='{}'.format(i), text_a=text, text_b=None, label=None))
encs[key]['text_ids'][i] = text
examples_dict[key] = examples
all_embeddings = extract_embeddings(bert_encoder, tokenizer, examples, args.max_seq_length, device,
label_list=None, output_mode=None, norm=False, word_level=word_level)
logger.info("Debias category {}".format(category))
emb_dict = {}
for index, emb in enumerate(all_embeddings):
emb /= np.linalg.norm(emb)
if (args.debias and not category in {'male','female'}): # don't debias gender definitional sentences
emb = my_we.dropspace(emb, gender_space)
emb /= np.linalg.norm(emb) # Normalization actually doesn't affect e_size
emb_dict[index] = emb
encs[key]['encs'] = emb_dict
return encs
def prepare_model_and_bias(args, device, num_labels, cache_dir):
'''Return model and gender direction (computed by resume_model_path)'''
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# a. load pretrained model and compute gender direction
model_weights_path = args.bert_model if (args.resume_model_path == "") else args.resume_model_path
logger.info("Initialize model with {}".format(model_weights_path))
model = BertForSequenceClassification.from_pretrained(model_weights_path,
cache_dir=cache_dir,
num_labels=num_labels,
normalize=args.normalize,
tune_bert=args.tune_bert).to(device)
gender_dir = None
if (args.debias):
bert_encoder = BertEncoder(model, device)
def_pairs = get_def_pairs(args.def_pairs_name)
gender_dir = compute_gender_dir(device, tokenizer, bert_encoder,
def_pairs, args.max_seq_length, k=args.num_dimension, load=True, task='pretrained')
gender_dir = torch.tensor(gender_dir, dtype=torch.float, device=device)
return model, tokenizer, gender_dir
def prepare_model_and_pretrained_bias(args, device, num_labels, cache_dir):
'''Return model and gender direction (computed by pretrained bert)'''
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# a. load pretrained model and compute gender direction
model_pretrained = BertForSequenceClassification.from_pretrained(args.bert_model,
cache_dir=cache_dir,
num_labels=num_labels,
normalize=args.normalize,
tune_bert=args.tune_bert).to(device)
gender_dir_pretrained = None
if (args.debias):
bert_encoder_pretrained = BertEncoder(model_pretrained, device)
def_pairs = get_def_pairs(args.def_pairs_name)
gender_dir_pretrained = compute_gender_dir(device, tokenizer, bert_encoder_pretrained,
def_pairs, args.max_seq_length, k=args.num_dimension, load=True, task='pretrained')
gender_dir_pretrained = torch.tensor(gender_dir_pretrained, dtype=torch.float, device=device)
if (args.resume_model_path == ""):
model_weights_path = args.bert_model
else:
model_weights_path = args.resume_model_path
logger.info("Resume training from {}".format(model_weights_path))
# b. Load model for training
if (args.do_train and args.resume_model_path != ""):
del model_pretrained
model = BertForSequenceClassification.from_pretrained(args.resume_model_path,
cache_dir=cache_dir,
num_labels=num_labels,
normalize=args.normalize,
tune_bert=args.tune_bert).to(device)
else:
model = model_pretrained
return model, tokenizer, gender_dir_pretrained
def prepare_optimizer(args, model, num_train_optimization_steps):
'''Initialize and return optimizer.'''
# Prepare optimizer
logger.info("Prepare optimizer {} fine-tuning".format("with" if args.tune_bert else "without"))
if (args.tune_bert):
param_optimizer = list(model.named_parameters()) # include all parameters
else:
param_optimizer = list(model.classifier.named_parameters()) # only the classification head
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
return optimizer
def main():
'''Fine-tune BERT on the specified task and evaluate on dev set.'''
args = parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {
"cola": ColaProcessor,
"sst-2": Sst2Processor,
"qnli": QnliProcessor
}
output_modes = {
"cola": "classification",
"sst-2": "classification",
"qnli": "classification"
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if (not args.no_save):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
train_examples = None
num_train_optimization_steps = None
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
if (args.do_train):
# Prepare training examples, model, tokenizer, optimizer, and bias direction.
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
model, tokenizer, gender_dir_pretrained = prepare_model_and_bias(args, device, num_labels, cache_dir)
optimizer = prepare_optimizer(args, model, num_train_optimization_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
# start training
logger.info("Prepare training features")
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.classifier.train()
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
epoch_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# define a new function to compute loss values for both output_modes
logits = model(input_ids, segment_ids, input_mask, remove_bias=args.debias, bias_dir=gender_dir_pretrained)
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
epoch_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
epoch_loss /= len(train_dataloader)
print("Epoch {}: loss={}".format(epoch, epoch_loss))
if not args.no_save:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if (not args.do_train):
# Load a trained model and vocabulary that you have fine-tuned
model = BertForSequenceClassification.from_pretrained(args.output_dir,
cache_dir=cache_dir,
num_labels=num_labels,
normalize=args.normalize,
tune_bert=args.tune_bert)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(device)
# Get gender direction
gender_dir_tuned = None
if args.debias:
bert_encoder = BertEncoder(model, device)
def_pairs = get_def_pairs(args.def_pairs_name)
gender_dir_tuned = compute_gender_dir(device, tokenizer, bert_encoder, def_pairs, args.max_seq_length, k=args.num_dimension, load=False, task=args.task_name)
gender_dir_tuned = torch.tensor(gender_dir_tuned, dtype=torch.float, device=device)
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
all_sample_ids = torch.arange(len(eval_features), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_sample_ids)
# Run prediction for full data
eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size, shuffle=False)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
for input_ids, input_mask, segment_ids, label_ids, sample_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask,
labels=None, remove_bias=args.debias, bias_dir=gender_dir_tuned)
# create eval loss and other metric required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(task_name, preds, all_label_ids.numpy())
loss = tr_loss/global_step if (args.do_train and global_step > 0) else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
if (not args.no_save):
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 40,329 | 36.691589 | 160 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/extract_features.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from a PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import re
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
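# Hedged input-format note (added for illustration): each line of --input_file
# is either a single sentence or a " ||| "-separated pair, e.g.
#   Who was Jim Henson ?
#   Who was Jim Henson ? ||| Jim Henson was a puppeteer
# The first form leaves text_b=None; the second fills both text_a and text_b.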
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_file", default=None, type=str, required=True)
parser.add_argument("--output_file", default=None, type=str, required=True)
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
## Other parameters
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--layers", default="-1,-2,-3,-4", type=str)
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
"than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help = "local_rank for distributed training on gpus")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))
layer_indexes = [int(x) for x in args.layers.split(",")]
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
examples = read_examples(args.input_file)
features = convert_examples_to_features(
examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model = BertModel.from_pretrained(args.bert_model)
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
with open(args.output_file, "w", encoding='utf-8') as writer:
for input_ids, input_mask, example_indices in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask)
for b, example_index in enumerate(example_indices):
feature = features[example_index.item()]
unique_id = int(feature.unique_id)
# feature = unique_id_to_feature[unique_id]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
all_out_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
layer_output = layer_output[b]
layers = collections.OrderedDict()
layers["index"] = layer_index
layers["values"] = [
round(x.item(), 6) for x in layer_output[i]
]
all_layers.append(layers)
out_features = collections.OrderedDict()
out_features["token"] = token
out_features["layers"] = all_layers
all_out_features.append(out_features)
output_json["features"] = all_out_features
writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
main()
| 12,208 | 39.969799 | 120 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/run_elmo.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
import torch.nn as nn
import torch.optim as optim
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.decomposition import PCA
from allennlp.modules.elmo import Elmo, batch_to_ids
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper, ELMoTokenCharactersIndexer
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
# from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertModel, BertConfig
# from pytorch_pretrained_bert.tokenization import BertTokenizer
# from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
# from bias_data.def_sent_pairs import full_def_sent_pairs, thisis_def_sent_pairs, expanded_thisis
# from bias_data.more_def_sent_pairs import full_def_sent_pairs
from bias_data.def_sent_pairs import pairs_dict
from allennlp.commands.elmo import ElmoEmbedder
logger = logging.getLogger(__name__)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Config(dict):
def __init__(self, **kwargs):
super().__init__(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def set(self, key, val):
self[key] = val
setattr(self, key, val)
config = Config(
testing=True,
seed=1,
batch_size=64,
lr=3e-4,
epochs=2,
hidden_sz=64,
max_seq_len=100, # necessary to limit memory usage
max_vocab_size=100000,
)
OPTIONS_FILE = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
WEIGHT_FILE = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
elmo = Elmo(options_file=OPTIONS_FILE, weight_file=WEIGHT_FILE,
do_layer_norm=False, dropout=0.0, num_output_representations=1).to(device)
def tokenizer(x: str):
return [w.text for w in
SpacyWordSplitter(language='en_core_web_sm',
pos_tags=False).split_words(x)[:config.max_seq_len]]
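
def _embed_sentences(sentences):
    # Hedged usage sketch (added for illustration): batch_to_ids maps
    # tokenized sentences to character ids, and the single output
    # representation requested above has shape (batch, max_len, 1024).
    character_ids = batch_to_ids([tokenizer(s) for s in sentences]).to(device)
    return elmo(character_ids)['elmo_representations'][0]
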
class LSTMClassifier(nn.Module):
def __init__(self, elmo, num_labels, device, normalize=False):
super(LSTMClassifier, self).__init__()
self.elmo = elmo.to(device)
self.input_dim = 1024
self.hidden_size = 512
self.num_layers = 1
self.num_labels = num_labels
self.dropout = nn.Dropout(0.1)
self.rnn = nn.LSTM(input_size=self.input_dim, hidden_size=self.hidden_size,
num_layers=self.num_layers , batch_first=True)
self.classifier = nn.Linear(self.hidden_size * self.num_layers, num_labels)
# self.hidden2lable = nn.Linear(self.input_dim, num_labels)
self.act = nn.Tanh()
self.device = device
self.normalize = normalize
logger.info("Normalize={}".format(normalize))
def drop_bias(self, u, v):
return u - torch.ger(torch.matmul(u, v), v) / v.dot(v)
def forward(self, embeddings, remove_bias=False, bias_dir=None):
# embeddings: batch_size x T x embed_size
batch_size = embeddings.shape[0]
T = embeddings.shape[1]
if (self.normalize):
embeddings = nn.functional.normalize(embeddings, p=2, dim=1)
if (remove_bias):
embeddings = embeddings.view(-1, self.input_dim)
embeddings = self.drop_bias(embeddings, bias_dir)
embeddings = embeddings.view(batch_size, T, self.input_dim)
embeddings = nn.functional.normalize(embeddings, p=2, dim=1)
# Gradient starts from here
embeddings = self.dropout(embeddings)
_, (h, c) = self.rnn(embeddings)
        # h: (num_layers * num_directions) x B x H; here (1, B, H)
h = h.permute(1, 0, 2).contiguous().view(batch_size, -1)
logits = self.classifier(h)
return logits
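# A minimal, self-contained sketch of what LSTMClassifier.drop_bias computes:
# it removes each row's projection onto the bias direction v, leaving vectors
# orthogonal to v. The tensors below are toy values, not repo data.
def _demo_drop_bias():
    u = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # two toy "embeddings"
    v = torch.tensor([1.0, 0.0])                # toy bias direction
    debiased = u - torch.ger(torch.matmul(u, v), v) / v.dot(v)
    assert torch.allclose(debiased @ v, torch.zeros(2))  # rows now orthogonal to v
    return debiased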
class FullyConnectedClassifier(nn.Module):
def __init__(self, num_labels, hidden_dims, device, normalize=False):
super(FullyConnectedClassifier, self).__init__()
self.input_dim = 1024
self.num_labels = num_labels
self.dropout = nn.Dropout(0.1)
layers = []
hidden_dims.append(num_labels)
prev_hidden_dim = self.input_dim
for i in range(len(hidden_dims)):
hidden_dim = hidden_dims[i]
layers.append(nn.Linear(prev_hidden_dim, hidden_dim))
prev_hidden_dim = hidden_dim
self.layers = nn.ModuleList(layers)
self.act = nn.Tanh()
self.device = device
self.normalize = normalize
logger.info("Normalize={}".format(normalize))
def drop_bias(self, u, v):
return u - torch.ger(torch.matmul(u, v), v) / v.dot(v)
def forward(self, embeddings, remove_bias=False, bias_dir=None):
# embeddings: batch_size x embed_size
# Detach from here
if (self.normalize):
embeddings = nn.functional.normalize(embeddings, p=2, dim=1)
embeddings -= torch.mean(embeddings, dim=-1, keepdim=True)
embeddings /= torch.std(embeddings, dim=-1, keepdim=True)
if (remove_bias):
embeddings = self.drop_bias(embeddings, bias_dir)
embeddings = nn.functional.normalize(embeddings, p=2, dim=1)
# Gradient starts from here
embeddings = self.dropout(embeddings)
for layer in self.layers:
embeddings = self.act(embeddings)
embeddings = layer(embeddings)
logits = embeddings
return logits
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class ColaElmoProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
data_file = os.path.join(data_dir, "{};{}.npy".format("cola", "train"))
examples = np.load(data_file)
return examples
def get_dev_examples(self, data_dir):
data_file = os.path.join(data_dir, "{};{}.npy".format("cola", "dev"))
examples = np.load(data_file)
return examples
def get_labels(self):
"""See base class."""
return ["0", "1"]
class Sst2ElmoProcessor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
data_file = os.path.join(data_dir, "{};{}.npy".format("sst-2", "train"))
examples = np.load(data_file)
return examples
def get_dev_examples(self, data_dir):
data_file = os.path.join(data_dir, "{};{}.npy".format("sst-2", "dev"))
examples = np.load(data_file)
return examples
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
label_list = self.get_labels()
label_map = {label : i for i, label in enumerate(label_list)}
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
label_id = label_map[label]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label_id))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
label_list = self.get_labels()
label_map = {label : i for i, label in enumerate(label_list)}
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
label_id = label_map[label]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label_id))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
label_list = self.get_labels()
label_map = {label : i for i, label in enumerate(label_list)}
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
label_id = label_map[label]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label_id))
return examples
class ElmoDataset(Dataset):
def __init__(self, examples):
self.examples = examples
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
example = self.examples[idx]
sent = torch.tensor(example[0], dtype=torch.float)
label = torch.tensor(example[1], dtype=torch.long)
return sent, label
class SentenceDataset(Dataset):
def __init__(self, examples):
self.examples = examples
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
example = self.examples[idx]
sent = example.text_a
label = example.label
return sent, label
def my_collate(batch):
sentences = [tokenizer(item[0]) for item in batch]
character_ids = batch_to_ids(sentences).to(device)
elmo_dir = elmo(character_ids)
embeddings = elmo_dir['elmo_representations'][0]
labels = [item[1] for item in batch]
    labels = torch.tensor(labels, dtype=torch.long, device=device)  # the legacy LongTensor constructor rejects CUDA devices
return embeddings, labels
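# Usage sketch (defined but not executed on import): `my_collate` turns raw
# (sentence, label) pairs into ELMo embeddings on the fly, so the DataLoader
# yields (batch, T, 1024) tensors. Actually running this downloads the ELMo
# weights from the URLs above, so treat it as a sketch of the wiring only.
def _demo_elmo_loader():
    examples = [InputExample(guid="demo-0", text_a="the movie was great", label=1),
                InputExample(guid="demo-1", text_a="a dull, lifeless film", label=0)]
    loader = DataLoader(SentenceDataset(examples), batch_size=2, collate_fn=my_collate)
    for embeddings, labels in loader:
        print(embeddings.shape, labels)  # e.g. torch.Size([2, 5, 1024]) tensor([1, 0])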
def get_def_examples(def_pairs_name):
def_pairs = pairs_dict[def_pairs_name]
def_examples = []
for i, pair in enumerate(def_pairs):
sentA = pair[0]
sentB = pair[1]
def_examples.append(InputExample(guid='{}-a'.format(i),
text_a=sentA, text_b=None, label=None))
def_examples.append(InputExample(guid='{}-b'.format(i),
text_a=sentB, text_b=None, label=None))
# for i in range(10):
# example = def_examples[i]
# print(example.guid, example.text_a)
return def_examples
def doPCA(pairs, num_components = 10):
matrix = []
for a, b in pairs:
center = (a + b)/2
matrix.append(a - center)
matrix.append(b - center)
matrix = np.array(matrix)
pca = PCA(n_components=num_components, svd_solver="full")
    pca.fit(matrix)  # with svd_solver="full" the fit is deterministic up to component sign
return pca
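# Toy illustration of `doPCA` (synthetic vectors): when each (a, b) pair
# differs mainly along one axis, the first principal component of the
# pair-to-center differences recovers that axis, up to sign.
def _demo_doPCA():
    rng = np.random.RandomState(0)
    base = rng.rand(8, 4)
    shift = np.array([1.0, 0.0, 0.0, 0.0])
    pairs = [(b + shift, b - shift) for b in base]
    pca = doPCA(pairs, num_components=2)
    print(pca.components_[0])  # dominated by the first coordinate, up to sign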
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
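# Quick sanity check of the metric dispatch on toy predictions (synthetic
# labels, not task data): CoLA reports Matthews correlation, SST-2 accuracy.
def _demo_compute_metrics():
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(compute_metrics("cola", preds, labels))   # {'mcc': ...}
    print(compute_metrics("sst-2", preds, labels))  # {'acc': 0.75}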
def parse_args():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
choices = ["bert-base-uncased", "bert-large-uncased", "bert-base-cased",
"bert-large-cased", "bert-base-multilingual-uncased", "bert-base-multilingual-cased",
"bert-base-chinese"],
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--resume_model_path",
type=str,
default="",
help="Whether to resume from a model.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--normalize",
action='store_true',
help="Set this flag if you want embeddings normalized.")
parser.add_argument("--tune_bert",
action='store_true',
help="Set this flag if you want to fine-tune bert model.")
parser.add_argument("--debias",
action='store_true',
help="Set this flag if you want embeddings normalized.")
parser.add_argument("--no_save",
action='store_true',
help="Set this flag if you don't want to save any results.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=0.1,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=102,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument("--def_pairs_name", default="large_real", type=str,
help="Name of definitional sentence pairs.")
parser.add_argument("--weights_name", default="model_weights", type=str)
parser.add_argument("--overwrite",
action='store_true',
help="Overwrite output directory if it already exists")
args = parser.parse_args()
    if (args.output_dir is None):
args.output_dir = os.path.join('results', args.task_name, args.bert_model)
print("output_dir={}".format(args.output_dir))
    if (args.do_lower_case and 'uncased' not in args.bert_model):
        raise ValueError("The pre-trained model you are loading is a cased model, "
                         "so you should not pass `--do_lower_case`.")
    if (not args.do_lower_case and 'uncased' in args.bert_model):
        raise ValueError("The pre-trained model you are loading is an uncased model, "
                         "so you should pass `--do_lower_case`.")
return args
def correct_count(A, B):
count = np.sum(A == B)
return count
def load_gender_dir():
filename = os.path.join("elmo_data", "gender_dir.npy")
gender_dir = np.load(filename)
return gender_dir
def main():
args = parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {
"cola": ColaProcessor,
"sst-2": Sst2Processor
}
output_modes = {
"cola": "classification",
"sst-2": "classification"
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite and not args.no_save:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
train_examples = None
num_train_optimization_steps = None
if args.do_train:
print("Prepare training examples...")
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# train_dataset = SentenceDataset(train_examples)
# train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.train_batch_size,
# collate_fn=my_collate)
# logger.info("Number of batches={}".format(len(train_loader)))
# model = LSTMClassifier(elmo, len(label_list), device, normalize=True).to(device)
# for data, label in train_loader:
# data = data.to(device)
# # logger.info("data={} label={}".format(len(data), label))
# logits = model(data)
# logger.info("logits={}".format(logits.shape))
# return
hidden_dims = [512, 256, 128, 64, 16]
model = LSTMClassifier(elmo, len(label_list), device, normalize=True).to(device)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = nn.DataParallel(model)
gender_dir = None
if args.debias:
gender_dir = load_gender_dir()
gender_dir = torch.tensor(gender_dir, dtype=torch.float, device=device)
logger.info("Gender direction: {}".format(gender_dir[:10]))
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
loss_fct = CrossEntropyLoss() if (output_mode == "classification") else MSELoss()
global_step = 0
tr_loss = 0
if args.do_train:
train_dataset = SentenceDataset(train_examples)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.train_batch_size,
collate_fn=my_collate)
eval_examples = processor.get_dev_examples(args.data_dir)
eval_dataset = SentenceDataset(eval_examples)
eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size, shuffle=False,
collate_fn=my_collate)
model.train()
        best_epoch_loss = float('inf')  # lower is better; start at +inf so the first epoch counts as an improvement
best_metric = 0.
best_result = dict()
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
epoch_loss = 0
all_correct = 0
model.train()
for step, (data, label_ids) in enumerate(tqdm(train_dataloader, desc="Iteration")):
label_ids = label_ids.to(device)
data = data.to(device)
logits = model(data, remove_bias=args.debias, bias_dir=gender_dir)
# logger.info("logits={}".format(logits))
predictions = np.argmax(logits.detach().cpu().numpy(), axis=-1)
all_correct += correct_count(predictions, label_ids.cpu().numpy())
if output_mode == "classification":
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss = loss_fct(logits.view(-1), label_ids.view(-1))
loss.backward()
tr_loss += loss.item()
epoch_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
epoch_loss /= len(train_dataloader)
# Evaluation
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
all_label_ids = []
for data, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
data = data.to(device)
label_ids = label_ids.to(device)
all_label_ids.append(label_ids.cpu().numpy())
with torch.no_grad():
logits = model(data, remove_bias=args.debias, bias_dir=gender_dir)
# logger.info("logits={}".format(logits))
# create eval loss and other metric required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
# logger.info("preds={}".format(preds[:10]))
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
all_label_ids = np.concatenate(all_label_ids)
result = compute_metrics(task_name, preds, all_label_ids)
metric = result[list(result.keys())[0]]
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
if (metric > best_metric):
best_metric = metric
best_result = result
if (output_mode == "classification"):
train_acc = all_correct / len(train_examples)
print("Epoch {}: loss={} acc={}".format(epoch, epoch_loss, train_acc))
elif (output_mode == "regression"):
print("Epoch {}: loss={}".format(epoch, epoch_loss))
if (epoch_loss < best_epoch_loss):
best_epoch_loss = epoch_loss
else:
scheduler.step()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Save a trained model, configuration and tokenizer
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, args.weights_name)
if (not args.no_save):
torch.save(model.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
if (args.do_eval and not args.do_train):
        states = torch.load(args.resume_model_path)
        # checkpoints saved by this script are raw state dicts; also accept
        # checkpoints that wrap the state dict under a "model" key
        model.load_state_dict(states["model"] if isinstance(states, dict) and "model" in states else states)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Get gender direction
eval_examples = processor.get_dev_examples(args.data_dir)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_dataset = SentenceDataset(eval_examples)
eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size, shuffle=False,
collate_fn=my_collate)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
all_label_ids = []
for data, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
data = data.to(device)
label_ids = label_ids.to(device)
all_label_ids.append(label_ids.cpu().numpy())
with torch.no_grad():
logits = model(data, remove_bias=args.debias, bias_dir=gender_dir)
# logger.info("logits={}".format(logits))
# create eval loss and other metric required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
# logger.info("preds={}".format(preds[:10]))
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
all_label_ids = np.concatenate(all_label_ids)
logger.info("preds={} {}".format(len(preds), preds[:20]))
logger.info("label={} {}".format(len(all_label_ids), all_label_ids[:20]))
result = compute_metrics(task_name, preds, all_label_ids)
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        writer = open(output_eval_file, "w") if (not args.no_save) else None
        if args.do_train:
            # report the best epoch's metrics from training; in eval-only
            # mode keep the result just computed above
            result = best_result
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            if (writer is not None):
                writer.write("%s = %s\n" % (key, str(result[key])))
        if (writer is not None):
            writer.close()
if __name__ == "__main__":
main()
'''
cola biased:
CUDA_VISIBLE_DEVICES=2 python run_elmo.py --output_dir elmo-results/CoLA-lstm --task_name CoLA --do_eval --do_lower_case --data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA --bert_model bert-base-uncased --max_seq_length 128 --train_batch_size 32 --learning_rate 0.001 --num_train_epochs 50.0 --normalize --do_train
CUDA_VISIBLE_DEVICES=2 \
python run_elmo.py --output_dir elmo-results/CoLA-lstm-biased \
--task_name CoLA \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
mcc: 39.1
cola debias:
CUDA_VISIBLE_DEVICES=3 \
python run_elmo.py --output_dir elmo-results/CoLA-lstm-debiased \
--debias \
--task_name CoLA \
--do_eval \
--do_train \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 7.0 \
--normalize \
--debias
sst biased:
screen: elmo-sst-biased
CUDA_VISIBLE_DEVICES=0 \
python run_elmo.py --output_dir elmo-results/SST-2-lstm-biased \
--task_name SST-2 \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/SST-2 \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
sst debiased:
screen: elmo-sst-debias
CUDA_VISIBLE_DEVICES=1 \
python run_elmo.py --output_dir elmo-results/SST-2-lstm-debias \
--task_name SST-2 \
--debias \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/SST-2 \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
'''
| 33,150 | 32.250752 | 346 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/extract_elmo.py | from __future__ import print_function, division
from allennlp.modules.elmo import Elmo, batch_to_ids
import torch
import time
device = "cuda" if torch.cuda.is_available() else "cpu"
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
elmo = Elmo(options_file=options_file, weight_file=weight_file,
do_layer_norm=False, dropout=0.0, num_output_representations=1)
elmo = elmo.to(device)
# tokens = {""}
# elmo_tokens = tokens.pop("elmo", None)
# elmo_representations = elmo(elmo_tokens)["elmo_representations"]
start_time = time.time()
sentences = [['First', 'sentence', '.'], ['Another', '.']]
character_ids = batch_to_ids(sentences).to(device)
print("character_ids", character_ids.shape, type(character_ids))
embeddings = elmo(character_ids)
elapsed_time = time.time() - start_time
print("time={}".format(elapsed_time))
print(type(embeddings), embeddings.keys())
elmo_representations = embeddings['elmo_representations']
print(len(elmo_representations))
for i in range(len(elmo_representations)):
e = elmo_representations[i]
print(type(e), e.shape)
| 1,305 | 36.314286 | 148 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/retrieval.py | import pickle
import os
import collections
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# retrieval on biased/debiased sentence embeddings
# model is pretrained BERT
DATA_DIR = "./acl_bias_eval_results/pretrained"
def load_encs(test_id, debiased):
bias_flag = "debiased" if debiased else "biased"
filename = "{}_encs{}.pkl".format(bias_flag, test_id)
with open(os.path.join(DATA_DIR, filename), 'rb') as f:
encs = pickle.load(f)
# return encs
# Levels of keys in encs:
# 1: 'targ1', 'targ2', 'attr1', 'attr2'
# 2: 'category', 'examples', 'text_ids', 'encs'
# 3: 'encs': text->embedding
gender_data = dict()
nogender_data = dict()
group_names = encs.keys()
for group_name in group_names:
group = encs[group_name]
category = group["category"]
group_encs = group["encs"]
text_ids = group["text_ids"]
group_size = len(text_ids)
# print("group={} category={} size={}".format(group_name, category, group_size))
# print(text_ids.values())
text2encs = dict()
for text_id in text_ids:
text = text_ids[text_id]
sent_enc = group_encs[str(text_id)]
text2encs[text] = sent_enc
if ("male" in category.lower()):
gender_data[category] = text2encs
else:
nogender_data[category] = text2encs
return gender_data, nogender_data
def index_list(A, indices):
return [A[i] for i in indices]
def get_source_sents(gender_data, avg):
if (not avg):
# choose gender specific sentences
gender_key = list(gender_data.keys())[0].lower()
if ("term" in gender_key):
# source_sents = ["Here he is.", "There she is."]
source_sents = ["This is a man.", "This is a woman."]
elif ("name" in gender_key):
source_sents = ["This is John.", "This is Amy."]
source_dict = dict()
# obtain source embeddings
for category in gender_data:
text2encs = gender_data[category]
# print(text2encs.keys())
for sent in source_sents:
if (sent in text2encs):
source_dict[sent] = text2encs[sent]
for sent in source_sents: assert(sent in source_dict)
else:
source_dict = dict()
for category in gender_data:
category_data = gender_data[category]
avg_ebd = np.array([category_data[text] for text in category_data])
avg_ebd = np.mean(avg_ebd, axis=0)
source_dict[category] = avg_ebd
return source_dict
def retrieve_topk(test_id, debiased, k=10):
gender_data, nogender_data = load_encs(test_id, debiased)
if (debiased): print("DEBIASED")
else: print("BIASED")
# print("gendered categories: {}".format(gender_data.keys()))
# print("non-gendered categories: {}".format(nogender_data.keys()))
source_dict = get_source_sents(gender_data, avg=True)
sent_keys = list(source_dict.keys())
sources = np.array([source_dict[sent] for sent in sent_keys])
# construct targets
targets = []
cat_assignment = []
target_texts = []
for category in nogender_data:
text2encs = nogender_data[category]
for text in text2encs:
targets.append(text2encs[text])
cat_assignment.append(category)
target_texts.append(text)
targets = np.array(targets)
print("sources={} targets={}".format(sources.shape, targets.shape))
# start retrieving!
	# negate so that np.argsort (ascending) ranks targets by descending cosine similarity
	sim_scores = -cosine_similarity(sources, targets)
rank_matrix = np.argsort(sim_scores, axis=-1)
for i, source_sent in enumerate(sent_keys):
count_dict = collections.defaultdict(int)
print("#"*80)
print("source={}".format(source_sent))
top_indices = rank_matrix[i,:k]
for rank in range(k):
index = top_indices[rank]
category = cat_assignment[index]
count_dict[category] += 1
print(category, target_texts[index], sim_scores[i,index])
print(count_dict)
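# A minimal sketch (toy vectors) of the ranking trick used above: negating
# cosine similarities makes np.argsort, which sorts ascending, return target
# indices ordered from most to least similar.
def _demo_ranking():
	sources = np.array([[1.0, 0.0]])
	targets = np.array([[0.9, 0.1], [0.0, 1.0], [0.7, 0.7]])
	order = np.argsort(-cosine_similarity(sources, targets), axis=-1)
	print(order)  # [[0 2 1]]: targets ranked by descending similarity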
test_id = "7b"
retrieve_topk(test_id=test_id, debiased=False)
retrieve_topk(test_id=test_id, debiased=True)
'''
6: family/career
7:3, 9:1 -> 7:3, 8:2
'''
| 3,780 | 26.201439 | 82 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/elmo_preprocess.py | from __future__ import absolute_import, division, print_function
import numpy as np
import argparse
import os, sys, math, time
from allennlp.commands.elmo import ElmoEmbedder
import csv
from tqdm import tqdm
'''
python elmo_preprocess.py --set train --data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/SST-2 --task_name sst-2
python elmo_preprocess.py --set train --data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA --task_name CoLA
'''
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
label_list = self.get_labels()
label_map = {label : i for i, label in enumerate(label_list)}
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
label_id = label_map[label]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label_id))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
label_list = self.get_labels()
label_map = {label : i for i, label in enumerate(label_list)}
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
label_id = label_map[label]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label_id))
return examples
processors = {
"cola": ColaProcessor,
"sst-2": Sst2Processor
}
class ElmoEncoder(object):
def __init__(self):
self.elmo = ElmoEmbedder()
# return: numpy array
def encode_batch(self, sents):
vec_seq = self.elmo.embed_sentences(sents)
vecs = []
for vec in vec_seq:
vecs.append(self.collapse_vec(vec))
# vecs = torch.stack(vecs)
vecs = np.stack(vecs)
return vecs
def collapse_vec(self, vec_seq, time_combine_method="max", layer_combine_method="add"):
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
return vec
def encode(self, sents, time_combine_method="max", layer_combine_method="add"):
""" Load ELMo and encode sents """
vecs = {}
for sent in sents:
vec_seq = self.elmo.embed_sentence(sent)
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
vecs[' '.join(sent)] = vec
return vecs
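# Toy check of `collapse_vec` (synthetic array, no ELMo download needed): a
# (layers, time, dim) array is reduced over time first ("max" by default),
# then over layers ("add"), yielding a single (dim,) sentence vector.
def _demo_collapse_vec():
	encoder = ElmoEncoder.__new__(ElmoEncoder)  # bypass __init__ so ELMo is not loaded
	vec_seq = np.arange(3 * 2 * 4, dtype=float).reshape(3, 2, 4)  # layers x T x dim
	vec = encoder.collapse_vec(vec_seq)
	print(vec.shape)  # (4,)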
def parse_args():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--set",
default=None,
type=str,
choices = ["train", "dev"],
required=True,
help="train or eval")
parser.add_argument("--output_dir",
default="elmo_data",
type=str,
help="The output directory where the embeddings will be stored.")
args = parser.parse_args()
args.task_name = args.task_name.lower()
return args
def get_embeddings(examples, encoder, label_list):
label_map = {label : i for i, label in enumerate(label_list)}
def collate_batch(batch_examples):
# start_time = time.time()
texts = []
labels = []
for example in batch_examples:
texts.append(example.text_a)
labels.append(example.label)
# elapsed_time = time.time() - start_time
# print("collate time={}".format(elapsed_time))
return texts, labels
batch_size = 32
embeddings = []
label_list = []
nexamples = len(examples)
	num_batch = int(math.floor(nexamples / batch_size))  # note: the final partial batch is dropped
for i in tqdm(range(num_batch), desc="Encoding"):
batch_examples = examples[i*batch_size:(i+1)*batch_size]
texts, batch_labels = collate_batch(batch_examples)
# start_time = time.time()
batch_ebds = encoder.encode_batch(texts)
# elapsed_time = time.time() - start_time
# print("encode time={}".format(elapsed_time))
batch_ebds = np.array(batch_ebds)
batch_labels = np.array(batch_labels)
embeddings.append(batch_ebds)
label_list.append(batch_labels)
# flatten
embeddings = np.concatenate(embeddings)
label_list = np.concatenate(label_list)
print("embeddings={} labels={}".format(embeddings.shape, label_list.shape))
# zip
data = list(zip(embeddings, label_list))
data = np.array(data)
return data
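# The object array saved by `main` below is what ColaElmoProcessor and
# Sst2ElmoProcessor in run_elmo.py load back: (embedding, label_id) pairs.
# Minimal sketch of reading one back (the path is illustrative only):
def _demo_load_embeddings(path="elmo_data/cola;train.npy"):
	data = np.load(path, allow_pickle=True)  # allow_pickle is required for object arrays on NumPy >= 1.16.3
	embedding, label_id = data[0]
	print(embedding.shape, label_id)  # e.g. (1024,) 1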
def main():
args = parse_args()
processor = processors[args.task_name]()
label_list = processor.get_labels()
if (args.set == "train"):
examples = processor.get_train_examples(args.data_dir)
elif (args.set == "dev"):
examples = processor.get_dev_examples(args.data_dir)
else:
raise NotImplementedError
print("examples={}".format(len(examples)))
elmo_encoder = ElmoEncoder()
start_time = time.time()
data = get_embeddings(examples, elmo_encoder, label_list)
elapsed_time = time.time() - start_time
print("Time={}".format(elapsed_time))
if (not os.path.exists(args.output_dir)): os.makedirs(args.output_dir)
output_file = os.path.join(args.output_dir, "{};{}".format(args.task_name, args.set))
np.save(output_file, data)
if __name__ == '__main__':
main()
| 8,824 | 27.652597 | 121 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/weat.py | ''' Implements the WEAT tests '''
import logging
import math
import itertools as it
import numpy as np
import scipy.special
import scipy.stats
# X and Y are two sets of target words of equal size.
# A and B are two sets of attribute words.
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
def cossim(x, y):
return np.dot(x, y) / math.sqrt(np.dot(x, x) * np.dot(y, y))
def construct_cossim_lookup(XY, AB):
"""
XY: mapping from target string to target vector (either in X or Y)
AB: mapping from attribute string to attribute vectore (either in A or B)
Returns an array of size (len(XY), len(AB)) containing cosine similarities
between items in XY and items in AB.
"""
cossims = np.zeros((len(XY), len(AB)))
for xy in XY:
for ab in AB:
cossims[xy, ab] = cossim(XY[xy], AB[ab])
return cossims
def s_wAB(A, B, cossims):
"""
Return vector of s(w, A, B) across w, where
s(w, A, B) = mean_{a in A} cos(w, a) - mean_{b in B} cos(w, b).
"""
return cossims[:, A].mean(axis=1) - cossims[:, B].mean(axis=1)
def s_XAB(X, s_wAB_memo):
r"""
Given indices of target concept X and precomputed s_wAB values,
return slightly more computationally efficient version of WEAT
statistic for p-value computation.
Caliskan defines the WEAT statistic s(X, Y, A, B) as
sum_{x in X} s(x, A, B) - sum_{y in Y} s(y, A, B)
where s(w, A, B) is defined as
mean_{a in A} cos(w, a) - mean_{b in B} cos(w, b).
The p-value is computed using a permutation test on (X, Y) over all
partitions (X', Y') of X union Y with |X'| = |Y'|.
However, for all partitions (X', Y') of X union Y,
s(X', Y', A, B)
= sum_{x in X'} s(x, A, B) + sum_{y in Y'} s(y, A, B)
= C,
a constant. Thus
sum_{x in X'} s(x, A, B) + sum_{y in Y'} s(y, A, B)
= sum_{x in X'} s(x, A, B) + (C - sum_{x in X'} s(x, A, B))
= C + 2 sum_{x in X'} s(x, A, B).
By monotonicity,
s(X', Y', A, B) > s(X, Y, A, B)
if and only if
[s(X', Y', A, B) - C] / 2 > [s(X, Y, A, B) - C] / 2,
that is,
sum_{x in X'} s(x, A, B) > sum_{x in X} s(x, A, B).
Thus we only need use the first component of s(X, Y, A, B) as our
test statistic.
"""
return s_wAB_memo[X].sum()
def s_XYAB(X, Y, s_wAB_memo):
r"""
Given indices of target concept X and precomputed s_wAB values,
the WEAT test statistic for p-value computation.
"""
return s_XAB(X, s_wAB_memo) - s_XAB(Y, s_wAB_memo)
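# Toy walk-through of the shortcut derived above (hand-built cosine matrix):
# since s(X', Y', A, B) is monotone in sum_{x in X'} s(x, A, B), the
# permutation test only needs s_XAB rather than the full two-sided statistic.
def _demo_s_XAB():
  cossims = np.array([[1.0, 0.0],   # x0: close to A, far from B
                      [0.0, 1.0]])  # x1: far from A, close to B
  memo = s_wAB(np.array([0]), np.array([1]), cossims)  # s(w, A, B) per row
  print(memo)                        # [ 1. -1.]
  print(s_XAB(np.array([0]), memo))  # 1.0 for X = {x0}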
def p_val_permutation_test(X, Y, A, B, n_samples, cossims, parametric=False):
''' Compute the p-val for the permutation test, which is defined as
the probability that a random even partition X_i, Y_i of X u Y
satisfies P[s(X_i, Y_i, A, B) > s(X, Y, A, B)]
'''
  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
  X = np.array(list(X), dtype=int)
  Y = np.array(list(Y), dtype=int)
  A = np.array(list(A), dtype=int)
  B = np.array(list(B), dtype=int)
assert len(X) == len(Y)
size = len(X)
s_wAB_memo = s_wAB(A, B, cossims=cossims)
XY = np.concatenate((X, Y))
if parametric:
logger.info('Using parametric test')
s = s_XYAB(X, Y, s_wAB_memo)
logger.info('Drawing {} samples'.format(n_samples))
samples = []
for _ in range(n_samples):
np.random.shuffle(XY)
Xi = XY[:size]
Yi = XY[size:]
assert len(Xi) == len(Yi)
si = s_XYAB(Xi, Yi, s_wAB_memo)
samples.append(si)
# Compute sample standard deviation and compute p-value by
# assuming normality of null distribution
logger.info('Inferring p-value based on normal distribution')
(shapiro_test_stat, shapiro_p_val) = scipy.stats.shapiro(samples)
logger.info('Shapiro-Wilk normality test statistic: {:.2g}, p-value: {:.2g}'.format(
shapiro_test_stat, shapiro_p_val))
sample_mean = np.mean(samples)
sample_std = np.std(samples, ddof=1)
logger.info('Sample mean: {:.2g}, sample standard deviation: {:.2g}'.format(
sample_mean, sample_std))
p_val = scipy.stats.norm.sf(s, loc=sample_mean, scale=sample_std)
return p_val
else:
logger.info('Using non-parametric test')
s = s_XAB(X, s_wAB_memo)
total_true = 0
total_equal = 0
total = 0
num_partitions = int(scipy.special.binom(2 * len(X), len(X)))
if num_partitions > n_samples:
# We only have as much precision as the number of samples drawn;
# bias the p-value (hallucinate a positive observation) to
# reflect that.
total_true += 1
total += 1
logger.info('Drawing {} samples (and biasing by 1)'.format(n_samples - total))
for _ in range(n_samples - 1):
np.random.shuffle(XY)
Xi = XY[:size]
assert 2 * len(Xi) == len(XY)
si = s_XAB(Xi, s_wAB_memo)
if si > s:
total_true += 1
elif si == s: # use conservative test
total_true += 1
total_equal += 1
total += 1
else:
logger.info('Using exact test ({} partitions)'.format(num_partitions))
for Xi in it.combinations(XY, len(X)):
      Xi = np.array(Xi, dtype=int)
assert 2 * len(Xi) == len(XY)
si = s_XAB(Xi, s_wAB_memo)
if si > s:
total_true += 1
elif si == s: # use conservative test
total_true += 1
total_equal += 1
total += 1
if total_equal:
logger.warning('Equalities contributed {}/{} to p-value'.format(total_equal, total))
return total_true / total
def mean_s_wAB(X, A, B, cossims):
return np.mean(s_wAB(A, B, cossims[X]))
def stdev_s_wAB(X, A, B, cossims):
return np.std(s_wAB(A, B, cossims[X]), ddof=1)
def effect_size(X, Y, A, B, cossims):
"""
Compute the effect size, which is defined as
[mean_{x in X} s(x, A, B) - mean_{y in Y} s(y, A, B)] /
[ stddev_{w in X u Y} s(w, A, B) ]
args:
- X, Y, A, B : sets of target (X, Y) and attribute (A, B) indices
"""
X = list(X)
Y = list(Y)
A = list(A)
B = list(B)
numerator = mean_s_wAB(X, A, B, cossims=cossims) - mean_s_wAB(Y, A, B, cossims=cossims)
denominator = stdev_s_wAB(X + Y, A, B, cossims=cossims)
return numerator / denominator
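# Effect-size sanity check on a hand-built similarity matrix (toy values):
# rows 0-1 are X, rows 2-3 are Y; column 0 is A, column 1 is B. X associates
# with A and Y with B, so the effect size comes out large and positive.
def _demo_effect_size():
  cossims = np.array([[0.9, 0.1],
                      [0.8, 0.2],
                      [0.1, 0.9],
                      [0.2, 0.8]])
  print(effect_size([0, 1], [2, 3], [0], [1], cossims=cossims))  # ~1.71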
def convert_keys_to_ints(X, Y):
return (
dict((i, v) for (i, (k, v)) in enumerate(X.items())),
dict((i + len(X), v) for (i, (k, v)) in enumerate(Y.items())),
)
def run_test(encs, n_samples=100000, parametric=False):
''' Run a WEAT.
args:
- encs (Dict[str: Dict]): dictionary mapping targ1, targ2, attr1, attr2
to dictionaries containing the category and the encodings
- n_samples (int): number of samples to draw to estimate p-value
(use exact test if number of permutations is less than or
equal to n_samples)
'''
X, Y = encs["targ1"]["encs"], encs["targ2"]["encs"]
A, B = encs["attr1"]["encs"], encs["attr2"]["encs"]
# First convert all keys to ints to facilitate array lookups
(X, Y) = convert_keys_to_ints(X, Y)
(A, B) = convert_keys_to_ints(A, B)
XY = X.copy()
XY.update(Y)
AB = A.copy()
AB.update(B)
logger.info("Computing cosine similarities...")
cossims = construct_cossim_lookup(XY, AB)
logger.info("Null hypothesis: no difference between %s and %s in association to attributes %s and %s",
encs["targ1"]["category"], encs["targ2"]["category"],
encs["attr1"]["category"], encs["attr2"]["category"])
logger.info("Computing pval...")
pval = p_val_permutation_test(X, Y, A, B, n_samples, cossims=cossims, parametric=parametric)
logger.info("pval: %g", pval)
logger.info("computing effect size...")
esize = effect_size(X, Y, A, B, cossims=cossims)
logger.info("esize: %g", esize)
return esize, pval
if __name__ == "__main__":
X = {"x" + str(i): 2 * np.random.rand(10) - 1 for i in range(25)}
Y = {"y" + str(i): 2 * np.random.rand(10) - 1 for i in range(25)}
A = {"a" + str(i): 2 * np.random.rand(10) - 1 for i in range(25)}
B = {"b" + str(i): 2 * np.random.rand(10) - 1 for i in range(25)}
# A = X
# B = Y
print(X.keys())
print(Y.keys())
print(A.keys())
print(B.keys())
(X, Y) = convert_keys_to_ints(X, Y)
(A, B) = convert_keys_to_ints(A, B)
XY = X.copy()
XY.update(Y)
AB = A.copy()
AB.update(B)
cossims = construct_cossim_lookup(XY, AB)
logger.info("computing pval...")
pval = p_val_permutation_test(X, Y, A, B, cossims=cossims, n_samples=10000)
logger.info("pval: %g", pval)
logger.info("computing effect size...")
esize = effect_size(X, Y, A, B, cossims=cossims)
logger.info("esize: %g", esize)
| 8,207 | 28.52518 | 103 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/eval_bias.py | from __future__ import absolute_import, division, print_function
import numpy as np
import json
import os
import logging
import argparse
from scipy import spatial
import torch
import itertools
from itertools import combinations
import collections
import pickle
# word embeddings
import gensim
import gensim.downloader as api
from gensim.utils import tokenize
from eval_utils import isInSet
# first party
import weat
from run_classifier import get_encodings, compute_gender_dir, get_tokenizer_encoder
from run_classifier import get_def_examples
from experiments.def_sent_utils import get_all, get_all_domains, get_def_pairs, get_single_domain_in_buckets
from my_debiaswe import my_we
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
DATA_DIR = "../gender_tests/"
MAX_SEQ_LENGTH = 128
DEVICE = torch.device("cuda") if torch.cuda.is_available() else None
def load_json(sent_file):
''' Load from json. We expect a certain format later, so do some post processing '''
logger.info("Loading %s..." % sent_file)
all_data = json.load(open(sent_file, 'r'))
data = {}
for k, v in all_data.items():
examples = v["examples"]
data[k] = examples
	return all_data  # returns the full json (with category metadata) rather than the trimmed `data` dict
def parse_args():
'''Parse command line arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument("--model_path",
type=str,
default="bert-base-uncased",
help="Path of the model to be evaluated")
parser.add_argument("--debias",
action='store_true',
help="Whether to debias.")
parser.add_argument("--equalize",
action='store_true',
help="Whether to equalize.")
parser.add_argument("--def_pairs_name", default="all", type=str,
help="Name of definitional sentence pairs.")
parser.add_argument("--model", "-m", type=str, default="dummy")
parser.add_argument("--output_name", type=str)
parser.add_argument("--results_dir", type=str,
help="directory for storing results")
parser.add_argument("--encode_only", action='store_true')
parser.add_argument("--num_dimension", "-k", type=int, default=1,
help="dimensionality of bias subspace")
args = parser.parse_args()
	if (args.output_name is None):
args.output_name = args.def_pairs_name if args.debias else "biased"
print("outputname: {}".format(args.output_name))
	if (args.results_dir is None):
args.results_dir = os.path.join("results", args.model)
args.do_lower_case = True
args.cache_dir = None
args.local_rank = -1
args.max_seq_length = 128
args.eval_batch_size = 8
args.n_samples = 100000
args.parametric = True
args.tune_bert = False
args.normalize = True
# word embeddings
args.word_model = 'fasttext-wiki-news-subwords-300'
wedata_path = 'my_debiaswe/data'
args.wedata_path = wedata_path
args.definitional_filename = os.path.join(wedata_path, 'definitional_pairs.json')
args.equalize_filename = os.path.join(wedata_path, 'equalize_pairs.json')
args.gendered_words_filename = os.path.join(wedata_path, 'gender_specific_complete.json')
return args
def binary_weat(targets, attributes):
targetOne = []
targetTwo = []
for x in targets[0]:
targetOne.append(_binary_s(x, attributes))
for y in targets[1]:
targetTwo.append(_binary_s(y, attributes))
weat_score = np.absolute(sum(targetOne) - sum(targetTwo))
wtmp = [_binary_s(t, attributes) for t in targets[0] + targets[1]]
effect_std = np.std(wtmp)
num = np.absolute((sum(targetOne)/float(len(targetOne)) - sum(targetTwo)/float(len(targetTwo))))
effect_size = (num/effect_std)
return weat_score, effect_size
def _binary_s(target, attributes):
groupOne = []
groupTwo = []
for ai in attributes[0]:
groupOne.append(spatial.distance.cosine(target, ai))
for aj in attributes[1]:
groupTwo.append(spatial.distance.cosine(target, aj))
return sum(groupOne)/float(len(groupOne)) - sum(groupTwo)/float(len(groupTwo))
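# Toy run of `binary_weat` (synthetic 2-d vectors, not sentence encodings):
# targets[0] sits near attributes[0] and targets[1] near attributes[1], so
# both the WEAT score and the effect size come out clearly positive.
def _demo_binary_weat():
	targets = [[np.array([1.0, 0.0])], [np.array([0.0, 1.0])]]
	attributes = [[np.array([1.0, 0.1])], [np.array([0.1, 1.0])]]
	weat_score, effect_size = binary_weat(targets, attributes)
	print(weat_score, effect_size)  # ~1.79, 2.0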
def save_dict_to_json(D, output_eval_file):
with open(output_eval_file, 'w') as f:
json.dump(D, f)
def run_binary_weat_test(encs):
targ1 = list(encs['targ1']['encs'].values())
targ2 = list(encs['targ2']['encs'].values())
attr1 = list(encs['attr1']['encs'].values())
attr2 = list(encs['attr2']['encs'].values())
targets = [targ1, targ2]
attributes = [attr1, attr2]
weat_score, effect_size = binary_weat(targets, attributes)
return weat_score, effect_size
def evaluate(args, def_pairs, word_level=False):
'''Evaluate bias level with given definitional sentence pairs.'''
results_path = os.path.join(args.results_dir, args.output_name)
if (not args.encode_only):
if (os.path.exists(results_path)):
print("Results already evaluated in {}".format(results_path))
return
if (not os.path.exists(args.results_dir)): os.makedirs(args.results_dir)
results = []
all_tests_dict = dict()
tokenizer, bert_encoder = get_tokenizer_encoder(args, DEVICE)
print("tokenizer: {}".format(tokenizer==None))
gender_subspace = None
if (args.debias):
gender_subspace = compute_gender_dir(DEVICE, tokenizer, bert_encoder, def_pairs,
args.max_seq_length, k=args.num_dimension, load=True, task=args.model, word_level=word_level, keepdims=True)
logger.info("Computed (gender) bias direction")
with open(args.gendered_words_filename, "r") as f:
gender_specific_words = json.load(f)
specific_set = set(gender_specific_words)
abs_esizes = []
for test_id in ['6', '6b', '7', '7b', '8', '8b']:
filename = "sent-weat{}.jsonl".format(test_id)
sent_file = os.path.join(DATA_DIR, filename)
data = load_json(sent_file)
encs = get_encodings(args, data, tokenizer, bert_encoder, gender_subspace,
DEVICE, word_level=word_level, specific_set=specific_set)
if (args.encode_only):
if (args.debias):
outfile_name = 'debiased_encs{}.pkl'.format(test_id)
else:
outfile_name = 'biased_encs{}.pkl'.format(test_id)
with open(os.path.join(args.results_dir, outfile_name), 'wb') as outfile:
pickle.dump(encs, outfile)
continue
'''
encs: targ1, targ2, attr1, attr2
-> category
-> encs
-> (id1, sent1_emb), (id2, sent2_emb), ...
'''
esize, pval = weat.run_test(encs, n_samples=args.n_samples, parametric=args.parametric)
abs_esizes.append(abs(esize))
result = "{}: esize={} pval={}".format(filename, esize, pval)
print(filename, result)
results.append(result)
test_results = {"esize": esize, "pval": pval}
all_tests_dict[filename] = test_results
avg_absesize = np.mean(np.array(abs_esizes))
print("Averge of Absolute esize: {}".format(avg_absesize))
all_tests_dict['avg_absesize'] = avg_absesize
if (args.encode_only): return
# print and save results
for result in results: logger.info(result)
save_dict_to_json(all_tests_dict, results_path)
return
def eval_sent_debias():
'''
Evaluate bias level using definitional sentences
specified in args.
'''
args = parse_args()
def_pairs_name = args.def_pairs_name
size_prefix = "allsize"
accdomain_prefix = "accdomain"
domain_prefix = "moredomain"
if (def_pairs_name.startswith(size_prefix)):
# evaluate model
bucket_list = get_single_domain_in_buckets()
indices = np.arange(len(bucket_list))
size = int(def_pairs_name[len(size_prefix):])
choices_list = list(combinations(indices, size))
logger.info(choices_list)
for choices in choices_list:
logger.info(choices)
chosen_buckets = [bucket_list[i] for i in choices]
def_pairs = []
for bucket in chosen_buckets:
def_pairs += bucket
evaluate(args, def_pairs)
elif (def_pairs_name.startswith(accdomain_prefix)):
domain_list = get_all_domains(1000)
for domain in domain_list: print("domain size={}".format(len(domain)))
indices = np.arange(len(domain_list))
size = int(def_pairs_name[len(accdomain_prefix):])
choices_list = list(combinations(indices, size))
logger.info(choices_list)
for choices in choices_list:
logger.info(choices)
chosen_buckets = [domain_list[i] for i in choices]
def_pairs = []
for bucket in chosen_buckets:
def_pairs += bucket
evaluate(args, def_pairs)
elif (def_pairs_name.startswith(domain_prefix)):
indices = np.arange(4) # 4 domains
size = int(def_pairs_name[len(domain_prefix):])
choices_list = list(combinations(indices, size))
fixed_size = 1080
domain_size = int(fixed_size / size)
logger.info("{} samples per domain; domain: {}".format(domain_size, choices_list))
domain_list = get_all_domains(domain_size)
for choices in choices_list:
logger.info(choices)
chosen_buckets = [domain_list[i] for i in choices]
def_pairs = []
for bucket in chosen_buckets: def_pairs += bucket
evaluate(args, def_pairs)
else:
def_pairs = get_def_pairs(def_pairs_name)
evaluate(args, def_pairs)
class WordEvaluator(object):
"""Evaluator for fastText"""
def __init__(self, args):
super(WordEvaluator, self).__init__()
self.args = args
# define files for evaluation
self.filenames = []
for i in [6, 7, 8]:
self.filenames.append("sent-weat{}.jsonl".format(i))
self.filenames.append("sent-weat{}b.jsonl".format(i))
self.word_filenames = []
for i in [6, 7, 8]:
self.word_filenames.append("weat{}.jsonl".format(i))
self.word_filenames.append("weat{}b.jsonl".format(i))
self.vocab = self.init_vocab() # 190 words
self.expand_specific_vocab()
self.E = my_we.WordEmbedding(args.word_model, self.vocab)
if (args.debias): self.debias()
def init_vocab(self):
print("Initializing vocab for evaluation...")
vocab = set()
for filename in self.filenames:
sent_file = os.path.join(DATA_DIR, filename)
data = load_json(sent_file)
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
texts = data[key]['examples']
for text in texts:
words = set(tokenize(text))
vocab = vocab.union(words)
args = self.args
with open(args.definitional_filename, "r") as f:
definitional = json.load(f)
with open(args.equalize_filename, "r") as f:
equalize = json.load(f)
with open(args.gendered_words_filename, "r") as f:
gender_specific_words = json.load(f)
print("gender specific", len(gender_specific_words), gender_specific_words[:10])
for pair in definitional:
vocab = vocab.union(set(pair))
for pair in equalize:
if (pair[0] in vocab): vocab.add(pair[1])
if (pair[1] in vocab): vocab.add(pair[0])
print("Vocabulary size {}.".format(len(vocab)))
assert('gal' in vocab)
self.definitional = definitional
self.equalize = equalize
self.gender_specific_words = gender_specific_words
return vocab
# expanding gender_specific_full to gender_specific_complete
# with gender specific words from tests.
def expand_specific_vocab(self):
# expand gender specific words
gender_specific_words = set(self.gender_specific_words)
for word_filename in self.word_filenames:
word_file = os.path.join(DATA_DIR, word_filename)
data = load_json(word_file)
for key in ['targ1', 'targ2', "attr1", "attr2"]:
category = data[key]["category"]
print("category={}".format(category))
if (not "male" in category.lower()): continue
words = data[key]["examples"]
print(words)
gender_specific_words = gender_specific_words.union(set(words))
self.gender_specific_words = list(gender_specific_words)
def debias(self):
print("debiasing...")
definitional = self.definitional
equalize = self.equalize
gender_specific_words = self.gender_specific_words
		gender_subspace = my_we.doPCA(definitional, self.E).components_[:self.args.num_dimension]
		print("gender subspace shape: {}".format(gender_subspace.shape))
		# The equalize step below needs a single direction; take the first
		# principal component of the subspace (the standard hard-debias choice).
		gender_direction = gender_subspace[0]
		specific_set = set(gender_specific_words)
for i, w in enumerate(self.vocab):
if (not isInSet(w, specific_set)):
self.E.vecs[i] = my_we.dropspace(self.E.vecs[i], gender_subspace)
self.E.normalize()
# Equalize
equalize_subset = []
for pair in equalize:
if (pair[0] in self.vocab):
equalize_subset.append(pair)
candidates = {x for e1, e2 in equalize_subset for x in [(e1.lower(), e2.lower()),
(e1.title(), e2.title()),
(e1.upper(), e2.upper())]}
for (a, b) in candidates:
if (a in self.E.index and b in self.E.index):
y = my_we.drop((self.E.v(a) + self.E.v(b)) / 2, gender_direction)
z = np.sqrt(1 - np.linalg.norm(y)**2)
if (self.E.v(a) - self.E.v(b)).dot(gender_direction) < 0:
z = -z
self.E.vecs[self.E.index[a]] = z * gender_direction + y
self.E.vecs[self.E.index[b]] = -z * gender_direction + y
self.E.normalize()
print("finished debiasing")
def get_sent_embedding(self, sent):
words = tokenize(sent)
word_embeddings = np.array([self.E.v(w) for w in words]) # T x W(300)
sent_embeddings = np.mean(word_embeddings, axis=0)
return sent_embeddings
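	# get_sent_embedding above: T tokens -> (T, 300) fastText matrix -> 300-d mean.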
def get_encodings(self, data):
results = collections.defaultdict(dict)
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
texts = data[key]['examples']
category = data[key]['category'].lower()
logger.info("category={}".format(category))
results[key]['category'] = category
encs = dict()
for i, text in enumerate(texts):
encs[text] = self.get_sent_embedding(text)
results[key]['encs'] = encs
return results
def evaluate(self):
args = self.args
if (not os.path.exists(args.results_dir)): os.makedirs(args.results_dir)
results_path = os.path.join(args.results_dir, args.output_name)
results = []
all_tests_dict = dict()
for filename in self.filenames:
sent_file = os.path.join(DATA_DIR, filename)
data = load_json(sent_file)
encs = self.get_encodings(data)
esize, pval = weat.run_test(encs, n_samples=args.n_samples, parametric=args.parametric)
result = "{}: esize={} pval={}".format(filename, esize, pval)
			print(result)
results.append(result)
test_results = {"esize": esize, "pval": pval}
all_tests_dict[filename] = test_results
# print and save results
for result in results: logger.info(result)
save_dict_to_json(all_tests_dict, results_path)
def test_fastText():
args = parse_args()
evaluator = WordEvaluator(args)
evaluator.evaluate()
def test_bertword():
args = parse_args()
def_pairs = json.load(open(args.definitional_filename, "r"))
evaluate(args, def_pairs, word_level=True)
if __name__ == '__main__':
eval_sent_debias()
| 14,292 | 30.691796 | 111 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/elmo_models.py | from __future__ import print_function, division
import numpy as np
from allennlp.commands.elmo import ElmoEmbedder
import time
import torch
class ElmoEncoder(object):
def __init__(self):
self.elmo = ElmoEmbedder()
def encode_batch(self, sents):
start_time = time.time()
vec_seq = self.elmo.embed_sentences(sents)
elapsed_time = time.time() - start_time
print("embed_sentences {}".format(elapsed_time))
vecs = []
start_time = time.time()
for vec in vec_seq:
vecs.append(self.collapse_vec(vec))
# vecs = torch.stack(vecs)
vecs = np.stack(vecs)
		elapsed_time = time.time() - start_time
print("collapse {}".format(elapsed_time))
print("vecs ", vecs.shape)
return vecs
def collapse_vec(self, vec_seq, time_combine_method="max", layer_combine_method="add"):
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
return vec
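	# Shape sketch for collapse_vec above (assuming the default 3-layer ELMo):
	#   vec_seq: (3, T, 1024) -> time "max": (3, 1024) -> layer "add": (1024,)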
def encode(self, sents, time_combine_method="max", layer_combine_method="add"):
""" Load ELMo and encode sents """
vecs = {}
for sent in sents:
vec_seq = self.elmo.embed_sentence(sent)
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
vecs[' '.join(sent)] = vec
return vecs
def encode(sents, time_combine_method="max", layer_combine_method="add"):
""" Load ELMo and encode sents """
elmo = ElmoEmbedder()
vecs = {}
for sent in sents:
vec_seq = elmo.embed_sentence(sent)
if time_combine_method == "max":
vec = vec_seq.max(axis=1)
elif time_combine_method == "mean":
vec = vec_seq.mean(axis=1)
elif time_combine_method == "concat":
vec = np.concatenate(vec_seq, axis=1)
elif time_combine_method == "last":
vec = vec_seq[:, -1]
else:
raise NotImplementedError
if layer_combine_method == "add":
vec = vec.sum(axis=0)
elif layer_combine_method == "mean":
vec = vec.mean(axis=0)
elif layer_combine_method == "concat":
vec = np.concatenate(vec, axis=0)
elif layer_combine_method == "last":
vec = vec[-1]
else:
raise NotImplementedError
vecs[' '.join(sent)] = vec # 1024
return vecs
sentences = [
"hello, world!",
"happy birthday!",
"let's get it.",
"She's a girl.",
"That's a baby",
"how long was it?"]
for _ in range(3):
sentences += sentences
print(len(sentences))
start_time = time.time()
elmo_encoder = ElmoEncoder()
elapsed_time = time.time() - start_time
print("Initializing takes {}".format(elapsed_time))
start_time = time.time()
elmo_encoder.encode_batch(sentences)
elapsed_time = time.time() - start_time
print("Encoding takes {}".format(elapsed_time))
for sent in sentences:
start_time = time.time()
singleton_list = [sent]
elmo_encoder.encode(singleton_list)
elapsed_time = time.time() - start_time
print(elapsed_time)
| 3,806 | 26.388489 | 88 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/def_sent_utils.py | # standard library
from itertools import combinations
import numpy as np
import os, sys
from collections import defaultdict
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
np.random.seed(42)
words2 = [["woman", "man"], ["girl", "boy"], ["she", "he"], ["mother", "father"], ["daughter", "son"], ["gal", "guy"], ["female", "male"], ["her", "his"], ["herself", "himself"], ["Mary", "John"]]
words3 = [
["jewish", "christian", "muslim"],
["jews", "christians", "muslims"],
["torah", "bible", "quran"],
["synagogue", "church", "mosque"],
["rabbi", "priest", "imam"],
["judaism", "christianity", "islam"],
]
DIRECTORY = '../text_corpus/'
GENDER = 0
RACE = 1
def match(a,L):
for b in L:
if a == b:
return True
return False
def replace(a,new,L):
Lnew = []
for b in L:
if a == b:
Lnew.append(new)
else:
Lnew.append(b)
return ' '.join(Lnew)
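# e.g. replace("she", "he", ["she", "is", "here"]) -> "he is here"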
def template2(words, sent, sent_list, all_pairs):
for i, (female, male) in enumerate(words):
if match(female, sent_list):
sent_f = sent
sent_m = replace(female,male,sent_list)
all_pairs[i]['f'].append(sent_f)
all_pairs[i]['m'].append(sent_m)
if match(male, sent_list):
sent_f = replace(male,female,sent_list)
sent_m = sent
all_pairs[i]['f'].append(sent_f)
all_pairs[i]['m'].append(sent_m)
return all_pairs
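# Illustration: with words2[0] = ["woman", "man"], the sentence
# "the woman smiled" appends "the woman smiled" to all_pairs[0]['f']
# and the swapped "the man smiled" to all_pairs[0]['m'].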
def template3(words, sent, sent_list, all_pairs):
for (b1,b2,b3) in words:
if match(b1, sent_list):
sent_b1 = sent
sent_b2 = replace(b1,b2,sent_list)
sent_b3 = replace(b1,b3,sent_list)
pair = (sent_b1,sent_b2,sent_b3)
all_pairs.append(pair)
if match(b2, sent_list):
sent_b1 = replace(b2,b1,sent_list)
sent_b2 = sent
sent_b3 = replace(b2,b3,sent_list)
pair = (sent_b1,sent_b2,sent_b3)
all_pairs.append(pair)
if match(b3, sent_list):
sent_b1 = replace(b3,b1,sent_list)
sent_b2 = replace(b3,b2,sent_list)
sent_b3 = sent
pair = (sent_b1,sent_b2,sent_b3)
all_pairs.append(pair)
return all_pairs
def get_pom():
all_pairs2 = defaultdict(lambda: defaultdict(list))
all_pairs3 = []
pom_loc = os.path.join(DIRECTORY, 'POM/')
total = 0
num = 0
for file in os.listdir(pom_loc):
if file.endswith(".txt"):
f = open(os.path.join(pom_loc, file), 'r')
data = f.read()
for sent in data.lower().split('.'):
sent = sent.strip()
sent_list = sent.split(' ')
total += len(sent_list)
num += 1
all_pairs2 = template2(words2, sent, sent_list, all_pairs2)
all_pairs3 = template3(words3, sent, sent_list, all_pairs3)
return all_pairs2, all_pairs3
def get_rest(filename):
all_pairs2 = defaultdict(lambda: defaultdict(list))
all_pairs3 = []
total = 0
num = 0
f = open(os.path.join(DIRECTORY, filename), 'r')
data = f.read()
for sent in data.lower().split('\n'):
sent = sent.strip()
sent_list = sent.split(' ')
total += len(sent_list)
num += 1
all_pairs2 = template2(words2, sent, sent_list, all_pairs2)
all_pairs3 = template3(words3, sent, sent_list, all_pairs3)
print(filename, len(all_pairs2))
return all_pairs2, all_pairs3
def get_sst():
all_pairs2 = defaultdict(lambda: defaultdict(list))
all_pairs3 = []
total = 0
num = 0
for sent in open(os.path.join(DIRECTORY,'sst.txt'), 'r'):
try:
num = int(sent.split('\t')[0])
sent = sent.split('\t')[1:]
sent = ' '.join(sent)
		except ValueError:
			pass
sent = sent.lower().strip()
sent_list = sent.split(' ')
total += len(sent_list)
num += 1
all_pairs2 = template2(words2, sent, sent_list, all_pairs2)
all_pairs3 = template3(words3, sent, sent_list, all_pairs3)
return all_pairs2, all_pairs3
def get_more_domains():
def sample(data, n_samples=1000):
n = len(data)
indices = np.random.choice(n, n_samples, replace=False)
sampled_data = []
for index in indices: sampled_data.append(data[index])
return sampled_data
print("More domains")
domains = ["reddit", "sst", "pom", "wikitext"]
bucket_list = []
for i, domain in enumerate(domains):
domain_data = get_single_domain(domain)
domain_data = sample(domain_data)
print(domain, len(domain_data))
if (i == 0):
bucket_list.append(domain_data)
else:
bucket_list.append(bucket_list[-1] + domain_data)
print("bucket sizes:")
for bucket in bucket_list:
print(len(bucket))
return bucket_list
'''
Collect n_samples templates from each source: reddit, sst, pom, wikitext
Return a list where each element is a list of sentence pairs from one source.
'''
def get_all_domains(n_samples):
def sample(data):
n = len(data)
indices = np.random.choice(n, n_samples, replace=False)
sampled_data = []
for index in indices: sampled_data.append(data[index])
return sampled_data
bucket_list = []
domains = ["reddit", "sst", "pom", "wikitext"]
for i, domain in enumerate(domains):
domain_data = get_single_domain(domain)
domain_data = sample(domain_data)
bucket_list.append(domain_data)
return bucket_list
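# Usage sketch: get_all_domains(n) returns one bucket per source,
#   [reddit_bucket, sst_bucket, pom_bucket, wikitext_bucket],
# each holding n templates sampled from that source.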
def old_get_more_domains():
print("More domains")
b21, b31 = get_rest('news.txt')
print ('news', len(b21), len(b31))
b22, b32 = get_rest('reddit.txt')
print ('reddit', len(b22), len(b32))
b23, b33 = get_sst()
print ('sst', len(b23), len(b33))
b24, b34 = get_pom()
print ('pom', len(b24), len(b34))
b25, b35 = get_rest('wikitext.txt')
print ('wikitext', len(b25), len(b35))
b22 += b21
b32 += b31
b23 += b22
b33 += b32
b24 += b23
b34 += b33
b25 += b24
b35 += b34
bucket1 = (b21,b31)
bucket2 = (b22,b32)
bucket3 = (b23,b33)
bucket4 = (b24,b34)
bucket5 = (b25,b35)
print(len(b21), len(b31))
print(len(b22), len(b32))
print(len(b23), len(b33))
print(len(b24), len(b34))
print(len(b25), len(b35))
return bucket1, bucket2, bucket3, bucket4, bucket5
def get_single_domain_in_buckets(domain="wikitext", buckets=5):
print("Same domain with divided into buckets")
all_pairs_gender = get_single_domain(domain)
bucket_size = int(len(all_pairs_gender)/buckets)
bucket_list = []
for i in range(buckets):
bucket_list.append(all_pairs_gender[i*bucket_size:(i+1)*bucket_size])
return bucket_list
def get_same_domain_more_size(domain="wikitext"):
print("Same domain with different sizes")
all_pairs2, all_pairs3 = get_single_domain(domain)
print (domain, len(all_pairs2), len(all_pairs3))
buckets = 5
each2 = int(len(all_pairs2)/5)
each3 = int(len(all_pairs3)/5)
b21 = all_pairs2[:each2]
b22 = all_pairs2[:2*each2]
b23 = all_pairs2[:3*each2]
b24 = all_pairs2[:4*each2]
b25 = all_pairs2
b31 = all_pairs3[:each3]
b32 = all_pairs3[:2*each3]
b33 = all_pairs3[:3*each3]
b34 = all_pairs3[:4*each3]
b35 = all_pairs3
bucket1 = (b21,b31)
bucket2 = (b22,b32)
bucket3 = (b23,b33)
bucket4 = (b24,b34)
bucket5 = (b25,b35)
print(len(b21), len(b31))
print(len(b22), len(b32))
print(len(b23), len(b33))
print(len(b24), len(b34))
print(len(b25), len(b35))
return bucket1, bucket2, bucket3, bucket4, bucket5
def check_bucket_size(D):
n = 0
for i in D:
for key in D[i]:
n += len(D[i][key])
break
return n
# domain: news, reddit, sst, pom, wikitext
def get_single_domain(domain):
if (domain == "pom"):
gender, race = get_pom()
elif (domain == "sst"):
gender, race = get_sst()
else:
gender, race = get_rest("{}.txt".format(domain))
return gender
def get_all():
domains = ["reddit", "sst", "wikitext", "pom", "meld", "news_200"] #, "yelp_review_10mb"] # "news_200"]
print("Get data from {}".format(domains))
all_data = defaultdict(lambda: defaultdict(list))
for domain in domains:
bucket = get_single_domain(domain)
bucket_size = check_bucket_size(bucket)
print("{} has {} pairs of templates".format(domain, bucket_size))
for i in bucket:
for term in bucket[i]:
all_data[i][term].extend(bucket[i][term])
total_size = check_bucket_size(all_data)
print("{} pairs of templates in total".format(total_size))
return all_data
def get_def_pairs(def_pairs_name):
eqsize_prefix = 'eqsize'
# all 5 sets
if (def_pairs_name == "all"):
return get_all()
elif (def_pairs_name.startswith(eqsize_prefix)):
n_samples = int(def_pairs_name[len(eqsize_prefix):])
print("Select {} templates from each source.".format(n_samples))
domain_list = get_all_domains(n_samples)
def_pairs = []
for domain_data in domain_list: def_pairs.extend(domain_data)
return def_pairs
# wikitext, with varying size
elif (def_pairs_name.startswith("size")):
size = int(def_pairs_name[4:])
buckets = get_same_domain_more_size()
bucket = buckets[size-1][GENDER]
return bucket
# varying number of domains
elif (def_pairs_name.startswith("domain")):
num_domains = int(def_pairs_name[6:])
buckets = old_get_more_domains()
bucket = buckets[num_domains-1][GENDER]
return bucket
# accumulating domains with same number of samples
elif (def_pairs_name.startswith("accdomain")):
start_idx = len("accdomain")
num_domains = int(def_pairs_name[start_idx:])
buckets = get_more_domains()
bucket = buckets[num_domains-1]
return bucket
# single-domain
elif (def_pairs_name in ["news", "reddit", "sst", "pom", "wikitext"]):
return get_single_domain(def_pairs_name)
else:
raise Exception("invalid defining pairs name")
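# Recognized def_pairs_name values include, for example: "all", "eqsize1000",
# "size3", "domain2", "accdomain3", or a single source such as "reddit".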
if __name__ == '__main__':
data = get_all() | 9,154 | 26.492492 | 196 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/visualize.py | import os
import argparse
import json, pickle
from collections import defaultdict
from pattern.en import pluralize, singularize
import nltk
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import numpy as np
from numpy import linalg as LA
import matplotlib
import matplotlib.pyplot as plt
import gensim.downloader as api
from run_classifier import get_encodings, compute_gender_dir, get_tokenizer_encoder
DATA_DIR = "../bias_data/gender_tests/"
def load_json(file):
''' Load from json. We expect a certain format later, so do some post processing '''
all_data = json.load(open(file, 'r'))
data = {}
for k, v in all_data.items():
examples = v["examples"]
data[k] = examples
return all_data # data
def my_pluralize(word):
if (word in ["he", "she", "her", "hers"]): return word
if (word == "brother"): return "brothers"
if (word == "drama"): return "dramas"
return pluralize(word)
def my_singularize(word):
if (word in ["hers", "his", "theirs"]): return word
return singularize(word)
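# e.g. my_pluralize("drama") -> "dramas"; my_singularize("hers") -> "hers"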
def match_one_test(test_name):
# load words
word_filename = "weat{}.jsonl".format(test_name)
word_file = os.path.join(DATA_DIR, word_filename)
word_data = load_json(word_file)
# load simple sentences
sent_filename = "sent-weat{}.jsonl".format(test_name)
sent_file = os.path.join(DATA_DIR, sent_filename)
sent_data = load_json(sent_file)
word2sents = dict()
num_sents = 0
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
words = word_data[key]['examples']
for word in words: word2sents[word] = []
all_words = set(word2sents.keys())
print("all words")
print(all_words)
unmatched_sents = []
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
sents = sent_data[key]['examples']
for sent in sents:
matched = False
for word in all_words:
word_ = word.lower()
sent_ = sent.lower()
tokens = nltk.word_tokenize(sent_)
word_variants = set({word})
word_variants.add(my_pluralize(word_))
word_variants.add(my_singularize(word_))
matched_words = []
for word_variant in word_variants:
if (word_variant in tokens):
matched_words.append(word)
				if (matched_words):
					print("'{}' is matched to {}!".format(sent, word))
print(matched_words)
matched = True
word2sents[word].append(sent)
break
if (not matched): unmatched_sents.append(sent)
with open(os.path.join(DATA_DIR, 'word2sents{}.jsonl'.format(test_name)), 'w') as outfile:
json.dump(word2sents, outfile)
print("unmatched: {}".format(unmatched_sents))
return
def match():
for test_name in [6, 7, 8]:
match_one_test("{}".format(test_name))
match_one_test("{}b".format(test_name))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_path",
type=str,
default="bert-base-uncased",
help="Path of the model to be evaluated")
parser.add_argument("--debias",
action='store_true',
help="Whether to debias.")
parser.add_argument("--equalize",
action='store_true',
help="Whether to equalize.")
parser.add_argument("--def_pairs_name", default="large_real", type=str,
help="Name of definitional sentence pairs.")
parser.add_argument("--model", "-m", type=str, default="dummy")
parser.add_argument("--output_name", type=str, required=True)
args = parser.parse_args()
args.results_dir = os.path.join("acl_bias_eval_results", args.model)
args.do_lower_case = True
args.cache_dir = None
args.local_rank = -1
args.max_seq_length = 128
args.eval_batch_size = 8
args.n_samples = 100000
args.parametric = True
args.tune_bert = False
args.normalize = True
# word embeddings
args.word_model = 'fasttext-wiki-news-subwords-300'
wedata_path = 'my_debiaswe/data'
args.definitional_filename = os.path.join(wedata_path, 'definitional_pairs.json')
args.equalize_filename = os.path.join(wedata_path, 'equalize_pairs.json')
args.gendered_words_filename = os.path.join(wedata_path, 'gender_specific_full.json')
return args
def get_sent_vectors(test_name, debiased):
if (debiased):
sent_encs_filename = "debiased_sent_encs{}.pkl".format(test_name)
else:
sent_encs_filename = "sent_encs{}.pkl".format(test_name)
file = open(os.path.join(DATA_DIR, sent_encs_filename), 'rb')
data = pickle.load(file)
all_sent_vectors = dict()
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
text_ids = data[key]['text_ids']
encs = data[key]['encs']
for i in text_ids:
text = text_ids[i]
vector = encs[str(i)]
all_sent_vectors[text] = vector
print("loaded sentence vectors")
return all_sent_vectors
def tsne_plot(word_vectors, word_labels, plot_name, title, do_PCA=False):
words = list(word_vectors.keys())
X = np.array([word_vectors[word] for word in words])
# PCA (optional)
if (do_PCA):
print("PCA")
pca = PCA()
pca.fit(X.T)
components = pca.components_
X = components.T # Nx50
print("After PCS: {}".format(X.shape))
# t-SNE
X_embedded = TSNE(n_components=2, perplexity=len(X)-1).fit_transform(X) # Nx2
y, z = X_embedded[:, 0], X_embedded[:, 1]
data_by_label = defaultdict(list)
for i, word in enumerate(words):
label = word_labels[word]
data_by_label[label].append(i)
fig, ax = plt.subplots()
colors = ['r', 'g', 'b', 'c']
for label_id, label in enumerate(data_by_label.keys()):
indices = np.array(data_by_label[label])
sub_y = y[indices]
sub_z = z[indices]
ax.scatter(sub_y, sub_z, label=label, c=colors[label_id])
for word_id in indices:
ax.annotate(words[word_id], (y[word_id], z[word_id]), size=14)
ax.legend()
directory = "tsne_plots"
if (not os.path.exists(directory)): os.makedirs(directory)
	ax.set_title(title)
plt.savefig(os.path.join(directory, plot_name))
plt.clf()
def simple_tsne_plot(word_vectors, perplexity, title, filename, do_PCA=True, do_tsne=False):
assert(do_PCA or do_tsne)
	if (do_tsne): assert(perplexity is not None)
words = ['woman', 'man', 'family', 'career', 'math', 'art',
"science", "literature", "technology", "dance"]
X = np.array([word_vectors[word] for word in words])
# PCA (optional)
if (do_PCA):
print("PCA")
pca = PCA()
pca.fit(X.T)
components = pca.components_
X = components.T # Nx50
print("After PCS: {}".format(X.shape))
# t-SNE
if (do_tsne):
X_embedded = TSNE(n_components=2, perplexity=perplexity).fit_transform(X) # Nx2
X1, X2 = X_embedded[:, 0], X_embedded[:, 1]
else:
X1, X2 = X[:, 0], X[:, 1]
word_dict = dict()
for i, word in enumerate(words):
word_dict[word] = (X1[i], X2[i])
with open('{}.pkl'.format(filename), 'wb') as f:
pickle.dump(word_dict, f)
print("write to {}".format(filename))
return
color_dict = {"woman": 'r', "man": "b"}
colors = [color_dict.get(word, 'k') for word in words]
plt.scatter(X1, X2, color=colors)
for word_id, word in enumerate(words):
plt.annotate(word, (X1[word_id], X2[word_id]))
x_margin = (max(X1)-min(X1)) * 0.1
y_margin = (max(X2)-min(X2)) * 0.1
plt.xlim(min(X1)-x_margin, max(X1)+x_margin)
plt.ylim(min(X2)-y_margin, max(X2)+y_margin)
plt.title(title, fontsize=20)
plt.xticks([])
plt.yticks([])
plot_dir = "visual_plots"
filename = os.path.join(plot_dir, plot_name)
directory = os.path.dirname(filename)
if (not os.path.exists(directory)): os.makedirs(directory)
print("Saving to {}".format(filename))
plt.savefig(filename)
plt.clf()
def get_word_labels(test_name):
word_filename = "weat{}.jsonl".format(test_name)
word_file = os.path.join(DATA_DIR, word_filename)
word_data = load_json(word_file)
labels = dict()
for key in ['targ1', 'targ2', 'attr1', 'attr2']:
words = word_data[key]['examples']
category = word_data[key]['category']
for word in words: labels[word] = category
return labels
def visualize_test(test_name, debiased):
bias_flag = "debiased" if debiased else "biased"
print("Visualize {} {}".format(bias_flag, test_name))
file_name = os.path.join(DATA_DIR, 'word2sents{}.jsonl'.format(test_name))
word2sents = json.load(open(file_name, 'r'))
print(list(word2sents.keys()))
all_sent_vectors = get_sent_vectors(test_name, debiased)
word_labels = get_word_labels(test_name)
word_vectors = dict()
for word in word2sents:
sents = word2sents[word]
sent_vectors = np.array([all_sent_vectors[sent] for sent in sents])
word_vector = np.mean(sent_vectors, axis=0)
word_vectors[word] = word_vector
tsne_plot(word_vectors, word_labels, "{}_{}.png".format(bias_flag, test_name),
"Test {} {} Word Embedding t-SNE Plot".format(test_name, bias_flag))
def visualize_all(debiased):
for test_name in ['6', '6b', '7', '7b', '8', '8b']:
visualize_test(test_name, debiased=debiased)
def words_from_sents(word2sents, test_name, debiased):
all_sent_vectors = get_sent_vectors(test_name, debiased)
word_vectors = dict()
for word in word2sents:
sents = word2sents[word]
sent_vectors = np.array([all_sent_vectors[sent] for sent in sents])
sent_vectors = sent_vectors / LA.norm(sent_vectors, ord=2, axis=-1, keepdims=True)
word_vector = np.mean(sent_vectors, axis=0)
word_vector = word_vector / LA.norm(word_vector, ord=2, axis=-1, keepdims=True)
# word_vector = all_sent_vectors[sents[0]]
word_vectors[word] = word_vector
return word_vectors
def get_fasttext(words):
model = api.load("fasttext-wiki-news-subwords-300") # takes a few minutes
word_vectors = dict()
for word in words:
word_vectors[word] = model.word_vec(word)
return word_vectors
def visualize_few_words(debiased, do_PCA, do_tsne, perplexity=None, use_sents=True):
assert(do_PCA or do_tsne)
bias_flag = "debiased" if debiased else "biased"
sent_flag = "sent" if use_sents else "word"
print("Visualize a few words")
all_word_vectors = dict()
for test_name in ['6', '7', '8']:
file_name = os.path.join(DATA_DIR, 'word2sents{}.jsonl'.format(test_name))
word2sents = json.load(open(file_name, 'r'))
words = list(word2sents.keys())
print(words)
if (use_sents):
word_vectors = words_from_sents(word2sents, test_name, debiased)
else:
word_vectors = get_fasttext(words)
all_word_vectors.update(word_vectors)
# Plot
if (not do_tsne):
directory = "pca"
filename = "{}_{}_pca".format(bias_flag, sent_flag)
# plot_name = "{}.png".format(filename)
title = "{} Word Embedding PCA Plot".format(bias_flag.capitalize())
elif (do_tsne and not do_PCA):
directory = "tsne"
filename = "{}_{}_p{}".format(bias_flag, sent_flag, perplexity)
title = "{} Word Embedding t-SNE Plot".format(bias_flag.capitalize())
elif (do_tsne and do_PCA):
directory = "pca_tsne"
filename = "{}_{}_pca_p{}".format(bias_flag, sent_flag, perplexity)
title = "{} Word Embedding t-SNE Plot (perplexity={}) with PCA".format(bias_flag.capitalize(), perplexity)
simple_tsne_plot(all_word_vectors, perplexity, title,
filename, do_PCA=do_PCA, do_tsne=do_tsne)
if __name__ == '__main__':
# tsne only
for p in [4]:
visualize_few_words(debiased=True, do_PCA=False, do_tsne=True, perplexity=p)
visualize_few_words(debiased=False, do_PCA=False, do_tsne=True, perplexity=p)
# PCA and tsne
for p in [2, 4, 8, 16]:
visualize_few_words(debiased=True, do_PCA=True, do_tsne=True, perplexity=p)
visualize_few_words(debiased=False, do_PCA=True, do_tsne=True, perplexity=p)
# PCA only
visualize_few_words(debiased=True, do_PCA=True, do_tsne=False)
visualize_few_words(debiased=False, do_PCA=True, do_tsne=False) | 11,309 | 31.877907 | 108 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/eval_utils.py | from pattern3.en import pluralize, singularize
def my_pluralize(word):
if (word in ["he", "she", "her", "hers"]): return word
if (word == "brother"): return "brothers"
if (word == "drama"): return "dramas"
return pluralize(word)
def my_singularize(word):
if (word in ["hers", "his", "theirs"]): return word
return singularize(word)
def isInSet(word, word_set):
for wi in [word, my_pluralize(word), my_singularize(word)]:
if (wi in word_set): return True
if (wi.lower() in word_set): return True
return False | 524 | 26.631579 | 60 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/elmo_text_cls.py | from pathlib import Path
from typing import *
import torch
import torch.optim as optim
import numpy as np
import pandas as pd
from functools import partial
from overrides import overrides
from allennlp.data import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.nn import util as nn_util
class Config(dict):
def __init__(self, **kwargs):
super().__init__(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def set(self, key, val):
self[key] = val
setattr(self, key, val)
config = Config(
testing=True,
seed=1,
batch_size=64,
lr=3e-4,
epochs=2,
hidden_sz=64,
max_seq_len=100, # necessary to limit memory usage
max_vocab_size=100000,
)
from allennlp.common.checks import ConfigurationError
USE_GPU = torch.cuda.is_available()
DATA_ROOT = Path("../data") / "jigsaw"
torch.manual_seed(config.seed)
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers import DatasetReader
label_cols = ["toxic", "severe_toxic", "obscene",
"threat", "insult", "identity_hate"]
from allennlp.data.fields import TextField, MetadataField, ArrayField
class JigsawDatasetReader(DatasetReader):
def __init__(self, tokenizer: Callable[[str], List[str]]=lambda x: x.split(),
token_indexers: Dict[str, TokenIndexer] = None,
max_seq_len: Optional[int]=config.max_seq_len) -> None:
super().__init__(lazy=False)
self.tokenizer = tokenizer
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_seq_len = max_seq_len
@overrides
def text_to_instance(self, tokens: List[Token]) -> Instance:
sentence_field = TextField(tokens, self.token_indexers)
fields = {"tokens": sentence_field}
return Instance(fields)
	@overrides
	def _read(self, file_path: str) -> Iterator[Instance]:
		# Minimal sketch: ignore `file_path` and yield from the small
		# module-level `sentences` demo list defined further below.
		for sent in sentences:
			yield self.text_to_instance(
				[Token(x) for x in self.tokenizer(sent)]
			)
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper, ELMoTokenCharactersIndexer
# the token indexer is responsible for mapping tokens to integers
token_indexer = ELMoTokenCharactersIndexer()
def tokenizer(x: str):
return [w.text for w in
SpacyWordSplitter(language='en_core_web_sm',
pos_tags=False).split_words(x)[:config.max_seq_len]]
sent = "Hello world!"
print("sent", sent)
print(tokenizer(sent))
tokens = [Token(x) for x in tokenizer(sent)]
for token in tokens: print(type(token))
reader = JigsawDatasetReader(
tokenizer=tokenizer,
token_indexers={"tokens": token_indexer}
)
# A tiny in-memory corpus keeps this demo self-contained; the CSV-based
# loading is kept commented for reference:
# train_ds, test_ds = (reader.read(DATA_ROOT / fname) for fname in ["train.csv", "test_proced.csv"])
# val_ds = None
sentences = ["Hello world!", "happy birthday!", "let's get it."]
train_ds = reader.read("unused")  # _read above ignores the path and yields from `sentences`
vocab = Vocabulary()
from allennlp.data.iterators import BucketIterator
iterator = BucketIterator(batch_size=config.batch_size,
sorting_keys=[("tokens", "num_tokens")],
)
iterator.index_with(vocab)
batch = next(iter(iterator(train_ds)))
import torch
import torch.nn as nn
import torch.optim as optim
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder, PytorchSeq2VecWrapper
from allennlp.nn.util import get_text_field_mask
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
class BaselineModel(Model):
def __init__(self, word_embeddings: TextFieldEmbedder,
encoder: Seq2VecEncoder,
out_sz: int=len(label_cols)):
super().__init__(vocab)
self.word_embeddings = word_embeddings
self.encoder = encoder
self.projection = nn.Linear(self.encoder.get_output_dim(), out_sz)
self.loss = nn.BCEWithLogitsLoss()
	def forward(self, tokens: Dict[str, torch.Tensor],
				id: Any = None, label: torch.Tensor = None) -> torch.Tensor:
		mask = get_text_field_mask(tokens)
		embeddings = self.word_embeddings(tokens)
		state = self.encoder(embeddings, mask)
		class_logits = self.projection(state)
		output = {"class_logits": class_logits}
		# The demo reader yields no labels; compute a loss only when one is given.
		if label is not None:
			output["loss"] = self.loss(class_logits, label.float())
		return output
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import ElmoTokenEmbedder
options_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json'
weight_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5'
elmo_embedder = ElmoTokenEmbedder(options_file, weight_file)
word_embeddings = BasicTextFieldEmbedder({"tokens": elmo_embedder})
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
encoder: Seq2VecEncoder = PytorchSeq2VecWrapper(nn.LSTM(word_embeddings.get_output_dim(), config.hidden_sz, bidirectional=True, batch_first=True))
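# With bidirectional=True the LSTM output is 2 * config.hidden_sz wide;
# PytorchSeq2VecWrapper reports this via get_output_dim() for the projection layer.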
model = BaselineModel(
word_embeddings,
encoder,
)
if USE_GPU: model.cuda()
batch = nn_util.move_to_device(batch, 0 if USE_GPU else -1)
tokens = batch["tokens"]
labels = batch.get("label")  # None here: the demo reader produces no labels
mask = get_text_field_mask(tokens)
embeddings = model.word_embeddings(tokens)
state = model.encoder(embeddings, mask)
class_logits = model.projection(state)
output = model(**batch)
# Without labels there is no "loss" key, so guard before backpropagating.
if "loss" in output:
	output["loss"].backward()
| 5,308 | 29.337143 | 148 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/my_debiaswe/data.py |
gender_specific_full = ["he", "his", "He", "her", "she", "him", "She", "man", "women",
"men", "His", "woman", "spokesman", "wife", "himself", "son", "mother", "father",
"chairman", "daughter", "husband", "guy", "girls", "girl", "Her", "boy", "King",
"boys", "brother", "Chairman", "spokeswoman", "female", "sister", "Women", "Man",
"male", "herself", "Lions", "Lady", "brothers", "dad", "actress", "mom", "sons",
"girlfriend", "Kings", "Men", "daughters", "Prince", "Queen", "teenager", "lady",
"Bulls", "boyfriend", "sisters", "Colts", "mothers", "Sir", "king", "businessman",
"Boys", "grandmother", "grandfather", "deer", "cousin", "Woman", "ladies", "Girls",
"Father", "uncle", "PA", "Boy", "Councilman", "mum", "Brothers", "MA", "males", "Girl",
"Mom", "Guy", "Queens", "congressman", "Dad", "Mother", "grandson", "twins", "bull",
"queen", "businessmen", "wives", "widow", "nephew", "bride", "females", "aunt",
"Congressman", "prostate_cancer", "lesbian", "chairwoman", "fathers", "Son", "moms",
"Ladies", "maiden", "granddaughter", "younger_brother", "Princess", "Guys", "lads",
"Ma", "Sons", "lion", "Bachelor", "gentleman", "fraternity", "bachelor", "niece",
"Lion", "Sister", "bulls", "husbands", "prince", "colt", "salesman", "Bull", "Sisters",
"hers", "dude", "Spokesman", "beard", "filly", "Actress", "Him", "princess", "Brother",
"lesbians", "councilman", "actresses", "Viagra", "gentlemen", "stepfather", "Deer",
"monks", "Beard", "Uncle", "ex_girlfriend", "lad", "sperm", "Daddy", "testosterone",
"MAN", "Female", "nephews", "maid", "daddy", "mare", "fiance", "Wife", "fiancee",
"kings", "dads", "waitress", "Male", "maternal", "heroine", "feminist", "Mama",
"nieces", "girlfriends", "Councilwoman", "sir", "stud", "Mothers", "mistress",
"lions", "estranged_wife", "womb", "Brotherhood", "Statesman", "grandma", "maternity",
"estrogen", "ex_boyfriend", "widows", "gelding", "diva", "teenage_girls", "nuns",
"Daughter", "czar", "ovarian_cancer", "HE", "Monk", "countrymen", "Grandma",
"teenage_girl", "penis", "bloke", "nun", "Husband", "brides", "housewife",
"spokesmen", "suitors", "menopause", "monastery", "patriarch", "Beau", "motherhood",
"brethren", "stepmother", "Dude", "prostate", "Moms", "hostess", "twin_brother", "Colt",
"schoolboy", "eldest", "brotherhood", "Godfather", "fillies", "stepson", "congresswoman",
"Chairwoman", "Daughters", "uncles", "witch", "Mommy", "monk", "viagra", "paternity",
"suitor", "chick", "Pa", "sorority", "macho", "Spokeswoman", "businesswoman",
"eldest_son", "gal", "statesman", "schoolgirl", "fathered", "goddess", "hubby",
"mares", "stepdaughter", "blokes", "dudes", "socialite", "strongman", "Witch",
"uterus", "grandsons", "Bride", "studs", "mama", "Aunt", "godfather", "hens",
"hen", "mommy", "Babe", "estranged_husband", "Fathers", "elder_brother", "boyhood", "baritone", "Diva", "Lesbian", "grandmothers", "grandpa", "boyfriends", "feminism", "countryman", "stallion", "heiress", "queens", "Grandpa", "witches", "aunts", "semen", "fella", "granddaughters", "chap", "knight", "widower", "Maiden", "salesmen", "convent", "KING", "vagina", "beau", "babe", "HIS", "beards", "handyman", "twin_sister", "maids", "gals", "housewives", "Gentlemen", "horsemen", "Businessman", "obstetrics", "fatherhood", "beauty_queen", "councilwoman", "princes", "matriarch", "colts", "manly", "ma", "fraternities", "Spokesmen", "pa", "fellas", "Gentleman", "councilmen", "dowry", "barbershop", "Monks", "WOMAN", "fraternal",
"ballerina", "manhood", "Dads", "heroines", "granny", "gynecologist", "princesses",
"Goddess", "yo", "Granny", "knights", "eldest_daughter", "HER", "underage_girls",
"masculinity", "Girlfriend", "bro", "Grandmother", "grandfathers", "crown_prince",
"Restless", "paternal", "Queen_Mother", "Boyfriend", "womens", "Males", "SHE",
"Countess", "stepchildren", "Belles", "bachelors", "matron", "momma", "Legs",
"maidens", "goddesses", "landlady", "sisterhood", "Grandfather", "Fraternity",
"Majesty", "Babes", "lass", "maternal_grandmother", "blondes", "ma'am", "Womens",
"divorcee", "Momma", "fathering", "Effie", "Lad", "womanhood", "missus", "Sisterhood",
"granddad", "Mens", "papa", "gf", "sis", "Husbands", "Hen", "womanizer", "gynecological",
"stepsister", "Handsome", "Prince_Charming", "BOY", "stepdad", "teen_ager", "GIRL",
"dame", "Sorority", "beauty_pageants", "raspy", "harem", "maternal_grandfather", "Hes",
"deliveryman", "septuagenarian", "damsel", "paternal_grandmother", "paramour", "paternal_grandparents", "Nun", "DAD", "mothering", "shes", "HE_'S", "Nuns", "teenage_daughters", "auntie", "widowed_mother", "Girlfriends", "FATHER", "virile", "COUPLE", "grandmas", "Hubby", "nan", "vixen", "Joan_Crawford", "stepdaughters", "endometrial_cancer", "stepsons", "loins", "Grandson", "Mitchells", "erections", "Matron", "Fella", "daddies", "ter", "Sweetie", "Dudes", "Princesses", "Lads", "lioness", "Mamma", "virility", "bros", "womenfolk", "Heir", "BROTHERS", "manliness", "patriarchs", "earl", "sisterly", "Whore", "Gynaecology", "countess", "convents", "Oratory", "witch_doctor", "mamas", "yah", "aunty", "aunties", "Heiress", "lasses", "Breasts", "fairer_sex", "sorority_sisters", "WIFE", "Laurels", "penile", "nuh", "mah", "toms", "mam", "Granddad", "premenopausal_women", "Granddaddy", "nana", "coeds", "dames", "herdsman", "Mammy", "Fellas", "Niece", "menfolk", "Grandad", "bloods", "Gramps", "damsels", "Granddaughter", "mamma", "concubine", "Oros", "Blarney", "filial", "broads", "Ethel_Kennedy", "ACTRESS", "Tit", "fianc", "Hunk", "Night_Shift", "wifey", "Lothario", "Holy_Roman_Emperor", "horse_breeder", "grandnephew", "Lewises", "Muscular", "feminist_movement", "Sanan", "Fiancee", "dowries", "Carmelite", "rah", "n_roller", "bay_filly", "belles", "Uncles", "PRINCESS", "womans", "Homeboy", "Blokes", "Charmer", "codger", "Delta_Zeta", "courtesans", "grandaughter", "SISTER", "Highness", "grandbabies", "crone", "Skip_Away", "noblewoman", "bf", "jane", "philandering_husband", "Sisqo", "mammy", "daugher", "director_Skip_Bertman", "DAUGHTER", "Royal_Highness", "mannish", "spinsters", "Missus", "madame", "Godfathers", "saleswomen", "beaus", "Risha", "luh", "sah", "negligee", "Hos", "salesgirl", "grandmom", "Grandmas", "Lawsons", "countrywomen", "Booby", "darlin", "Sheiks", "boyz", "wifes", "Bayi", "Il_Duce", "fem", "daugther", "Potti", "hussy", "tch", "Gelding", "stemmed_roses", "Damson", "puh", "Tylers", "neice", "Mutha", "GRANDMOTHER", "youse", "spurned_lover", "mae", "Britt_Ekland", "clotheshorse", "Carlita_Kilpatrick", "Cambest", "Pretty_Polly", "banshees", "male_chauvinist", "Arliss", "mommas", "maidservant", "Gale_Harold", "Little_Bo_Peep", "Cleavers", "hags", "blowsy", "Queen_Elizabeth_I.", "lassies", "papas", "BABE", "ugly_ducklings", "Jims", "hellion", "Beautician", "coalminer", "relaxin", "El_Mahroug", "Victoria_Secret_Angel", "shepherdess", "Mosco", "Slacks", "nanna", "wifely", "tomboys", "LAH", "hast", "apo", "Kaplans", "milkmaid", "Robin_Munis", "John_Barleycorn", "royal_highness", "Meanie", "NAH", "trollop", "roh", "Jewess", "Sheik_Hamad", "mumsy", "Big_Pussy", "chil_dren", "Aunt_Bea", "basso", "sista", "girlies", "nun_Sister", "chica", "Bubbas", "massa", "Southern_belles", "Nephews", "castrations", "Mister_Ed", "Grandsons", "Calaf", "Malachy_McCourt", "Shamash", "hey_hey", "Harmen", "sonofabitch", "Donovans", "Grannie", "Kalinka", "hisself", "Devean", "goatherd", "hinds", "El_Corredor", "Kens", "notorious_womanizer", "goh", "Mommas", "washerwoman", "Samaira", "Coo_Coo", "Governess", "grandsire", "PRINCE_WILLIAM", "gramma", "him.He", "Coptic_priest", "Corbie", "Kennys", "thathe", "Pa_Pa", "Bristols", "Hotep", "snowy_haired", "El_Prado_Ire", "Girl_hitmaker", "Hurleys", "St._Meinrad", "sexually_perverted", "authoress", "Prudie", "raven_haired_beauty", "Bonos", "domestic_shorthair", "brothas", "nymphet", "Neelma", "Seita", "stud_muffin", "St._Judes", "yenta", "bare_shouldered", "Pinkney_Sr.", 
"PRINCE_CHARLES", "Bisutti", "sistas", "Blanche_Devereaux", "Momoa", "Quiff", "Scotswoman", "balaclava_clad_men", "Louis_Leakey", "dearie", "vacuum_cleaner_salesman", "grandads", "postulant", "SARAH_JESSICA_PARKER", "AUNT", "Prince_Dauntless", "Dalys", "Darkie", "Czar_Nicholas", "Lion_Hearted", "Boy_recliner", "baby_mamas", "giantess", "Lawd", "GRANNY", "fianc_e", "Bilqis", "WCTU", "famly", "Ellas", "feminazis", "Pentheus", "MAMAS", "Town_Criers", "Saggy", "youngman", "grandam", "divorce", "bosomed", "roon", "Simmentals", "eponymous_heroine", "LEYLAND", "REE'", "cain't", "Evelynn", "WAH'", "sistah", "Horners", "Elsie_Poncher", "Coochie", "rat_terriers", "Limousins", "Buchinski", "Schicchi", "Carpitcher", "Khwezi", "HAH'", "Shazza", "Mackeson", "ROH'", "kuya", "novice_nun", "Shei", "Elmasri", "ladykiller", "6yo", "Yenta", "SHEL", "pater", "Souse", "Tahirah", "comedian_Rodney_Dangerfield", "Shottle", "carryin", "Sath", "fa'afafine", "royal_consort", "hus_band", "maternal_uncles", "dressing_provocatively", "dreamgirl", "millionaire_industrialist", "Georgie_Girl", "Must_Be_Obeyed", "joh", "Arabian_stallion", "ahr", "mso_para_margin_0in", "SOO'", "Biddles", "Chincoteague_Volunteer_Fire", "Lisa_Miceli", "gorgeous_brunette", "Moved_fluently", "Afternoon_Deelites", "biker_dude", "Vito_Spatafore", "MICK_JAGGER", "Adesida", "Reineman", "witz", "Djamila", "Glenroe", "daddys", "Romanzi", "gentlewomen", "Dandie_Dinmont_terrier", "Excess_Ire", "By_SYVJ_Staff", "zan", "CONFESSIONS", "Magees", "wimmin", "tash", "Theatrical_Ire", "Prince_Charmings", "chocolate_eclair", "bron", "daughers", "Felly", "fiftyish", "Spritely", "GRANDPA", "distaffer", "Norbertines", "DAH'", "leader_Muammar_Gadaffi", "swains", "Prince_Tomohito", "Honneur", "Soeur", "jouster", "Pharaoh_Amenhotep_III", "QUEEN_ELIZABETH_II", "Ne'er", "Galileo_Ire", "Fools_Crow", "Lannisters", "Devines", "gonzales", "columnist_Ann_Landers", "Moseleys", "hiz", "busch", "roastee", "toyboys", "Sheffields", "grandaunt", "Galvins", "Giongo", "geh", "flame_haired_actress", "Grammarian", "Greg_Evigan", "frontierswoman", "Debele", "rabs", "nymphets", "aai", "BREE", "Shaqs", "ZAY", "pappa", "Housa", "refrigerator_repairman", "artificial_inseminations", "chickie", "Rippa", "teenager_Tracy_Turnblad", "homebred_colt", "Abigaille", "hen_pecked_husband", "businesman", "her.She", "Kaikeyi", "Stittsworth", "self_proclaimed_redneck", "Khella", "NeW", "Evers_Swindell", "Asmerom_Gebreselassie", "Boy_recliners", "Cliff_Claven", "Legge_Bourke", "Costos", "d'_honneur", "sistahs", "Cabble", "sahn", "CROW_AGENCY_Mont", "jezebel", "Harrolds", "ROSARIO_DAWSON", "INXS_frontman_Michael_Hutchence", "Gursikh", "Dadas", "VIAGA", "keen_horsewoman", "Theodoric", "Eldery", "lihn", "Alice_Kramden", "Santarina", "radical_cleric_al_Sadr", "Curleys", "SY'", "Fidaa", "Saptapadi", "Actor_Sean_Astin", "Kellita_Smith", "Doly", "Libertina", "Money_McBags", "Chief_Bearhart", "choirgirl", "chestnut_stallion", "VIGRA", "BY_JIM_McCONNELL", "Sal_Vitale", "Trivia_buffs", "kumaris", "fraternal_lodge", "galpals", "Borino_Quinn", "lina", "LATEST_Rapper", "Bezar", "Manro", "bakla", "Grisetti", "blond_bimbo", "spinster_aunt", "gurls", "hiswife", "paleface", "Charlye", "hippie_chicks", "Khalifas", "Picture_JUSTIN_SANSON", "Hepburns", "yez", "ALDER", "Sanussi", "Lil_Sis", "McLoughlins", "Barbra_Jean", "Lulua", "thatshe", "actress_Shohreh_Aghdashloo", "SIR_ANTHONY_HOPKINS", "Gloddy", "ZAH'", "ORANGE_'S", "Danielle_Bimber", "grandmum", "Kulkis", "Brazington", "Marisa_Lenhard_CFA", "SIR_JOHN", "Clareman", 
"Aqila", "Heavily_tattooed", "Libbys", "thim", "elocutionist", "submissives", "Inja", "rahm", "Agnes_Gooch", "fake_tits", "nancy_boys", "Swaidan", "SHAH'", "ain'ta_bed", "Shumail_Raj", "Duchesse", "diethylstilbestrol_DES", "colt_foal", "unfaithful_lover", "Maseri", "nevah", "SAHN", "Barths", "Toughkenamon", "GUEST_STARS", "him.But", "Donna_Claspell", "gingham_dresses", "Massage_Parlour", "wae", "Wasacz", "Magistra", "vihl", "Smriti_Iraani", "boyish_haircut", "workingwoman", "borthers", "Capuchin_friars", "Nejma", "yes_sirs", "bivocational_pastor", "Grafters", "HOPWOOD", "Nicknamed_Godzilla", "yos", "Berkenfield", "Missis", "sitcom_Designing_Women", "Kafoa", "trainer_Emma_Lavelle", "sadomasochistic_dungeon", "iht", "desperates", "predessor", "wolf_cub", "indigenous_Peruvians", "Livia_Soprano", "troh", "colt_sired", "BOND_HILL", "ihl", "Drydens", "rahs", "Piserchia", "Sonny_Corinthos", "bankrobber", "Fwank", "feisty_redhead", "booze_guzzling", "COOPERS", "actress_Q'orianka_Kilcher", "Cortezar", "twe", "Jacoub", "Cindy_Iannarelli", "Hell_Raiser", "Fondly_referred", "Bridal_Shoppe", "Noleta", "Christinas", "IAGRA", "LaTanya_Richardson", "Sang_Bender", "Assasins", "sorrel_gelding", "septugenarian", "Hissy", "Muqtada_al_Sadr_mook", "Pfeni", "MADRID_AFX_Banco_Santander", "tuchis", "LeVaughn", "Gadzicki", "transvestite_hooker", "Fame_jockey_Laffit", "nun_Sister_Mary", "SAMSONOV", "Mayflower_Madam", "Shaque", "well.He", "Trainer_Julio_Canani", "sorrel_mare", "minivehicle_joint_venture", "wife_Dwina", "Aasiya_AH'_see", "Baratheon", "Rick_O'Shay", "Mammies", "goatie", "Nell_Gwynne", "charmingly_awkward", "Slamma", "DEHL", "Lorenzo_Borghese", "ALMA_Wis.", "Anne_Scurria", "father_Peruvians_alternately", "JULIE_ANDREWS", "Slim_Pickins", "Victoria_Secret_stunner", "BY'", "Sanam_Devdas", "pronounced_luh", "Pasha_Selim", "rson", "maternal_grandmothers", "IOWA_CITY_Ia", "Madame_de_Tourvel", "JAY'", "Sheika_Mozah_bint_Nasser", "Hotsy_Totsy", "D'_Ginto", "singer_Johnny_Paycheck", "uterine_prolapse_surgery", "SCOTTDALE_Pa.", "AdelaideNow_reports", "Marcus_Schenkenberg", "Clyse", "Obiter_Dicta", "comic_Sam_Kinison", "bitties", "ROCKVILLE_Ind.", "swimsuit_calendars", "Decicio_Smith", "Ma_ma", "Rie_Miyazawa", "celibate_chastity", "gwah", "ZAY'", "HER_Majesty", "Defrere", "Las_Madrinas", "Bea_Hamill", "ARCADIA_Calif._Trainer", "Bold_Badgett", "stakes_victress", "Hoppin_Frog", "Narumiya", "Flayfil", "hardman_Vinnie_Jones", "Marilyn_Monroe_lookalike", "Kivanc_Tatlitug", "Persis_Khambatta", "SINKING_SPRING_Pa.", "len_3rd", "DEAR_TRYING", "Farndon_Cheshire", "Krishna_Madiga", "daughter_Princess_Chulabhorn", "Marshall_Rooster_Cogburn", "Kitty_Kiernan", "Yokich", "Jarou", "Serdaris", "ee_ay", "Montifiore", "Chuderewicz", "Samuel_Le_Bihan", "filly_Proud_Spell", "Umm_Hiba", "pronounced_koo", "Sandy_Fonzo", "KOR'", "Fielder_Civil_kisses", "Federalsburg_Maryland", "Nikah_ceremony", "Brinke_Stevens", "Yakama_Tribal_Council", "Capuchin_Father", "wife_Callista_Bisek", "Beau_Dare", "Bedoni", "Arjun_Punj", "JOHNNY_KNOXVILLE", "cap_tain", "Alderwood_Boys", "Chi_Eta_Phi", "ringleader_Charles_Graner", "Savoies", "Lalla_Salma", "Mrs._Potiphar", "fahn", "name_Taylor_Sumers", "Vernita_Green", "Bollywood_baddie", "BENBROOK_Texas", "Assemblyman_Lou_Papan", "virgin_brides", "Cho_Eun", "CATHY_Freeman", "Uncle_Saul", "Lao_Brewery", "Ibo_tribe", "ruf", "rival_Edurne_Pasaban", "Hei_Shangri_La", "Mommy_dearest", "interest_Angola_Sonogal", "Ger_Monsun", "PUSSYCAT_DOLL", "Crown_Jewels_Condoms", "Lord_Marke", "Patootie", "Nora_Bey", 
"huntin_shootin", "Minister_Raymond_Tshibanda", "La_Nina_la_NEEN", "signature_Whoppers", "estranged_hubby_Kevin_Federline", "UR'", "pill_poppin", "GEHR'", "purebred_Arabians", "husbandly_duties", "VIAGRA_TIMING", "Hereford_heifer", "hushed_monotone_voice", "Pola_Uddin", "Wee_Jimmy_Krankie", "Kwakwanso", "Our_Galvinator", "shoh", "Codependency_Anonymous_Group", "LA'", "Taufa'ahau", "Invincible_Spirit_colt", "SAH'_dur", "MOUNT_CARMEL_Pa.", "watches_attentively", "SNL_spinoffs", "Seth_Nitschke", "Duns_Berwickshire", "defendant_Colleen_LaRose", "Silky_O'Sullivan", "Highcliff_Farm", "REN'", "Comestar", "Satisfied_Frog", "Jai_Maharashtra", "ATTICA_Ind.", "lover_Larry_Birkhead", "Tami_Megal", "chauvinist_pigs", "Phi_sorority", "Micronesian_immigrant", "Lia_Boldt", "Sugar_Tits", "actress_Kathy_Najimy", "zhoo", "Colombo_underboss", "Katsav_accusers", "Bess_Houdini", "rap_mogul_Diddy", "companions_Khin_Khin", "Van_Het", "Mastoi_tribe", "VITALY", "ROLLING_STONES_rocker", "womanizing_cad", "LILY_COLE", "paternal_grandfathers", "Lt._Col._Kurt_Kosmatka", "Kasseem_Jr.", "Ji_Ji", "Wilburforce", "VIAGRA_DOSE", "English_Sheepdogs", "pronounced_Kah", "Htet_Htet_Oo", "Brisk_Breeze", "Eau_du", "BY_MELANIE_EVANS", "Neovasc_Medical", "British_funnyman_RICKY", "4YO_mare", "Hemaida", "MONKTON", "Mrs_Mujuru", "BaGhana_BaGhana", "Shaaban_Abdel_Rahim", "Edward_Jazlowiecki_lawyer", "Ajman_Stud", "manly_pharaoh_even", "Serra_Madeira_Islands", "FRAY'", "panto_dames", "Khin_Myo", "dancer_Karima_El_Mahroug", "CROWN_Princess", "Baseball_HOFer", "Hasta_la_Pasta", "GIRLS_NEXT_DOOR", "Benedict_Groeschel", "Bousamra", "Ruby_Rubacuori_Ruby", "Monde_Bleu", "Un_homme_qui", "Taylor_Sumers", "Rapper_EMINEM", "Joe_Menchetti", "VAY'", "supermodel_NAOMI_CAMPBELL", "Supermodel_GISELE_BUNDCHEN", "Au_Lait", "Radar_Installed", "THOMAS_TOWNSHIP_Mich.", "Rafinesque", "Herman_Weinrich", "Abraxas_Antelope", "raspy_voiced_rocker", "Manurewa_Cosmopolitan_Club", "Paraone", "THE_LEOPARD", "Boy_Incorporated_LZB", "Dansili_filly", "Lumpy_Rutherford", "unwedded_bliss", "Bhavna_Sharma", "Scarvagh", "en_flagrante", "Mottu_Maid", "Dowager_Queen", "NEEN", "model_Monika_Zsibrita", "ROSIE_PEREZ", "Mattock_Ranger", "Valorous", "Surpreme", "Marwari_businessmen", "Grandparents_aunts", "Kimberley_Vlaeminck", "Lyn_Treece_Boys", "PDX_Update", "Virsa_Punjab", "eyelash_fluttering", "Pi_fraternity", "HUNTLEIGH_Mo.", "novelist_Jilly_Cooper", "Naha_Shuri_temple", "Yasmine_Al_Massri", "Mu_Gamma_Xi", "Mica_Ertegun", "Ocleppo", "VIAGRA_CONTRAINDICATIONS", "daughter_PEACHES", "trainer_Geoff_Wragg", "OVERNIGHT_DELIVERY", "Fitts_retiree", "de_Tourvel", "Lil_Lad", "north_easterner", "Aol_Weird_News", "Somewhat_improbably", "Sikh_panth", "Worcester_2m_7f", "Zainab_Jah", "OLYMPIC_medalist", "Enoch_Petrucelly", "collie_Lassie", "LOW'", "clumsiness_Holloway", "ayr", "OHR'", "ROLLING_STONES_guitarist", "LAH'_nee", "Ian_Beefy_Botham", "Awapuni_trainer", "Glamorous_Granny", "Chiang_Ching", "MidAtlantic_Cardiovascular_Associates", "Yeke", "Seaforth_Huron_Expositor", "Westley_Cary_Elwes", "Cate_Blanchett_Veronica_Guerin", "Bellas_Gate", "witch_Glinda", "wives_mistresses", "Woodsville_Walmart", "2YO_colt", "Manav_Sushant_Singh", "Pupi_Avati_Il", "Sigma_Beta_Rho", "Bishop_Christopher_Senyonjo", "Vodou_priest", "Rubel_Chowdhury", "Claddagh_Ring", "TAH'_duh_al", "al_Sadr_mook_TAH'", "ROBIN_GIBB", "GAHN'", "BY_THOMAS_RANSON", "sister_Carine_Jena", "Lyphard_mare", "summa_cum", "Semenya_grandmother_Maputhi", "Clare_Nuns", "Talac", "sex_hormones_androgens", "majeste", "Saint_Ballado_mare", 
"Carrie_Huchel", "Mae_Dok", "wife_Dieula", "Earnest_Sirls", "spoof_bar_mitzvah", "von_Boetticher", "Audwin_Mosby", "Case_presentationWe", "Vincent_Papandrea", "KRAY'", "Sergi_Benavent", "Le_Poisson", "Von_Cramm", "Patti_Mell", "Raymi_Coya", "Benjamin_BeBe_Winans", "Nana_Akosua", "Auld_Acquaintance", "Desire_Burunga", "Company_Wrangler_Nestea", "ask_Krisy_Plourde", "JUANITA_BYNUM", "livia", "GAMB", "Gail_Rosario_Dawson", "Ramgarhia_Sikh", "Catholic_nun_Sister", "FOUR_WEDDINGS_AND", "Robyn_Scherer", "brother_King_Athelstan", "Santo_Loquasto_Fences", "Wee_Frees", "MARISOL", "Soliloquy_Stakes", "Whatever_Spoetzl", "Marc'Aurelio", "mon_petit", "Sabbar_al_Mashhadani", "KAY'_lee", "m_zah_MAH'", "BY_TAMI_ALTHOFF", "hobbit_Samwise_Gamgee", "Bahiya_Hariri_sister", "daddy_Larry_Birkhead", "Sow_Tracey_Ullman", "coach_Viljo_Nousiainen", "Carmen_Lebbos", "conjoined_twins_Zainab", "Rob_Komosa", "ample_bosomed", "Ageing_rocker", "psychic_Oda"]
| 19,567 | 361.37037 | 15,135 | py |
sent_debias | sent_debias-master/debias-BERT/experiments/my_debiaswe/my_we.py | from __future__ import print_function, division
import re
import sys
import numpy as np
import scipy.sparse
from sklearn.decomposition import PCA
if sys.version_info[0] < 3:
import io
open = io.open
else:
unicode = str
import gensim.models
import gensim.downloader as api
"""
Tools for debiasing word embeddings
Modified from the paper:
Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings
Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai
2016
"""
DEFAULT_NUM_WORDS = 27000
FILENAMES = {"g_wiki": "glove.6B.300d.small.txt",
"g_twitter": "glove.twitter.27B.200d.small.txt",
"g_crawl": "glove.840B.300d.small.txt",
"w2v": "GoogleNews-word2vec.small.txt",
"w2v_large": "GoogleNews-word2vec.txt"}
def dedup(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
def safe_word(w):
# ignore words with numbers, etc.
# [a-zA-Z\.'_\- :;\(\)\]] for emoticons
return (re.match(r"^[a-z_]*$", w) and len(w) < 20 and not re.match(r"^_*$", w))
def to_utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
class WordEmbedding:
def __init__(self, model_name, vocab):
self.thresh = None
self.max_words = None
self.desc = model_name
print("*** Reading data from " + model_name)
words = sorted([w for w in vocab])
model = api.load(model_name) # takes a few minutes
vecs = [model.word_vec(w) for w in words]
# debug
# vecs = np.random.randn(len(vocab), 300)
self.vecs = np.array(vecs, dtype='float32')
self.words = words
self.reindex()
norms = np.linalg.norm(self.vecs, axis=1)
if max(norms)-min(norms) > 0.0001:
self.normalize()
def reindex(self):
self.index = {w: i for i, w in enumerate(self.words)}
self.n, self.d = self.vecs.shape
assert self.n == len(self.words) == len(self.index)
self._neighbors = None
print(self.n, "words of dimension", self.d, ":", ", ".join(self.words[:4] + ["..."] + self.words[-4:]))
def v(self, word):
return self.vecs[self.index[word]]
def diff(self, word1, word2):
v = self.vecs[self.index[word1]] - self.vecs[self.index[word2]]
return v/np.linalg.norm(v)
def normalize(self):
self.desc += ", normalize"
self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]
self.reindex()
def shrink(self, numwords):
self.desc += ", shrink " + str(numwords)
self.filter_words(lambda w: self.index[w]<numwords)
def filter_words(self, test):
"""
Keep some words based on test, e.g. lambda x: x.lower()==x
"""
self.desc += ", filter"
kept_indices, words = zip(*[[i, w] for i, w in enumerate(self.words) if test(w)])
self.words = list(words)
self.vecs = self.vecs[kept_indices, :]
self.reindex()
def save(self, filename):
with open(filename, "w") as f:
f.write("\n".join([w+" " + " ".join([str(x) for x in v]) for w, v in zip(self.words, self.vecs)]))
print("Wrote", self.n, "words to", filename)
def save_w2v(self, filename, binary=True):
with open(filename, 'wb') as fout:
fout.write(to_utf8("%s %s\n" % self.vecs.shape))
# store in sorted order: most frequent words at the top
for i, word in enumerate(self.words):
row = self.vecs[i]
if binary:
fout.write(to_utf8(word) + b" " + row.tostring())
else:
fout.write(to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
def remove_directions(self, directions): #directions better be orthogonal
self.desc += ", removed"
for direction in directions:
self.desc += " "
if type(direction) is np.ndarray:
v = direction / np.linalg.norm(direction)
self.desc += "vector "
else:
w1, w2 = direction
v = self.diff(w1, w2)
self.desc += w1 + "-" + w2
self.vecs = self.vecs - self.vecs.dot(v)[:, np.newaxis].dot(v[np.newaxis, :])
self.normalize()
def compute_neighbors_if_necessary(self, thresh, max_words):
thresh = float(thresh) # dang python 2.7!
if self._neighbors is not None and self.thresh == thresh and self.max_words == max_words:
return
print("Computing neighbors")
self.thresh = thresh
self.max_words = max_words
vecs = self.vecs[:max_words]
dots = vecs.dot(vecs.T)
dots = scipy.sparse.csr_matrix(dots * (dots >= 1-thresh/2))
from collections import Counter
rows, cols = dots.nonzero()
nums = list(Counter(rows).values())
print("Mean:", np.mean(nums)-1)
print("Median:", np.median(nums)-1)
rows, cols, vecs = zip(*[(i, j, vecs[i]-vecs[j]) for i, j, x in zip(rows, cols, dots.data) if i<j])
self._neighbors = rows, cols, np.array([v/np.linalg.norm(v) for v in vecs])
def neighbors(self, word, thresh=1):
dots = self.vecs.dot(self.v(word))
return [self.words[i] for i, dot in enumerate(dots) if dot >= 1-thresh/2]
def more_words_like_these(self, words, topn=50, max_freq=100000):
v = sum(self.v(w) for w in words)
dots = self.vecs[:max_freq].dot(v)
thresh = sorted(dots)[-topn]
words = [w for w, dot in zip(self.words, dots) if dot>=thresh]
return sorted(words, key=lambda w: self.v(w).dot(v))[-topn:][::-1]
def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):
"""Metric is cos(a-c, b-d) if |b-d|^2 < thresh, otherwise 0
"""
vecs, vocab = self.vecs[:max_words], self.words[:max_words]
self.compute_neighbors_if_necessary(thresh, max_words)
rows, cols, vecs = self._neighbors
scores = vecs.dot(v/np.linalg.norm(v))
pi = np.argsort(-abs(scores))
ans = []
usedL = set()
usedR = set()
for i in pi:
if abs(scores[i])<0.001:
break
row = rows[i] if scores[i] > 0 else cols[i]
col = cols[i] if scores[i] > 0 else rows[i]
if row in usedL or col in usedR:
continue
usedL.add(row)
usedR.add(col)
ans.append((vocab[row], vocab[col], abs(scores[i])))
if len(ans)==topn:
break
return ans
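# Usage sketch (an addition; "glove-wiki-gigaword-300" is one gensim-downloader model
# name, and `vocab` must be a subset of that model's vocabulary):
# E = WordEmbedding("glove-wiki-gigaword-300", vocab=["he", "she", "doctor", "nurse"])
# v_gender = E.diff("he", "she") # unit he-she direction
# print(E.neighbors("doctor", thresh=0.5)) # words with cosine similarity >= 0.75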
def viz(analogies):
print("\n".join(str(i).rjust(4)+a[0].rjust(29) + " | " + a[1].ljust(29) + (str(a[2]))[:4] for i, a in enumerate(analogies)))
def text_plot_words(xs, ys, words, width=90, height=40, filename=None):
PADDING = 10 # num chars on left and right in case words spill over
res = [[' ' for i in range(width)] for j in range(height)]
def rescale(nums):
a = min(nums)
b = max(nums)
return [(x-a)/(b-a) for x in nums]
print("x:", (min(xs), max(xs)), "y:",(min(ys),max(ys)))
xs = rescale(xs)
ys = rescale(ys)
for (x, y, word) in zip(xs, ys, words):
i = int(x*(width - 1 - PADDING))
j = int(y*(height-1))
row = res[j]
z = list(row[i2] != ' ' for i2 in range(max(i-1, 0), min(width, i + len(word) + 1)))
if any(z):
continue
for k in range(len(word)):
if i+k>=width:
break
row[i+k] = word[k]
string = "\n".join("".join(r) for r in res)
# return string
if filename:
with open(filename, "w", encoding="utf8") as f:
f.write(string)
print("Wrote to", filename)
else:
print(string)
def doPCA(pairs, embedding, num_components=10):
matrix = []
for a, b in pairs:
center = (embedding.v(a) + embedding.v(b))/2
matrix.append(embedding.v(a) - center)
matrix.append(embedding.v(b) - center)
matrix = np.array(matrix)
pca = PCA(n_components=num_components, svd_solver="auto")
pca.fit(matrix)
# bar(range(num_components), pca.explained_variance_ratio_)
return pca
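# Example (a sketch, assuming `E` is a loaded WordEmbedding containing these words):
# following Bolukbasi et al. (2016), the gender direction is taken as the top
# principal component of the centered definitional pairs.
# pairs = [("woman", "man"), ("girl", "boy"), ("she", "he"), ("mother", "father")]
# gender_direction = doPCA(pairs, E).components_[0] # unit vector of shape (E.d,)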
def drop(u, v):
return u - v * u.dot(v) / v.dot(v)
def dropspace(u, V):
norm_sqrd = np.sum(V*V, axis=-1)
vecs = np.divide(V@u, norm_sqrd)[:, None] * V
subspace = np.sum(vecs, axis=0)
return u - subspace
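# Minimal numeric sanity check (an addition, not in the original code): `drop`
# removes the component of u along v, and `dropspace` removes the components
# along every row of V; the orthogonality guarantee for `dropspace` assumes the
# rows of V are mutually orthogonal, so the check builds them via QR.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    u, v = rng.randn(5), rng.randn(5)
    V = np.linalg.qr(rng.randn(5, 3))[0].T # 3 orthonormal rows of length 5
    assert abs(drop(u, v).dot(v)) < 1e-10
    assert all(abs(dropspace(u, V).dot(row)) < 1e-10 for row in V)
    print("drop/dropspace orthogonality checks passed")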
| 7,673 | 29.943548 | 125 | py |
artbench | artbench-main/artbench.py | from torchvision.datasets import CIFAR10
class ArtBench10(CIFAR10):
base_folder = "artbench-10-batches-py"
url = "https://artbench.eecs.berkeley.edu/files/artbench-10-python.tar.gz"
filename = "artbench-10-python.tar.gz"
tgz_md5 = "9df1e998ee026aae36ec60ca7b44960e"
train_list = [
["data_batch_1", "c2e02a78dcea81fe6fead5f1540e542f"],
["data_batch_2", "1102a4dcf41d4dd63e20c10691193448"],
["data_batch_3", "177fc43579af15ecc80eb506953ec26f"],
["data_batch_4", "566b2a02ccfbafa026fbb2bcec856ff6"],
["data_batch_5", "faa6a572469542010a1c8a2a9a7bf436"],
]
test_list = [
["test_batch", "fa44530c8b8158467e00899609c19e52"],
]
meta = {
"filename": "meta",
"key": "styles",
"md5": "5bdcafa7398aa6b75d569baaec5cd4aa",
}
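# Usage sketch (an addition, not part of the original file): ArtBench10 subclasses
# torchvision's CIFAR10, so it accepts the same constructor arguments; download=True
# fetches artbench-10-python.tar.gz from `url` and verifies the md5 sums above.
if __name__ == "__main__":
    import torchvision.transforms as T
    dataset = ArtBench10(root="./data", train=True, download=True, transform=T.ToTensor())
    image, style_label = dataset[0] # a 3x32x32 float tensor and an int style id in [0, 9]
    print(len(dataset), tuple(image.shape), dataset.classes[style_label])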
| 827 | 32.12 | 78 | py |
TAILOR | TAILOR-main/inference.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import json
import torch
import numpy as np
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import time
import argparse
from src.models.models import TAILOR
from src.models.optimization import BertAdam
from torch.utils.data import DataLoader
import torch.utils.data as data
from util import parallel_apply, get_logger
from src.dataloaders.cmu_dataloader import MOSEI_Dataset, MOSEI_Dataset_no_align
from src.utils.eval import get_metrics
global logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def dataloader_test(args):
test_dataset = MOSEI_Dataset(
args.data_path,
'test'
)
label_input, label_mask = test_dataset._get_label_input()
test_dataloader = DataLoader(
test_dataset,
batch_size=args.batch_size,
num_workers=4,
pin_memory=False,
shuffle=False,
drop_last=True
)
test_length = len(test_dataset)
return label_input, label_mask, test_dataloader, test_length
def load_model(args, n_gpu, device, model_file=None):
logger.info("**** loading model_file=%s *****", model_file)
if os.path.exists(model_file):
model_state_dict = torch.load(model_file, map_location='cpu')
if args.local_rank == 0:
logger.info("Model loaded from %s", model_file)
# Prepare model
model = TAILOR.from_pretrained(args.bert_model, args.visual_model, args.audio_model, args.cross_model, args.decoder_model, state_dict=model_state_dict,task_config=args)
model.to(device)
logger.info('***** loading model successful! *****')
else:
model = None
return model
def model_test(model, test_dataloader, device, label_input, label_mask):
model.eval()
label_input = label_input.to(device)
label_mask = label_mask.to(device)
with torch.no_grad():
total_pred = []
total_true_label = []
for _, batch in enumerate(test_dataloader):
batch = tuple(t.to(device) for t in batch)
            text, text_mask, video, video_mask, audio, audio_mask, ground_truth_labels = batch
            # keyword spelling below matches the (misspelled) parameter name in the model's inference signature
            batch_pred, true_label = model.inference(text, text_mask, video, video_mask, audio, audio_mask, label_input, label_mask, ground_trunth_labels=ground_truth_labels)
total_pred.append(batch_pred)
total_true_label.append(true_label)
total_pred=torch.cat(total_pred,0)
total_true_label = torch.cat(total_true_label, 0)
return total_pred, total_true_label
parser = argparse.ArgumentParser(description="model interfence")
parser.add_argument("--do_test", action='store_true', help="whether to run test")
parser.add_argument("--data_path", type=str, help='cmu_mosei data_path')
parser.add_argument("--model_file", type=str, help="model store path")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--max_words', type=int, default=60, help='')
parser.add_argument('--max_frames', type=int, default=60, help='')
parser.add_argument('--max_sequence', type=int, default=60, help='')
parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
parser.add_argument('--audio_model', default="audio-base", type=str, required=False, help='AUdio module')
parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
parser.add_argument("--bert_model", default="bert-base", type=str, required=False,
help="Bert pre-trained model")
parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
parser.add_argument("--num_labels", type=int, default=6, required=False)
parser.add_argument('--video_dim', type=int, default=35, required=False,help='video feature dimension')
parser.add_argument('--audio_dim', type=int, default=74, required=False, help='')
parser.add_argument('--text_dim', type=int, default=300, help='text_feature_dimension')
parser.add_argument('--bert_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
parser.add_argument('--visual_num_hidden_layers', type=int, default=4, help="Layer NO. of visual.")
parser.add_argument('--audio_num_hidden_layers', type=int, default=4, help="Layer NO. of audio")
parser.add_argument('--cross_num_hidden_layers', type=int, default=3, help="Layer NO. of cross.")
parser.add_argument('--decoder_num_hidden_layers', type=int, default=1, help="Layer NO. of decoder.")
parser.add_argument("--common_dim",type=int, default=256)
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--seed', type=int, default=42, help='random seed')
args = parser.parse_args()
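# Example invocation (paths are hypothetical placeholders):
# python inference.py --do_test --data_path /path/to/mosei_aligned.pkl \
#     --model_file ckpts/pytorch_model.bin.4 --output_dir ./infer_out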
n_gpu = 1
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
logger = get_logger(os.path.join(args.output_dir, "log.txt"))
start=time.time()
if args.local_rank ==0:
model = load_model(args, n_gpu, device, model_file=args.model_file)
logger.info("***** dataloader loading *****")
label_input, label_mask, test_dataloader, test_length = dataloader_test(args)
logger.info("***** Running test *****")
logger.info(" Num examples = %d", test_length)
logger.info(" Batch size = %d", 64)
logger.info(" Num steps = %d", len(test_dataloader))
total_pred, total_true_label = model_test(model, test_dataloader, device, label_input, label_mask)
test_micro_f1, test_micro_precision, test_micro_recall, test_acc = get_metrics(total_pred, total_true_label)
logger.info("----- micro_f1: %f, micro_precision: %f, micro_recall: %f, acc: %f", \
test_micro_f1, test_micro_precision, test_micro_recall, test_acc)
logger.info("inference time: {}".format(time.time() - start))
| 6,350 | 45.021739 | 176 | py |
TAILOR | TAILOR-main/util.py | import torch
import torch.nn as nn
import threading
from torch._utils import ExceptionWrapper
import logging
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(fct, model, inputs, device_ids):
modules = nn.parallel.replicate(model, device_ids)
assert len(modules) == len(inputs)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input):
torch.set_grad_enabled(grad_enabled)
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = fct(module, *input)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [threading.Thread(target=_worker, args=(i, module, input))
for i, (module, input) in enumerate(zip(modules, inputs))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
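# Usage sketch (hypothetical names): each element of `inputs` must already live on
# the matching entry of `device_ids`; fct(replica, *inputs[i]) then runs on device i.
# outputs = parallel_apply(lambda m, x: m(x), model, [(batch0,), (batch1,)], device_ids=[0, 1])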
def get_logger(filename=None):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
if filename is not None:
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger | 2,495 | 33.191781 | 99 | py |
TAILOR | TAILOR-main/__init__.py | 0 | 0 | 0 | py |
|
TAILOR | TAILOR-main/train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import json
import torch
import numpy as np
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys
import time
import argparse
from src.models.models import TAILOR
from src.models.optimization import BertAdam
from src.utils.eval import get_metrics
from src.utils.eval_gap import *
from torch.utils.data import DataLoader, WeightedRandomSampler
import torch.utils.data as data
from util import parallel_apply, get_logger
from src.dataloaders.cmu_dataloader import AlignedMoseiDataset, UnAlignedMoseiDataset
#torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='Multi-modal Multi-label Emotion Recognition'):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_test", action='store_true', help="whether to run test")
parser.add_argument("--aligned", action='store_true', help="whether train align of unalign dataset")
parser.add_argument("--data_path", type=str, help='cmu_mosei data_path')
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--num_thread_reader', type=int, default=1, help='')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
parser.add_argument('--unaligned_data_path', type=str, default='/amax/cmy/mosei_senti_data_noalign.pkl', help='load unaligned dataset')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
parser.add_argument('--text_dim', type=int, default=300, help='text_feature_dimension')
parser.add_argument('--video_dim', type=int, default=35, help='video feature dimension')
parser.add_argument('--audio_dim', type=int, default=74, help='audio_feature_dimension')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument('--max_words', type=int, default=60, help='')
parser.add_argument('--max_frames', type=int, default=60, help='')
parser.add_argument('--max_sequence', type=int, default=60, help='')
parser.add_argument('--max_label', type=int, default=6, help='')
parser.add_argument("--bert_model", default="bert-base", type=str, required=False, help="Bert module")
parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
parser.add_argument("--audio_model", default="audio-base", type=str, required=False, help="Audio module")
parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Set at runtime; do not pass manually.")
parser.add_argument("--world_size", default=0, type=int, help="distribted training")
parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
parser.add_argument('--bert_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
parser.add_argument('--visual_num_hidden_layers', type=int, default=3, help="Layer NO. of visual.")
parser.add_argument('--audio_num_hidden_layers', type=int, default=3, help="Layer No. of audio")
parser.add_argument('--cross_num_hidden_layers', type=int, default=3, help="Layer NO. of cross.")
parser.add_argument('--decoder_num_hidden_layers', type=int, default=1, help="Layer NO. of decoder.")
parser.add_argument("--num_classes", default=6, type=int, required=False)
parser.add_argument("--hidden_size",type=int, default=256)
args = parser.parse_args()
    # Check parameters
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_test:
raise ValueError("At least one of `do_train` or `do_test` must be True.")
args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
return args
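# Example invocation (paths are hypothetical placeholders):
# python train.py --do_train --aligned --data_path /path/to/mosei_aligned.pkl \
#     --output_dir ./ckpt --epochs 20 --batch_size 256 --lr 1e-4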
def set_seed_logger(args):
global logger
# predefining random initial seeds
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.cuda.set_device(args.local_rank)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
logger = get_logger(os.path.join(args.output_dir, "log.txt"))
if args.local_rank == 0:
logger.info("Effective parameters:")
for key in sorted(args.__dict__):
logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
return args
def init_device(args, local_rank):
global logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
n_gpu = 1
logger.info("device: {} n_gpu: {}".format(device, n_gpu))
args.n_gpu = n_gpu
if args.batch_size % args.n_gpu != 0:
raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
if args.init_model:
model_state_dict = torch.load(args.init_model, map_location='cpu')
else:
model_state_dict = None
# Prepare model
model = TAILOR.from_pretrained(args.bert_model, args.visual_model, args.audio_model, args.cross_model, args.decoder_model, task_config=args)
return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
if hasattr(model, 'module'):
model = model.module
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
no_decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]
no_decay_bert_param_tp = [(n, p) for n, p in no_decay_param_tp if "audio." in n]
no_decay_nobert_param_tp = [(n, p) for n, p in no_decay_param_tp if "audio." not in n]
decay_bert_param_tp = [(n, p) for n, p in decay_param_tp if "audio." in n]
decay_nobert_param_tp = [(n, p) for n, p in decay_param_tp if "audio." not in n]
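    # NOTE (descriptive comment): the *_param_tp names are inverted relative to their
    # contents: `no_decay_param_tp` holds parameters whose names do NOT match the
    # `no_decay` patterns (they receive weight_decay=0.01 below), while `decay_param_tp`
    # holds the bias/LayerNorm parameters that are exempt from decay (weight_decay=0.0).
    # The "audio." split keeps a separately scalable learning rate for the audio branch.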
optimizer_grouped_parameters = [
{'params': [p for n, p in no_decay_bert_param_tp], 'weight_decay': 0.01, 'lr': args.lr * 1.0},
{'params': [p for n, p in no_decay_nobert_param_tp], 'weight_decay': 0.01},
{'params': [p for n, p in decay_bert_param_tp], 'weight_decay': 0.0, 'lr': args.lr * 1.0},
{'params': [p for n, p in decay_nobert_param_tp], 'weight_decay': 0.0}
]
scheduler = None
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
max_grad_norm=1.0)
return optimizer, scheduler, model
def prep_dataloader(args):
Dataset = AlignedMoseiDataset if args.aligned else UnAlignedMoseiDataset
train_dataset = Dataset(
args.data_path,
'train'
)
val_dataset = Dataset(
args.data_path,
'valid'
)
test_dataset = Dataset(
args.data_path,
'test'
)
label_input, label_mask = train_dataset._get_label_input()
train_dataloader = DataLoader(
train_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=True,
drop_last=True
)
val_dataloader = DataLoader(
val_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=True,
drop_last=True
)
test_dataloader = DataLoader(
test_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=True,
drop_last=True
)
train_length = len(train_dataset)
val_length = len(val_dataset)
test_length = len(test_dataset)
return train_dataloader, val_dataloader, test_dataloader, train_length, val_length, test_length, label_input, label_mask
def save_model(args, model, epoch):
    # Only save the model itself
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(
args.output_dir, "pytorch_model_{}.bin.".format(epoch))
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model saved to %s", output_model_file)
return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
if model_file is None or len(model_file) == 0:
model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
if os.path.exists(model_file):
model_state_dict = torch.load(model_file, map_location='cpu')
if args.local_rank == 0:
logger.info("Model loaded from %s", model_file)
        # args has no cache_dir attribute and PYTORCH_PRETRAINED_BERT_CACHE is never imported,
        # so build the model directly; from_pretrained also requires the decoder module name.
        model = TAILOR.from_pretrained(args.bert_model, args.visual_model, args.audio_model, args.cross_model,
                                       args.decoder_model, state_dict=model_state_dict, task_config=args)
model.to(device)
else:
model = None
return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, label_input=None, label_mask=None):
global logger
model.train()
log_step = args.n_display
start_time = time.time()
total_loss = 0
total_pred = []
total_true_label = []
total_pred_scores = []
for step, batch in enumerate(train_dataloader):
# torch.cuda.empty_cache()
if n_gpu == 1:
            # multi-gpu does the scattering itself
            batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        pairs_text, pairs_mask, video, video_mask, audio, audio_mask, ground_label = batch
model_loss, batch_pred, true_label, pred_scores = model(pairs_text, pairs_mask, video, video_mask, audio, audio_mask, label_input, label_mask, groundTruth_labels=ground_label, training=True)
if n_gpu > 1:
model_loss = model_loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
model_loss = model_loss / args.gradient_accumulation_steps
model_loss.backward()
total_loss += float(model_loss)
total_pred.append(batch_pred)
total_true_label.append(true_label)
total_pred_scores.append(pred_scores)
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
if scheduler is not None:
scheduler.step() # Update learning rate schedule
optimizer.step()
optimizer.zero_grad()
global_step += 1
if global_step % log_step == 0 and local_rank == 0:
logger.info("Epoch: %d/%d, Step: %d/%d, Lr: %s, loss: %f, Time/step: %f", epoch + 1,
args.epochs, step + 1,
len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),float(model_loss),
(time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
start_time = time.time()
total_loss = total_loss / len(train_dataloader)
total_pred=torch.cat(total_pred,0)
total_true_label = torch.cat(total_true_label, 0)
total_pred_scores = torch.cat(total_pred_scores, 0)
return total_loss, total_pred, total_true_label, total_pred_scores
def eval_epoch(args, model, val_dataloader, device, n_gpu, label_input, label_mask):
if hasattr(model, 'module'):
model = model.module.to(device)
else:
model = model.to(device)
model.eval()
with torch.no_grad():
total_pred = []
total_true_label = []
total_pred_scores = []
for _, batch in enumerate(val_dataloader):
batch = tuple(t.to(device) for t in batch)
text, text_mask, video, video_mask, audio, audio_mask, groundTruth_labels = batch
batch_pred, true_label, pred_scores = model(text, text_mask, video, video_mask, audio, audio_mask, label_input, label_mask, groundTruth_labels=groundTruth_labels, training=False)
total_pred.append(batch_pred)
total_true_label.append(true_label)
total_pred_scores.append(pred_scores)
total_pred=torch.cat(total_pred,0)
total_true_label = torch.cat(total_true_label, 0)
total_pred_scores = torch.cat(total_pred_scores, 0)
return total_pred, total_true_label, total_pred_scores
def main():
global logger
train_time = time.time()
args = get_args()
args = set_seed_logger(args)
device, n_gpu = init_device(args, args.local_rank)
model = init_model(args, device, n_gpu, args.local_rank)
model = model.to(device)
    if args.aligned == False:
        logger.warning("!!!!!!!!!!!!!! training on the UNALIGNED dataset")
    else:
        logger.warning("!!!!!!!!!!!!!! training on the ALIGNED dataset")
    print('***** dataloader prepping ... *****')
if args.do_train:
train_dataloader, val_dataloader, test_dataloader, train_length, val_length, test_length, label_input, label_mask = prep_dataloader(args)
label_input = label_input.to(device)
label_mask = label_mask.to(device)
        num_train_optimization_steps = int((len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                           / args.gradient_accumulation_steps) * args.epochs
coef_lr = args.coef_lr
if args.init_model:
coef_lr = 1.0
optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
if args.local_rank == 0:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", train_length)
logger.info(" Batch size = %d", args.batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
best_score = 0.000
best_output_model_file = None
global_step = 0
best_model = None
for epoch in range(args.epochs):
total_loss, total_pred, total_label, total_pred_scores= train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
scheduler, global_step, local_rank=args.local_rank, label_input=label_input, label_mask=label_mask)
total_micro_f1, total_micro_precision, total_micro_recall, total_acc = get_metrics(total_pred, total_label)
total_pred_scores = total_pred_scores.data.cpu().numpy()
total_label = total_label.data.cpu().numpy()
train_gap = calculate_gap(total_pred_scores, total_label)
if args.local_rank == 0:
logger.info("Epoch %d/%d Finished, Train Loss: %f, Train_micro_f1: %f, Train_micro_precision: %f, Train_micro_recall: %f, Train_acc: %f, train_gap: %f", \
epoch + 1, args.epochs, total_loss, total_micro_f1, total_micro_precision, total_micro_recall, total_acc, train_gap)
if args.local_rank == 0:
logger.info("***** Running valing *****")
logger.info(" Num examples = %d", val_length)
logger.info(" Batch_size = %d", args.batch_size)
val_pred, val_label, val_pred_scores = eval_epoch(args, model, val_dataloader, device, n_gpu, label_input, label_mask)
val_micro_f1, val_micro_precision, val_micro_recall, val_acc = get_metrics(val_pred, val_label)
val_pred_scores = val_pred_scores.data.cpu().numpy()
val_label = val_label.data.cpu().numpy()
val_gap = calculate_gap(val_pred_scores, val_label)
logger.info("----- micro_f1: %f, micro_precision: %f, micro_recall: %f, acc: %f, val_gap: %f", \
val_micro_f1, val_micro_precision, val_micro_recall, val_acc, val_gap)
output_model_file = save_model(args, model, epoch)
if best_score <= val_micro_f1:
best_score = val_micro_f1
best_model = model
best_output_model_file = output_model_file
logger.info("The best model is: {}, the f1 is: {:.4f}".format(best_output_model_file, best_score))
if args.local_rank == 0:
logger.info('***** Running testing *****')
logger.info(' Num examples = %d', test_length)
logger.info(" Batch_size = %d", args.batch_size)
test_pred, test_label, test_pred_scores = eval_epoch(args, best_model, test_dataloader, device, n_gpu, label_input, label_mask)
test_micro_f1, test_micro_precision, test_micro_recall, test_acc = get_metrics(test_pred, test_label)
test_pred_scores = test_pred_scores.data.cpu().numpy()
test_label = test_label.data.cpu().numpy()
test_gap = calculate_gap(test_pred_scores, test_label)
logger.info("----- micro_f1: %f, micro_precision: %f, micro_recall: %f, acc: %f, test_gap: %f", \
test_micro_f1, test_micro_precision, test_micro_recall, test_acc, test_gap)
if __name__ == "__main__":
main()
| 19,444 | 47.370647 | 198 | py |
TAILOR | TAILOR-main/src/models/module_visual.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'visual_config.json'
WEIGHTS_NAME = 'visual_pytorch_model.bin'
class VisualConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `VisualModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file=4096,
hidden_size=768,
num_hidden_layers=3,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02):
"""Constructs VisualConfig.
Args:
            vocab_size_or_config_json_file: Dimensionality of the input visual features,
                or the path to a JSON config file.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class VisualEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(VisualEmbeddings, self).__init__()
self.word_embeddings = nn.Linear(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_embeddings):
seq_length = input_embeddings.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(input_embeddings.size(0), -1)
words_embeddings = self.word_embeddings(input_embeddings)
# words_embeddings = self.transform_act_fn(words_embeddings)
position_embeddings = self.position_embeddings(position_ids)
# print("!!!INFO: VISUAL", words_embeddings.shape, position_embeddings.shape)
embeddings = words_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class VisualSelfAttention(nn.Module):
def __init__(self, config):
super(VisualSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
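    # Shape walkthrough (descriptive comment): transpose_for_scores maps
    # [batch, seq_len, hidden] -> view -> [batch, seq_len, num_heads, head_size]
    # -> permute -> [batch, num_heads, seq_len, head_size], so each head attends
    # over the sequence independently.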
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in VisualModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class VisualSelfOutput(nn.Module):
def __init__(self, config):
super(VisualSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class VisualAttention(nn.Module):
def __init__(self, config):
super(VisualAttention, self).__init__()
self.self = VisualSelfAttention(config)
self.output = VisualSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class VisualIntermediate(nn.Module):
def __init__(self, config):
super(VisualIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class VisualOutput(nn.Module):
def __init__(self, config):
super(VisualOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class VisualLayer(nn.Module):
def __init__(self, config):
super(VisualLayer, self).__init__()
self.attention = VisualAttention(config)
self.intermediate = VisualIntermediate(config)
self.output = VisualOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class VisualEncoder(nn.Module):
def __init__(self, config):
super(VisualEncoder, self).__init__()
layer = VisualLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class VisualPooler(nn.Module):
def __init__(self, config):
super(VisualPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class VisualPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(VisualPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class VisualLMPredictionHead(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(VisualLMPredictionHead, self).__init__()
self.transform = VisualPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.weight = visual_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(visual_model_embedding_weights.size(1)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(self.weight) + self.bias
return hidden_states
class VisualOnlyMLMHead(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(VisualOnlyMLMHead, self).__init__()
self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class VisualOnlyNSPHead(nn.Module):
def __init__(self, config):
super(VisualOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class VisualPreTrainingHeads(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(VisualPreTrainingHeads, self).__init__()
self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class VisualModel(PreTrainedModel):
"""Visual model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a VisualConfig class instance with the configuration to build a new model
Inputs:
        `video`: a torch.FloatTensor of shape [batch_size, sequence_length, vocab_size] holding the
            per-frame visual features (projected to `hidden_size` by the embedding layer).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch; it's the mask we typically use for attention when
            a batch has varying length sequences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for Visual-base, 24 for Visual-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
    # random per-frame visual features (dim 4096) and a padding mask
    video = torch.randn(2, 3, 4096)
    video_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
config = modeling.VisualConfig(vocab_size_or_config_json_file=4096, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.VisualModel(config=config)
all_encoder_layers, pooled_output = model(video, video_mask)
```
"""
def __init__(self, config):
super(VisualModel, self).__init__(config)
self.embeddings = VisualEmbeddings(config)
self.encoder = VisualEncoder(config)
self.pooler = VisualPooler(config)
self.apply(self.init_weights)
def forward(self, video, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones(video.size(0), video.size(1))
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
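        # e.g. a padding mask [1, 1, 0] becomes the additive mask [0., 0., -10000.],
        # which drives the padded position's softmax weight to ~0.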
embedding_output = self.embeddings(video)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output | 19,793 | 45.464789 | 139 | py |
TAILOR | TAILOR-main/src/models/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))  # x is a plain float, so use math.cos rather than torch.cos
def warmup_constant(x, warmup=0.002):
""" Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
Learning rate is 1. afterwards. """
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
""" Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero. """
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
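# Quick sanity check (an addition, not in the original file): with warmup=0.002 the
# linear schedule rises from 0 to 1 over the first 0.2% of training, then decays to 0:
# warmup_linear(0.001, 0.002) == 0.5; warmup_linear(0.002, 0.002) == 1.0;
# warmup_linear(0.501, 0.002) ~= 0.5; warmup_linear(1.0, 0.002) == 0.0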
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
# next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7
next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
# next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7
next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
progress = state['step']/group['t_total']
lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss | 7,260 | 42.220238 | 141 | py |
TAILOR | TAILOR-main/src/models/module_decoder.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import time
import torch
from torch import nn
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'decoder_config.json'
WEIGHTS_NAME = 'decoder_pytorch_model.bin'
class DecoderConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `DecoderModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_vocab_size=2,
initializer_range=0.02,
max_target_embeddings=128,
num_decoder_layers=1):
"""Constructs DecoderConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `DecoderModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`DecoderModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
max_target_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
            num_decoder_layers: Number of stacked decoder layers.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.max_target_embeddings = max_target_embeddings
self.num_decoder_layers = num_decoder_layers
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, decoder_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(decoder_model_embedding_weights.size(1),
decoder_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = decoder_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(decoder_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
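# NOTE (editor's annotation): the prediction head above ties its output
# projection to the decoder's input embedding matrix (weight sharing); only the
# per-token output bias is a newly introduced parameter.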
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, decoder_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, decoder_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, config):
super(MultiHeadAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, q, k, v, attention_mask):
mixed_query_layer = self.query(q)
mixed_key_layer = self.key(k)
mixed_value_layer = self.value(v)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed in DecoderModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
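# NOTE (editor's annotation): MultiHeadAttention returns the *pre-softmax*
# attention scores as its second value, even though callers such as
# DecoderAttention bind it to a name like `attention_probs`. The additive
# `attention_mask` must broadcast against the raw scores of shape
# [batch, num_heads, len_q, len_k], e.g. a [batch, 1, 1, len_k] tensor holding
# 0.0 for valid positions and -10000.0 for masked ones.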
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x.transpose(1, 2)
output = self.w_2(ACT2FN["gelu"](self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
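# NOTE (editor's annotation): the two Conv1d layers above use kernel size 1,
# which is equivalent to applying the same Linear layer at every position
# (a position-wise feed-forward network); the transposes move the hidden
# dimension into the channel axis that Conv1d expects.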
class DecoderAttention(nn.Module):
def __init__(self, config):
super(DecoderAttention, self).__init__()
self.att = MultiHeadAttention(config)
self.output = BertSelfOutput(config)
def forward(self, q, k, v, attention_mask):
att_output, attention_probs = self.att(q, k, v, attention_mask)
attention_output = self.output(att_output, q)
return attention_output, attention_probs
class DecoderLayer(nn.Module):
def __init__(self, config):
super(DecoderLayer, self).__init__()
self.slf_attn = DecoderAttention(config)
self.enc_attn = DecoderAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):
slf_output, _ = self.slf_attn(dec_input, dec_input, dec_input, slf_attn_mask)
dec_output, dec_att_scores = self.enc_attn(slf_output, enc_output, enc_output, dec_enc_attn_mask)
intermediate_output = self.intermediate(dec_output)
dec_output = self.output(intermediate_output, dec_output)
return dec_output, dec_att_scores
class DecoderEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(DecoderEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
# self.position_embeddings = nn.Embedding(config.max_target_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, label_input):  # label_input: [batch_size, tgt_len], e.g. [B, 7] label tokens
seq_length = label_input.size(1)
# position_ids = torch.arange(seq_length, dtype=torch.long, device=label_input.device)
# position_ids = position_ids.unsqueeze(0).expand(label_input.size(0), -1)
words_embeddings = self.word_embeddings(label_input)
# position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
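# NOTE (editor's annotation): the position embeddings in DecoderEmbeddings are
# commented out above, so the decoder's input embeddings are purely lexical and
# carry no positional information by themselves.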
class Decoder(nn.Module):
def __init__(self, config):
super(Decoder, self).__init__()
layer = DecoderLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_decoder_layers)])
def forward(self, hidden_states, encoder_outs, self_attn_mask, attention_mask, output_all_encoded_layers=False):
dec_att_scores = None
all_encoder_layers = []
all_dec_att_probs = []
for layer_module in self.layer:
hidden_states, dec_att_scores = layer_module(hidden_states, encoder_outs, self_attn_mask, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
all_dec_att_probs.append(dec_att_scores)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
all_dec_att_probs.append(dec_att_scores)
return all_encoder_layers, all_dec_att_probs
class DecoderClassifier(nn.Module):
def __init__(self, config, embedding_weights):
super(DecoderClassifier, self).__init__()
self.cls = BertOnlyMLMHead(config, embedding_weights)
def forward(self, hidden_states):
cls_scores = self.cls(hidden_states)
return cls_scores
class DecoderModel(PreTrainedModel):
"""
    Transformer decoder consisting of `config.num_decoder_layers` layers. Each layer
    is a :class:`DecoderLayer`.
    Args:
        config (DecoderConfig): model configuration specifying hidden sizes,
            the number of decoder layers, and dropout probabilities.
    """
def __init__(self, config):
super(DecoderModel, self).__init__(config)
self.config = config
self.max_target_length = config.max_target_embeddings
self.embeddings = DecoderEmbeddings(config)
self.decoder = Decoder(config)
self.apply(self.init_weights)
def forward(self, label_input, encoder_outs=None, answer_mask=None, encoder_mask=None):
"""
Args:
            label_input (LongTensor): previous decoder outputs (label tokens) of shape `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_outs (Tensor, optional): output from the encoder of shape `(batch, src_len, hidden_size)`, used for encoder-side attention
            answer_mask (Tensor): mask over the decoder inputs of shape `(batch, tgt_len)`, with 1 for valid positions
            encoder_mask (Tensor): mask over the encoder outputs of shape `(batch, src_len)`, with 1 for valid positions
        Returns:
            the last decoder layer's output of shape `(batch, tgt_len, hidden_size)`
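        Example usage (editor's sketch; shapes are illustrative):
            label_input = torch.randint(0, config.vocab_size, (2, 7))  # [batch, tgt_len]
            answer_mask = torch.ones(2, 7)                             # 1 = valid position
            encoder_outs = torch.randn(2, 10, config.hidden_size)      # [batch, src_len, hidden]
            encoder_mask = torch.ones(2, 10)                           # 1 = valid position
            sequence_output = model(label_input, encoder_outs, answer_mask, encoder_mask)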
"""
embedding_output = self.embeddings(label_input)
extended_encoder_mask = encoder_mask.unsqueeze(1).unsqueeze(2) # b x 1 x 1 x ls
extended_encoder_mask = extended_encoder_mask.to(dtype=self.dtype) # fp16 compatibility
extended_encoder_mask = (1.0 - extended_encoder_mask) * -10000.0
extended_answer_mask = answer_mask.unsqueeze(1).unsqueeze(2)
extended_answer_mask = extended_answer_mask.to(dtype=self.dtype) # fp16 compatibility
self_attn_mask = (1.0 - extended_answer_mask) * -10000.0
decoded_layers, dec_att_scores = self.decoder(embedding_output,
encoder_outs,
self_attn_mask,
extended_encoder_mask,
)
sequence_output = decoded_layers[-1]
return sequence_output
| 17,660 | 42.823821 | 123 | py |
TAILOR | TAILOR-main/src/models/until_module.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from .until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
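# NOTE (editor's annotation): the LayerNorm above follows the TensorFlow
# convention y = weight * (x - mean) / sqrt(var + eps) + bias, with the epsilon
# inside the square root and statistics taken over the last dimension.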
class PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
if 'beta' in dir(module) and 'gamma' in dir(module):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
else:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def resize_token_embeddings(self, new_num_tokens=None):
raise NotImplementedError
@classmethod
def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
if prefix is not None:
old_keys = []
new_keys = []
for key in state_dict.keys():
old_keys.append(key)
new_keys.append(prefix + key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='')
if prefix is None and (task_config is None or task_config.local_rank == 0):
logger.info("-" * 20)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(missing_keys)))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys)))
if len(error_msgs) > 0:
logger.error("Weights from pretrained model cause errors in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(error_msgs)))
return model
@property
def dtype(self):
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
@classmethod
def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
"""
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
return model
model = cls.init_preweight(model, state_dict)
return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
def __init__(self,):
super(CrossEn, self).__init__()
def forward(self, sim_matrix):
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
nce_loss = -logpt
sim_loss = nce_loss.mean()
return sim_loss
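# Usage sketch (editor's addition, not in the original file): CrossEn expects a
# square similarity matrix whose diagonal holds the matched (positive) pairs,
# e.g.
#     sim_matrix = video_emb @ text_emb.t()   # [B, B], positives on the diagonal
#     loss = CrossEn()(sim_matrix)            # mean of -log softmax at the diagonal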
class MILNCELoss(nn.Module):
def __init__(self, batch_size=1, n_pair=1,):
super(MILNCELoss, self).__init__()
self.batch_size = batch_size
self.n_pair = n_pair
        # Compare version components as integers; parsing "1.10" as a float
        # would yield 1.1 and wrongly fall back to torch.uint8.
        torch_major, torch_minor = (int(v) for v in torch.__version__.split(".")[:2])
        self.bool_dtype = torch.bool if (torch_major, torch_minor) >= (1, 3) else torch.uint8
def forward(self, sim_matrix):
mm_mask = np.eye(self.batch_size)
mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
from_text_matrix = sim_matrix + mm_mask * -1e12
from_video_matrix = sim_matrix.transpose(1, 0)
new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
logpt = F.log_softmax(new_sim_matrix, dim=-1)
mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
logpt_choice = torch.zeros_like(new_logpt)
mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
logpt_choice[mark_ind] = 1
sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
return sim_loss
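# NOTE (editor's annotation): MILNCELoss assumes the runtime sim_matrix is
# square with side batch_size * n_pair, grouped so that each block of n_pair
# consecutive rows/columns belongs to the same underlying sample; the Kronecker
# mask marks those within-sample pairs as positives.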
class MaxMarginRankingLoss(nn.Module):
def __init__(self,
margin=1.0,
negative_weighting=False,
batch_size=1,
n_pair=1,
hard_negative_rate=0.5,
):
super(MaxMarginRankingLoss, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, x):
d = torch.diag(x)
max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \
F.relu(self.margin + x - d.view(1, -1))
if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
max_margin = max_margin * self.mm_mask.to(max_margin.device)
return max_margin.mean()
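# NOTE (editor's annotation): MaxMarginRankingLoss also treats the diagonal of
# `x` as the positive pairs and applies a hinge of width `margin` against every
# row-wise and column-wise negative; with negative_weighting enabled, hard and
# easy negatives are reweighted by the precomputed `mm_mask`.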
def Focalloss(predictions, labels, weights=None, alpha=0.25, gamma=2):
"""Compute focal loss for predictions.
    Multi-label focal loss formula:
        FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p),
        where alpha = 0.25, gamma = 2, p = sigmoid(x), z = labels.
Args:
        predictions: A float tensor of shape [batch_size,
            num_classes] representing per-class probabilities (e.g. sigmoid
            outputs); they are clamped and logged directly
        labels: A float tensor of shape [batch_size,
            num_classes] representing multi-hot encoded classification targets
        weights: A float tensor of shape [batch_size] (currently unused)
alpha: A scalar tensor for focal loss alpha hyper-parameter
gamma: A scalar tensor for focal loss gamma hyper-parameter
Returns:
loss: A (scalar) tensor representing the value of the loss function
"""
    zeros = torch.zeros_like(predictions, dtype=predictions.dtype)
    # For positive predictions, only the front (first) part of the loss applies,
    # the back part is 0; labels > zeros <=> z=1, so the positive coefficient is z - p.
pos_p_sub = torch.where(labels > zeros, labels - predictions, zeros)
    # For negative predictions, only the back (second) part of the loss applies,
    # the front part is 0; labels > zeros <=> z=1, so the negative coefficient is 0 there.
neg_p_sub = torch.where(labels > zeros, zeros, predictions)
per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * torch.log(torch.clamp(predictions, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * torch.log(torch.clamp(1.0 - predictions, 1e-8, 1.0))
return torch.mean(torch.sum(per_entry_cross_ent, 1))
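# Usage sketch (editor's addition): Focalloss clamps and logs `predictions`
# directly, so they must already be probabilities in [0, 1] (apply sigmoid to
# the raw logits first), e.g.
#     probs = torch.sigmoid(logits)                  # [batch, num_classes]
#     loss = Focalloss(probs, multi_hot_labels)      # scalar tensor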
def getBinaryTensor(imgTensor, boundary = 0.35):
one = torch.ones_like(imgTensor)
zero = torch.zeros_like(imgTensor)
return torch.where(imgTensor > boundary, one, zero)
class GradReverse(torch.autograd.Function):
"""
Extension of grad reverse layer
"""
@staticmethod
def forward(ctx, x, constant):
ctx.constant = constant
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.neg() * ctx.constant
return grad_output, None
def grad_reverse(x, constant):
return GradReverse.apply(x, constant)
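# Usage sketch (editor's addition): grad_reverse is the standard gradient
# reversal layer for adversarial objectives. The forward pass is the identity;
# the backward pass multiplies incoming gradients by -constant, e.g.
#     reversed_feat = grad_reverse(features, constant=1.0)
#     domain_logits = domain_classifier(reversed_feat)
# (`features` and `domain_classifier` are hypothetical names for illustration.)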
class CTCModule(nn.Module):
def __init__(self, in_dim, out_seq_len):
'''
        This module performs a soft alignment from A (e.g., audio) to B (e.g., text).
:param in_dim: Dimension for input modality A
:param out_seq_len: Sequence length for output modality B
'''
super(CTCModule, self).__init__()
# Use LSTM for predicting the position from A to B
        self.pred_output_position_inclu_blank = nn.LSTM(in_dim, out_seq_len+1, num_layers=2, batch_first=True)  # +1 output class for the blank position (index 0)
self.out_seq_len = out_seq_len
self.softmax = nn.Softmax(dim=2)
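    # NOTE (editor's annotation): despite its name, this module does not compute
    # a CTC loss. The LSTM predicts, for every input step of A, a distribution
    # over output positions of B (index 0 = blank); forward() then uses those
    # probabilities as soft attention weights to re-order A into a pseudo-aligned
    # sequence of length out_seq_len.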
def forward(self, x):
'''
        :param x: Input with shape [batch_size x in_seq_len x in_dim]
'''
# NOTE that the index 0 refers to blank.
pred_output_position_inclu_blank, _ = self.pred_output_position_inclu_blank(x)
prob_pred_output_position_inclu_blank = self.softmax(pred_output_position_inclu_blank) # batch_size x in_seq_len x out_seq_len+1
prob_pred_output_position = prob_pred_output_position_inclu_blank[:, :, 1:] # batch_size x in_seq_len x out_seq_len
prob_pred_output_position = prob_pred_output_position.transpose(1,2) # batch_size x out_seq_len x in_seq_len
pseudo_aligned_out = torch.bmm(prob_pred_output_position, x) # batch_size x out_seq_len x in_dim
# pseudo_aligned_out is regarded as the aligned A (w.r.t B)
return pseudo_aligned_out, (pred_output_position_inclu_blank) | 13,782 | 40.640483 | 136 | py |
TAILOR | TAILOR-main/src/models/module_audio.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'audio_config.json'
WEIGHTS_NAME = 'audio_model.bin'
class AudioConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `AudioModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file=1024,
hidden_size=768,
num_hidden_layers=3,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02):
"""Constructs VisualConfig.
Args:
vocab_size_or_config_json_file: Size of the encoder layers and the pooler layer.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class AudioEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(AudioEmbeddings, self).__init__()
self.word_embeddings = nn.Linear(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_embeddings):
seq_length = input_embeddings.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(input_embeddings.size(0), -1)
words_embeddings = self.word_embeddings(input_embeddings)
# words_embeddings = self.transform_act_fn(words_embeddings)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
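# NOTE (editor's annotation): despite the name, `word_embeddings` above is an
# nn.Linear projection over continuous audio feature vectors, not an embedding
# lookup; `config.vocab_size` therefore denotes the input feature dimension.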
class AudioSelfAttention(nn.Module):
def __init__(self, config):
super(AudioSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in AudioModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class AudioSelfOutput(nn.Module):
def __init__(self, config):
super(AudioSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AudioAttention(nn.Module):
def __init__(self, config):
super(AudioAttention, self).__init__()
self.self = AudioSelfAttention(config)
self.output = AudioSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class AudioIntermediate(nn.Module):
def __init__(self, config):
super(AudioIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class AudioOutput(nn.Module):
def __init__(self, config):
super(AudioOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AudioLayer(nn.Module):
def __init__(self, config):
super(AudioLayer, self).__init__()
self.attention = AudioAttention(config)
self.intermediate = AudioIntermediate(config)
self.output = AudioOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class AudioEncoder(nn.Module):
def __init__(self, config):
super(AudioEncoder, self).__init__()
layer = AudioLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class AudioPooler(nn.Module):
def __init__(self, config):
super(AudioPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
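# NOTE (editor's annotation): AudioPooler summarizes the sequence by projecting
# the hidden state of the first time step only; with raw feature sequences
# (no prepended [CLS]-style token) the first audio frame plays that role.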
class AudioPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(AudioPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class AudioLMPredictionHead(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(AudioLMPredictionHead, self).__init__()
self.transform = AudioPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.weight = visual_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(visual_model_embedding_weights.size(1)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(self.weight) + self.bias
return hidden_states
class AudioOnlyMLMHead(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(AudioOnlyMLMHead, self).__init__()
self.predictions = AudioLMPredictionHead(config, visual_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class AudioOnlyNSPHead(nn.Module):
def __init__(self, config):
super(AudioOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class AudioPreTrainingHeads(nn.Module):
def __init__(self, config, visual_model_embedding_weights):
super(AudioPreTrainingHeads, self).__init__()
self.predictions = AudioLMPredictionHead(config, visual_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class AudioModel(PreTrainedModel):
"""Audio model ("Bidirectional Embedding Representations from a Transformer").
Params:
        config: an AudioConfig class instance with the configuration to build a new model
Inputs:
        `audio`: a torch.FloatTensor of shape [batch_size, sequence_length, feature_dim]
            with the audio feature sequence; feature_dim must equal `config.vocab_size`,
            which here denotes the input feature dimension.
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sequences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by the `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. `config.num_hidden_layers` full sequences), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`CLS`-style) to train on the Next-Sentence task (see the BERT paper).
Example usage:
```python
    # Continuous audio features, not token ids
    audio = torch.randn(2, 64, 1024)                  # [batch, seq_len, feature_dim]
    audio_mask = torch.ones(2, 64, dtype=torch.long)  # 1 = valid frame
    config = AudioConfig(vocab_size_or_config_json_file=1024, hidden_size=768,
        num_hidden_layers=3, num_attention_heads=12, intermediate_size=3072)
    model = AudioModel(config=config)
    all_encoder_layers, pooled_output = model(audio, audio_mask)
```
"""
def __init__(self, config):
super(AudioModel, self).__init__(config)
self.embeddings = AudioEmbeddings(config)
self.encoder = AudioEncoder(config)
self.pooler = AudioPooler(config)
self.apply(self.init_weights)
def forward(self, audio, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
            attention_mask = torch.ones(audio.size(0), audio.size(1), device=audio.device)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(audio)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
| 19,684 | 44.252874 | 139 | py |
TAILOR | TAILOR-main/src/models/module_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'bert_pytorch_model.bin'
class BertConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `VisualModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file=4096,
hidden_size=768,
num_hidden_layers=3,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02):
"""Constructs VisualConfig.
Args:
vocab_size_or_config_json_file: Size of the encoder layers and the pooler layer.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Linear(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_embeddings):
seq_length = input_embeddings.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(input_embeddings.size(0), -1)
words_embeddings = self.word_embeddings(input_embeddings)
# words_embeddings = self.transform_act_fn(words_embeddings)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
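# NOTE (editor's annotation): unlike the decoder embeddings, this encoder adds
# learned position embeddings, so input sequences are implicitly capped at
# config.max_position_embeddings time steps.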
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(1)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(self.weight) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertModel(PreTrainedModel):
"""Visual model ("Bidirectional Embedding Representations from a Transformer").
Params:
        config: a BertConfig class instance with the configuration to build a new model
Inputs:
        `video`: a torch.FloatTensor of shape [batch_size, sequence_length, feature_dim]
            with the input feature sequence (e.g., visual features); feature_dim must
            equal `config.vocab_size`, which here denotes the input feature dimension.
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sequences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by the `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. `config.num_hidden_layers` full sequences), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`CLS`-style) to train on the Next-Sentence task (see the BERT paper).
Example usage:
```python
    # Continuous feature vectors, not token ids
    video = torch.randn(2, 32, 4096)                  # [batch, seq_len, feature_dim]
    video_mask = torch.ones(2, 32, dtype=torch.long)  # 1 = valid frame
    config = BertConfig(vocab_size_or_config_json_file=4096, hidden_size=768,
        num_hidden_layers=3, num_attention_heads=12, intermediate_size=3072)
    model = BertModel(config=config)
    all_encoder_layers, pooled_output = model(video, video_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def forward(self, video, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
            attention_mask = torch.ones(video.size(0), video.size(1), device=video.device)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(video)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output | 19,604 | 45.129412 | 139 | py |
TAILOR | TAILOR-main/src/models/module_cross.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'cross_config.json'
WEIGHTS_NAME = 'cross_pytorch_model.bin'
class CrossConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `CrossModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs CrossConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CrossModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`CrossModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class CrossEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(CrossEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, concat_embeddings, concat_type=None):
batch_size, seq_length = concat_embeddings.size(0), concat_embeddings.size(1)
        if concat_type is None:
            concat_type = torch.zeros(batch_size, seq_length, dtype=torch.long,
                                      device=concat_embeddings.device)
position_ids = torch.arange(seq_length, dtype=torch.long, device=concat_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(concat_embeddings.size(0), -1)
token_type_embeddings = self.token_type_embeddings(concat_type)
position_embeddings = self.position_embeddings(position_ids)
embeddings = concat_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class CrossSelfAttention(nn.Module):
def __init__(self, config):
super(CrossSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
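    # Shape walk-through (illustrative): with hidden_size=768 and 12 heads,
    # transpose_for_scores maps [batch, seq_len, 768] -> [batch, 12, seq_len, 64],
    # so each head attends over the sequence with its own 64-dim slice.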
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the CrossModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class CrossSelfOutput(nn.Module):
def __init__(self, config):
super(CrossSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class CrossAttention(nn.Module):
def __init__(self, config):
super(CrossAttention, self).__init__()
self.self = CrossSelfAttention(config)
self.output = CrossSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class CrossIntermediate(nn.Module):
def __init__(self, config):
super(CrossIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class CrossOutput(nn.Module):
def __init__(self, config):
super(CrossOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class CrossLayer(nn.Module):
def __init__(self, config):
super(CrossLayer, self).__init__()
self.attention = CrossAttention(config)
self.intermediate = CrossIntermediate(config)
self.output = CrossOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class CrossEncoder(nn.Module):
def __init__(self, config):
super(CrossEncoder, self).__init__()
layer = CrossLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class CrossPooler(nn.Module):
def __init__(self, config):
super(CrossPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class CrossPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(CrossPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class CrossLMPredictionHead(nn.Module):
def __init__(self, config, cross_model_embedding_weights):
super(CrossLMPredictionHead, self).__init__()
self.transform = CrossPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(cross_model_embedding_weights.size(1),
cross_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = cross_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(cross_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class CrossOnlyMLMHead(nn.Module):
def __init__(self, config, cross_model_embedding_weights):
super(CrossOnlyMLMHead, self).__init__()
self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class CrossOnlyNSPHead(nn.Module):
def __init__(self, config):
super(CrossOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class CrossPreTrainingHeads(nn.Module):
def __init__(self, config, cross_model_embedding_weights):
super(CrossPreTrainingHeads, self).__init__()
self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class CrossModel(PreTrainedModel):
def __init__(self, config):
super(CrossModel, self).__init__(config)
self.embeddings = CrossEmbeddings(config)
self.encoder = CrossEncoder(config)
self.pooler = CrossPooler(config)
self.apply(self.init_weights)
def forward(self, concat_input, concat_type=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
            attention_mask = torch.ones(concat_input.size(0), concat_input.size(1), device=concat_input.device)
if concat_type is None:
concat_type = torch.zeros_like(attention_mask)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(concat_input, concat_type)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
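if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original file). It assumes the
    # relative imports above resolve (i.e. the file is run as a module inside
    # the package) and only checks tensor shapes on random inputs; all sizes
    # here are arbitrary.
    cfg = CrossConfig(vocab_size_or_config_json_file=0, hidden_size=32,
                      num_hidden_layers=2, num_attention_heads=4,
                      intermediate_size=64)
    model = CrossModel(cfg)
    concat_input = torch.randn(2, 5, 32)            # [batch, seq_len, hidden]
    attention_mask = torch.ones(2, 5)
    concat_type = torch.zeros(2, 5, dtype=torch.long)
    layers, pooled = model(concat_input, concat_type, attention_mask,
                           output_all_encoded_layers=False)
    print(layers.shape, pooled.shape)  # expected: torch.Size([2, 5, 32]) torch.Size([2, 32])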
| 17,540 | 43.183879 | 108 | py |
TAILOR | TAILOR-main/src/models/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
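# Example (illustrative): url_to_filename("https://example.com/model.tar.gz")
# returns the hex SHA-256 of the URL bytes; with etag="abc" it returns the same
# hash plus "." plus the hex SHA-256 of the etag bytes.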
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
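if __name__ == "__main__":
    # Minimal sketch (not part of the original file): exercises the two pure
    # helpers that need no network access. Assumes the third-party imports at
    # the top of this module (boto3, requests, tqdm) are installed.
    name = url_to_filename("https://example.com/model.tar.gz", etag="abc123")
    print(name)
    print(get_file_extension("weights/model.TAR"))  # -> ".tar"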
| 8,021 | 32.425 | 98 | py |
TAILOR | TAILOR-main/src/models/models.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
#torch.set_printoptions(profile="full")
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
import time
from .module_bert import BertModel, BertConfig, BertOnlyMLMHead
from .until_module import PreTrainedModel, LayerNorm, CrossEn, MILNCELoss, Focalloss
from .module_visual import VisualModel, VisualConfig, VisualOnlyMLMHead
from .module_audio import AudioModel, AudioConfig, AudioOnlyMLMHead
from .module_cross import CrossModel, CrossConfig
from .module_decoder import DecoderModel, DecoderConfig
from .until_module import getBinaryTensor, GradReverse, CTCModule
# from warpctc_pytorch import CTCLoss  # needed only when task_config.aligned is False (CTCLoss is used below); install warp-ctc and uncomment for unaligned data
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
class EmotionClassifier(nn.Module):
def __init__(self, input_dims, num_classes=1, dropout=0.1):
super(EmotionClassifier, self).__init__()
self.dense = nn.Linear(input_dims, num_classes)
self.activation = nn.Sigmoid()
self.dropout = nn.Dropout(dropout)
def forward(self, seq_input):
output = self.dense(seq_input)
output = self.dropout(output)
output = self.activation(output)
return output
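# Example usage (illustrative): EmotionClassifier(768, num_classes=6) maps
# features of shape [batch, 768] (or [batch, seq, 768]) to per-class sigmoid
# scores of shape [batch, 6]; note dropout is applied after the linear layer,
# before the sigmoid.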
class TAILORPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
def __init__(self, bert_config, visual_config, audio_config, cross_config, decoder_config,*inputs, **kwargs):
# utilize bert config as base config
super(TAILORPreTrainedModel, self).__init__(visual_config)
self.bert_config = bert_config
self.visual_config = visual_config
self.audio_config = audio_config
self.cross_config = cross_config
self.decoder_config = decoder_config
        self.visual = None
        self.vat_cross = None
        self.va_cross = None
        self.pc_cross = None
        self.audio = None
        self.bert = None
        self.decoder = None
@classmethod
def from_pretrained(cls, bert_model_name, visual_model_name, audio_model_name, cross_model_name, decoder_model_name,
state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
bert_config, _= BertConfig.get_config(bert_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
visual_config, _ = VisualConfig.get_config(visual_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
audio_config, _ = AudioConfig.get_config(audio_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
decoder_config, _ = DecoderConfig.get_config(decoder_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(bert_config, visual_config, audio_config, cross_config, decoder_config, *inputs, **kwargs)
assert model.bert is not None
assert model.visual is not None
assert model.audio is not None
assert model.vat_cross is not None
assert model.va_cross is not None
assert model.pc_cross is not None
assert model.decoder is not None
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
class NormalizeText(nn.Module):
def __init__(self, task_config):
super(NormalizeText, self).__init__()
self.text_norm2d = LayerNorm(task_config.text_dim)
def forward(self, text):
text = torch.as_tensor(text).float()
text = text.view(-1, text.shape[-2], text.shape[-1])
text = self.text_norm2d(text)
return text
class NormalizeVideo(nn.Module):
def __init__(self, task_config):
super(NormalizeVideo, self).__init__()
self.visual_norm2d = LayerNorm(task_config.video_dim)
def forward(self, visual):
visual = torch.as_tensor(visual).float()
visual = visual.view(-1, visual.shape[-2], visual.shape[-1])
visual = self.visual_norm2d(visual)
return visual
class NormalizeAudio(nn.Module):
def __init__(self, task_config):
super(NormalizeAudio, self).__init__()
self.audio_norm2d = LayerNorm(task_config.audio_dim)
def forward(self, audio):
audio = torch.as_tensor(audio).float()
audio = audio.view(-1, audio.shape[-2], audio.shape[-1])
audio = self.audio_norm2d(audio)
        return audio  # output: [B, L, D]
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class TAILOR(TAILORPreTrainedModel):
def __init__(self, bert_config, visual_config, audio_config, cross_config, decoder_config, task_config):
super(TAILOR, self).__init__(bert_config, visual_config, audio_config, cross_config, decoder_config)
self.task_config = task_config
self.ignore_video_index = -1
self.num_classes = task_config.num_classes
self.aligned = task_config.aligned
assert self.task_config.max_frames <= visual_config.max_position_embeddings
assert self.task_config.max_sequence <= audio_config.max_position_embeddings
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
# Text Encoder ====>
bert_config = update_attr("bert_config", bert_config, "num_hidden_layers",
self.task_config, "bert_num_hidden_layers")
self.bert = BertModel(bert_config)
bert_word_embeddings_weight = self.bert.embeddings.word_embeddings.weight
bert_position_embeddings_weight = self.bert.embeddings.position_embeddings.weight
# <==== End of Text Encoder
# Video Encoder ===>
visual_config = update_attr("visual_config", visual_config, "num_hidden_layers",
self.task_config, "visual_num_hidden_layers")
self.visual = VisualModel(visual_config)
visual_word_embeddings_weight = self.visual.embeddings.word_embeddings.weight
# <=== End of Video Encoder
# Audio Encoder ====>
audio_config = update_attr("audio_config", audio_config, "num_hidden_layers",
self.task_config, "audio_num_hidden_layers")
self.audio = AudioModel(audio_config)
audio_word_embedding_weight = self.audio.embeddings.word_embeddings.weight
# <====End of Audio_Encoder
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers",
self.task_config, "cross_num_hidden_layers")
self.vat_cross = CrossModel(cross_config)
self.va_cross = CrossModel(cross_config)
self.pc_cross = CrossModel(cross_config)
# self.at_cross = CrossModel(cross_config)
# <=== End of Cross Encoder
# Label Decoder =====>
decoder_config = update_attr("decoder_config", decoder_config, "num_decoder_layers",
self.task_config, "decoder_num_hidden_layers")
self.decoder = DecoderModel(decoder_config)
# <=========== End of Decoder
        self.common_feature_extractor = nn.Sequential(
nn.Linear(task_config.hidden_size, task_config.hidden_size),
nn.Dropout(p=0.3),
nn.Tanh()
)
self.common_classfier = nn.Sequential(
nn.Linear(task_config.hidden_size, self.num_classes),
nn.Dropout(0.1),
nn.Sigmoid()
)
self.private_feature_extractor = nn.ModuleList([nn.Sequential(
nn.Linear(task_config.hidden_size, task_config.hidden_size),
nn.Dropout(p=0.1),
nn.Tanh()
) for _ in range(3)])
self.modal_discriminator = nn.Sequential(
nn.Linear(task_config.hidden_size, task_config.hidden_size // 2),
nn.Dropout(p=0.1),
nn.ReLU(),
nn.Linear(task_config.hidden_size // 2, 3),
)
self.cross_classifier = EmotionClassifier(cross_config.hidden_size, 1)
self.text_norm = NormalizeText(task_config)
self.visual_norm = NormalizeVideo(task_config)
self.audio_norm = NormalizeAudio(task_config)
self.ml_loss = nn.BCELoss()
self.adv_loss = nn.CrossEntropyLoss()
if self.aligned == False:
self.a2t_ctc = CTCModule(task_config.audio_dim, 50)
self.v2t_ctc = CTCModule(task_config.video_dim, 50)
self.ctc_criterion = CTCLoss()
self.apply(self.init_weights)
def forward(self, text, text_mask, visual, visual_mask, audio, audio_mask,
label_input, label_mask, groundTruth_labels=None, training=True):
"""
text: [B, L, Dt]
visual: [B, L, Dv]
audio: [B, L, Da]
"""
label_input = label_input.unsqueeze(0)
batch = text.size(0)
label_input = label_input.repeat(batch, 1)
label_mask = label_mask.unsqueeze(0).repeat(batch, 1)
text = self.text_norm(text)
visual = self.visual_norm(visual)
audio = self.audio_norm(audio)
# ========> aligned
if self.aligned == False:
visual, v2t_position = self.v2t_ctc(visual)
audio, a2t_position = self.a2t_ctc(audio)
text_output, visual_output, audio_output = self.get_text_visual_audio_output(text, text_mask, visual, visual_mask, audio, audio_mask) #[B, L, D]
# =========> common and private feature extractor
private_text = self.private_feature_extractor[0](text_output)
private_visual = self.private_feature_extractor[1](visual_output)
private_audio = self.private_feature_extractor[2](audio_output)
common_text = self.common_feature_extractor(text_output)
common_visual = self.common_feature_extractor(visual_output)
common_audio = self.common_feature_extractor(audio_output)
common_feature = common_text + common_visual + common_audio
# <========= common and private feature extractor
common_mask = torch.ones_like(text_mask)
pooled_output, cross_output, cross_mask = self._get_cross_output(private_text, private_visual, private_audio, common_feature, text_mask, visual_mask, audio_mask, common_mask)
# ==========> label modal alignment
decoder_output = self.decoder(label_input, cross_output, label_mask, cross_mask)
# <========== label modal alignment
cross_predict_scores = self.cross_classifier(decoder_output)
cross_predict_scores = cross_predict_scores.view(-1, self.num_classes)
predict_scores = cross_predict_scores
predict_labels = getBinaryTensor(predict_scores)
groundTruth_labels = groundTruth_labels.view(-1, self.num_classes)
if training:
            text_modal = torch.zeros_like(common_mask).view(-1)  # [B*L]
            visual_modal = torch.ones_like(common_mask).view(-1)  # [B*L]
            audio_modal = visual_modal.data.new(visual_modal.size()).fill_(2)  # [B*L]
private_text_modal_pred = self.modal_discriminator(private_text).view(-1, 3)
private_visual_modal_pred = self.modal_discriminator(private_visual).view(-1, 3)
private_audio_modal_pred = self.modal_discriminator(private_audio).view(-1, 3)
            # ==========> adversarial Training
common_text_modal_pred = self.modal_discriminator(GradReverse.grad_reverse(common_text, 1)).view(-1, 3)
common_visual_modal_pred = self.modal_discriminator(GradReverse.grad_reverse(common_visual, 1)).view(-1, 3)
common_audio_modal_pred = self.modal_discriminator(GradReverse.grad_reverse(common_audio, 1)).view(-1, 3)
            # <========== adversarial Training
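            # GradReverse.grad_reverse implements the gradient-reversal trick
            # from domain-adversarial training (as typically defined in
            # until_module): identity on the forward pass, gradients scaled by
            # -1 on the backward pass, so the common encoder learns features
            # the modality discriminator cannot separate while the
            # discriminator itself is trained normally.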
all_loss = 0.
pooled_common = common_feature[:, 0] #[B, D]
common_pred = self.common_classfier(pooled_common)
ml_loss = self.ml_loss(predict_scores, groundTruth_labels)
cml_loss = self.ml_loss(common_pred, groundTruth_labels)
            private_diff_loss = self.calculate_orthogonality_loss(private_text, private_visual) + self.calculate_orthogonality_loss(private_text, private_audio) + self.calculate_orthogonality_loss(private_visual, private_audio)
            common_diff_loss = self.calculate_orthogonality_loss(common_text, private_text) + self.calculate_orthogonality_loss(common_visual, private_visual) + self.calculate_orthogonality_loss(common_audio, private_audio)
            adv_private_loss = self.adv_loss(private_text_modal_pred, text_modal) + self.adv_loss(private_visual_modal_pred, visual_modal) + self.adv_loss(private_audio_modal_pred, audio_modal)
            adv_common_loss = self.adv_loss(common_text_modal_pred, text_modal) + self.adv_loss(common_visual_modal_pred, visual_modal) + self.adv_loss(common_audio_modal_pred, audio_modal)
if self.aligned == False:
text_length , audio_length, visual_length = text.size(1), audio.size(1), visual.size(1)
t_position = torch.tensor([i+1 for i in range(text_length)] * batch, device=text.device).int().cpu()
t_length = torch.tensor([text_length] * batch).int().cpu()
a_length = torch.tensor([audio_length] * batch).int().cpu()
v_length = torch.tensor([visual_length] * batch).int().cpu()
ctc_v2t_loss = self.ctc_criterion(v2t_position.transpose(0, 1).cpu(), t_position, v_length, t_length)
ctc_a2t_loss = self.ctc_criterion(a2t_position.transpose(0, 1).cpu(), t_position, a_length, t_length)
ctc_loss = ctc_v2t_loss + ctc_a2t_loss
ctc_loss = ctc_loss.cuda()
if self.aligned:
                all_loss = ml_loss + 0.01 * (adv_common_loss + adv_private_loss) + 5e-6 * (private_diff_loss + common_diff_loss) + 0.5 * cml_loss
            else:
                all_loss = ml_loss + 0.01 * (adv_common_loss + adv_private_loss) + 5e-6 * (private_diff_loss + common_diff_loss) + 0.5 * cml_loss + 0.5 * ctc_loss
return all_loss, predict_labels, groundTruth_labels, predict_scores
else:
return predict_labels, groundTruth_labels, predict_scores
def get_text_visual_audio_output(self, text, text_mask, visual, visual_mask, audio, audio_mask):
"""
Uni-modal Extractor
"""
text_layers, text_pooled_output = self.bert(text, text_mask, output_all_encoded_layers=True)
text_output = text_layers[-1]
visual_layers, visual_pooled_output = self.visual(visual, visual_mask, output_all_encoded_layers=True)
visual_output = visual_layers[-1]
audio_layers, audio_pooled_output = self.audio(audio, audio_mask, output_all_encoded_layers=True)
audio_output = audio_layers[-1]
return text_output, visual_output, audio_output
def _get_cross_output(self, sequence_output, visual_output, audio_output, common_feature, attention_mask, visual_mask, audio_mask, common_mask):
# =============> visual audio fusion
va_concat_features = torch.cat((audio_output, visual_output), dim=1)
va_concat_mask = torch.cat((audio_mask, visual_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(visual_mask)
audio_type_ = torch.zeros_like(audio_mask)
va_concat_type = torch.cat((audio_type_, video_type_), dim=1)
va_cross_layers, va_pooled_output = self.va_cross(va_concat_features, va_concat_type, va_concat_mask)
va_cross_output = va_cross_layers[-1]
# <============= visual audio fusion
# =============> VisualAudio and text fusion
vat_concat_features = torch.cat((sequence_output, va_cross_output), dim=1)
vat_concat_mask = torch.cat((attention_mask, va_concat_mask), dim=1)
va_type_ = torch.ones_like(va_concat_mask)
vat_concat_type = torch.cat((text_type_, va_type_), dim=1)
vat_cross_layers, vat_pooled_output = self.vat_cross(vat_concat_features, vat_concat_type, vat_concat_mask)
vat_cross_output = vat_cross_layers[-1]
# <============= VisualAudio and text fusion
# =============> private common fusion
pc_concate_features = torch.cat((vat_cross_output, common_feature), dim=1)
specific_type = torch.zeros_like(vat_concat_mask)
common_type = torch.ones_like(common_mask)
pc_concate_type = torch.cat((specific_type, common_type), dim=1)
pc_concat_mask = torch.cat((vat_concat_mask, common_mask), dim=1)
pc_cross_layers, pc_pooled_output = self.pc_cross(pc_concate_features, pc_concate_type, pc_concat_mask)
pc_cross_output = pc_cross_layers[-1]
# <============= private common fusion
return pc_pooled_output, pc_cross_output, pc_concat_mask
def inference(self, text, text_mask, visual, visual_mask, audio, audio_mask, \
label_input, label_mask, groundTruth_labels=None):
label_input = label_input.unsqueeze(0)
batch = text.size(0)
label_input = label_input.repeat(batch, 1)
label_mask = label_mask.unsqueeze(0).repeat(batch, 1)
text = self.text_norm(text)
visual = self.visual_norm(visual)
audio = self.audio_norm(audio)
if self.aligned == False:
visual, _ = self.v2t_ctc(visual)
audio, _ = self.a2t_ctc(audio)
text_output, visual_output, audio_output = self.get_text_visual_audio_output(text, text_mask, visual, visual_mask, audio, audio_mask)
private_text = self.private_feature_extractor[0](text_output)
private_visual = self.private_feature_extractor[1](visual_output)
private_audio = self.private_feature_extractor[2](audio_output)
common_text = self.common_feature_extractor(text_output)
common_visual = self.common_feature_extractor(visual_output)
common_audio = self.common_feature_extractor(audio_output)
common_feature = (common_text + common_visual + common_audio) #[B, L, D]
        private_feature = private_text + private_visual + private_audio
        pooled_common = common_feature[:, 0]  # [B, D]
        pooled_private = private_feature[:, 0]
        common_pred = self.common_classfier(pooled_common)
        private_pred = self.common_classfier(pooled_private)
common_mask = torch.ones_like(text_mask)
pooled_output, cross_output, cross_mask = self._get_cross_output(private_text, private_visual, private_audio, common_feature, text_mask, visual_mask, audio_mask, common_mask)
decoder_output = self.decoder(label_input, cross_output, label_mask, cross_mask)
cross_predict_scores = self.cross_classifier(decoder_output)
cross_predict_scores = cross_predict_scores.view(-1, self.num_classes)
predict_scores = cross_predict_scores
predict_labels = getBinaryTensor(predict_scores)
groundTruth_labels = groundTruth_labels.view(-1, self.num_classes)
return predict_labels, groundTruth_labels
def calculate_orthogonality_loss(self, first_feature, second_feature):
diff_loss = torch.norm(torch.bmm(first_feature, second_feature.transpose(1, 2)), dim=(1, 2)).pow(2).mean()
return diff_loss
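if __name__ == "__main__":
    # Minimal sketch (not part of the original file; run as a module so the
    # relative imports above resolve). It reproduces the orthogonality penalty
    # defined above on random features so the formula is easy to inspect: the
    # squared Frobenius norm of the batched Gram matrix
    # first_feature @ second_feature^T, averaged over the batch.
    first = torch.randn(2, 4, 8)
    second = torch.randn(2, 4, 8)
    diff_loss = torch.norm(torch.bmm(first, second.transpose(1, 2)), dim=(1, 2)).pow(2).mean()
    print(diff_loss.item())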
| 21,639 | 47.848758 | 228 | py |
TAILOR | TAILOR-main/src/models/__init__.py | 0 | 0 | 0 | py |
|
TAILOR | TAILOR-main/src/models/until_config.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import logging
import tarfile
import tempfile
import shutil
import torch
from .file_utils import cached_path
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
pretrained_model_archive_map = {}
config_name = ""
weights_name = ""
@classmethod
def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
if os.path.exists(archive_file) is False:
if pretrained_model_name in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
if task_config is None or task_config.local_rank == 0:
logger.error(
"Model name '{}' was not found in model name list. "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
archive_file))
return None
if resolved_archive_file == archive_file:
if task_config is None or task_config.local_rank == 0:
logger.info("loading archive file {}".format(archive_file))
else:
if task_config is None or task_config.local_rank == 0:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
if task_config is None or task_config.local_rank == 0:
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(archive, tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, cls.config_name)
config = cls.from_json_file(config_file)
config.type_vocab_size = type_vocab_size
if task_config is None or task_config.local_rank == 0:
logger.info("Model config {}".format(config))
if state_dict is None:
weights_path = os.path.join(serialization_dir, cls.weights_name)
if os.path.exists(weights_path):
state_dict = torch.load(weights_path, map_location='cpu')
else:
if task_config is None or task_config.local_rank == 0:
logger.info("Weight doesn't exsits. {}".format(weights_path))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return config, state_dict
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" | 5,941 | 39.97931 | 105 | py |
TAILOR | TAILOR-main/src/dataloaders/cmu_dataloader.py | # %%
import os
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from collections import defaultdict
import json
import random
import time
import pickle
"""
CMU-MOSEI info
Train 16326 samples
Val 1871 samples
Test 4659 samples
CMU-MOSEI feature shapes
visual: (60, 35)
audio: (60, 74)
text: GLOVE->(60, 300)
label: (6) -> [happy, sad, anger, surprise, disgust, fear]
averaged from 3 annotators
unaligned:
text: (50, 300)
visual: (500, 35)
audio: (500, 74)
"""
emotion_dict = {4:0, 5:1, 6:2, 7:3, 8:4, 9:5}
class AlignedMoseiDataset(Dataset):
def __init__(self, data_path, data_type):
self.data_path = data_path
self.data_type = data_type
self.visual, self.audio, \
self.text, self.labels = self._get_data(self.data_type)
def _get_data(self, data_type):
data = torch.load(self.data_path)
data = data[data_type]
visual = data['src-visual']
audio = data['src-audio']
text = data['src-text']
labels = data['tgt']
return visual, audio, text, labels
def _get_text(self, index):
text = self.text[index]
text_mask = [1] * text.shape[0]
text_mask = np.array(text_mask)
return text, text_mask
def _get_visual(self, index):
visual = self.visual[index]
visual_mask = [1] * visual.shape[0]
visual_mask = np.array(visual_mask)
return visual, visual_mask
def _get_audio(self, index):
audio = self.audio[index]
audio[audio == -np.inf] = 0
audio_mask = [1] * audio.shape[0]
audio_mask = np.array(audio_mask)
return audio, audio_mask
def _get_labels(self, index):
label_list = self.labels[index]
label = np.zeros(6, dtype=np.float32)
filter_label = label_list[1:-1]
for emo in filter_label:
label[emotion_dict[emo]] = 1
return label
def _get_label_input(self):
labels_embedding = np.arange(6)
labels_mask = [1] * labels_embedding.shape[0]
labels_mask = np.array(labels_mask)
labels_embedding = torch.from_numpy(labels_embedding)
labels_mask = torch.from_numpy(labels_mask)
return labels_embedding, labels_mask
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
text, text_mask = self._get_text(index)
visual, visual_mask = self._get_visual(index)
audio, audio_mask = self._get_audio(index)
label = self._get_labels(index)
return text, text_mask, visual, visual_mask, \
audio, audio_mask, label
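# Example usage (illustrative; the .pt path is a placeholder):
#   dataset = AlignedMoseiDataset("/path/to/mosei_aligned.pt", "train")
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   text, text_mask, visual, visual_mask, audio, audio_mask, label = next(iter(loader))
#   label_input, label_mask = dataset._get_label_input()  # shared label queries for the decoder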
class UnAlignedMoseiDataset(Dataset):
def __init__(self, data_path, data_type):
self.data_path = data_path
self.data_type = data_type
self.visual, self.audio, \
self.text, self.labels = self._get_data(self.data_type)
def _get_data(self, data_type):
label_data = torch.load(self.data_path)
label_data = label_data[data_type]
        with open('/amax/cmy/mosei_senti_data_noalign.pkl', 'rb') as f:  # NOTE: machine-specific path to the unaligned MOSEI features; adjust locally
data = pickle.load(f)
data = data[data_type]
visual = data['vision']
audio = data['audio']
text = data['text']
audio = np.array(audio)
labels = label_data['tgt']
return visual, audio, text, labels
def _get_text(self, index):
text = self.text[index]
text_mask = [1] * text.shape[0]
text_mask = np.array(text_mask)
return text, text_mask
def _get_visual(self, index):
visual = self.visual[index]
visual_mask = [1] * 50
visual_mask = np.array(visual_mask)
return visual, visual_mask
def _get_audio(self, index):
audio = self.audio[index]
audio[audio == -np.inf] = 0
audio_mask = [1] * 50
audio_mask = np.array(audio_mask)
return audio, audio_mask
def _get_labels(self, index):
label_list = self.labels[index]
label = np.zeros(6, dtype=np.float32)
filter_label = label_list[1:-1]
for emo in filter_label:
label[emotion_dict[emo]] = 1
return label
def _get_label_input(self):
labels_embedding = np.arange(6)
labels_mask = [1] * labels_embedding.shape[0]
labels_mask = np.array(labels_mask)
labels_embedding = torch.from_numpy(labels_embedding)
labels_mask = torch.from_numpy(labels_mask)
return labels_embedding, labels_mask
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
text, text_mask = self._get_text(index)
visual, visual_mask = self._get_visual(index)
audio, audio_mask = self._get_audio(index)
label = self._get_labels(index)
return text, text_mask, visual, visual_mask, \
audio, audio_mask, label | 4,973 | 27.422857 | 71 | py |
TAILOR | TAILOR-main/src/dataloaders/__init__.py | 0 | 0 | 0 | py |
|
TAILOR | TAILOR-main/src/utils/eval_gap.py | import sys,os
import argparse
import numpy as np
import json
import heapq
import random
import numbers
# utils
def flatten(l):
""" Merges a list of lists into a single list. """
return [item for sublist in l for item in sublist]
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or
None to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
if not ((isinstance(top_n, int) and top_n >= 0) or top_n is None):
raise ValueError("top_n must be a positive integer or None.")
self._top_n = top_n # average precision at n
self._total_positives = 0 # total number of positives have seen
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
actuals: a list storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
        then it's possible some true positives were missed in them. In that case,
        you can provide 'num_positives' in order to accurately track recall.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if not num_positives is None:
if not isinstance(num_positives, numbers.Number) or num_positives < 0:
raise ValueError("'num_positives' was provided but it wan't a nonzero number.")
if not num_positives is None:
self._total_positives += num_positives
else:
self._total_positives += np.size(np.where(actuals > 0))
topk = self._top_n
heap = self._heap
for i in range(np.size(predictions)):
if topk is None or len(heap) < topk:
heapq.heappush(heap, (predictions[i], actuals[i]))
else:
if predictions[i] > heap[0][0]: # heap[0] is the smallest
heapq.heappop(heap)
heapq.heappush(heap, (predictions[i], actuals[i]))
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
The non-interpolated average precision at n (default 0).
If n is larger than the length of the ranked list,
the average precision will be returned.
"""
if self.heap_size <= 0:
return 0
predlists = np.array(list(zip(*self._heap)))
ap = self.ap_at_n(predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
return AveragePrecisionCalculator.ap_at_n(predictions,
actuals,
n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
n: the top n items to be considered in ap@n.
total_num_positives : (optionally) you can specify the number of total
positive
in the list. If specified, it will be used in calculation.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when
1) the format of the input is not the numpy 1-D array;
2) the shape of predictions and actuals does not match;
3) the input n is not a positive integer.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if n is not None:
if not isinstance(n, int) or n <= 0:
raise ValueError("n must be 'None' or a positive integer."
" It was '%s'." % n)
ap = 0.0
predictions = np.array(predictions)
actuals = np.array(actuals)
# add a shuffler to avoid overestimating the ap
predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
actuals)
sortidx = sorted(
range(len(predictions)),
key=lambda k: predictions[k],
reverse=True)
if total_num_positives is None:
numpos = np.size(np.where(actuals > 0))
else:
numpos = total_num_positives
if numpos == 0:
return 0
if n is not None:
numpos = min(numpos, n)
delta_recall = 1.0 / numpos
poscount = 0.0
# calculate the ap
r = len(sortidx)
if n is not None:
r = min(r, n)
for i in range(r):
if actuals[sortidx[i]] > 0:
poscount += 1
ap += poscount / (i + 1) * delta_recall
return ap
@staticmethod
def _shuffle(predictions, actuals):
random.seed(0)
suffidx = random.sample(range(len(predictions)), len(predictions))
predictions = predictions[suffidx]
actuals = actuals[suffidx]
return predictions, actuals
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
"""Normalize the predictions to the range between 0.0 and 1.0.
For some predictions like SVM predictions, we need to normalize them before
calculate the interpolated average precision. The normalization will not
change the rank in the original list and thus won't change the average
precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
epsilon: a small constant to avoid denominator being zero.
Returns:
The normalized prediction.
"""
denominator = np.max(predictions) - np.min(predictions)
    ret = (predictions - np.min(predictions)) / np.maximum(denominator, epsilon)
return ret
def calculate_gap(predictions, actuals, top_k=6):
gap_calculator = AveragePrecisionCalculator()
sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, actuals, top_k)
gap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
return gap_calculator.peek_ap_at_n()
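# Worked example (illustrative): with
#   preds  = np.array([[0.9, 0.2, 0.1], [0.3, 0.8, 0.4]])
#   labels = np.array([[1, 0, 0], [0, 1, 0]])
# calculate_gap(preds, labels, top_k=2) pools the per-class (prediction, label)
# pairs and ranks them 0.9->1, 0.8->1, 0.4->0, 0.2->0, so both positives come
# first and the GAP evaluates to 1.0.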
def top_k_by_class(predictions, labels, k=20):
if k <= 0:
raise ValueError("k must be a positive integer.")
k = min(k, predictions.shape[1])
num_classes = predictions.shape[1]
prediction_triplets= []
for video_index in range(predictions.shape[0]):
prediction_triplets.extend(top_k_triplets(predictions[video_index],labels[video_index], k))
out_predictions = [[] for v in range(num_classes)]
out_labels = [[] for v in range(num_classes)]
for triplet in prediction_triplets:
out_predictions[triplet[0]].append(triplet[1])
out_labels[triplet[0]].append(triplet[2])
out_true_positives = [np.sum(labels[:,i]) for i in range(num_classes)]
return out_predictions, out_labels, out_true_positives
def top_k_triplets(predictions, labels, k=20):
"""Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in
(prediction, class) format"""
m = len(predictions)
k = min(k, m)
indices = np.argpartition(predictions, -k)[-k:]
return [(index, predictions[index], labels[index]) for index in indices]
def get_tag_id_dict(tag_id_file):
tag_id_dict={}
with open(tag_id_file, 'r') as lnf:
for line in lnf:
tag, idx = line.strip().split('\t')
tag_id_dict[tag] = int(idx)
return tag_id_dict
def convert_to_hot(tag_list, scores, tag_dict):
hot_list = np.zeros(len(tag_dict))
for i in range(len(tag_list)):
hot_list[int(tag_dict[tag_list[i]])] = float(scores[i])
return hot_list
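# Hedged usage sketch (toy values, not part of the original evaluation script):
# given a tag->id mapping of the kind produced by get_tag_id_dict above,
# convert_to_hot scatters sparse (tag, score) pairs into a dense score vector.
def _demo_convert_to_hot():
  tag_dict = {'cat': 0, 'dog': 1, 'car': 2}  # assumed toy mapping
  print(convert_to_hot(['dog', 'car'], [0.9, 0.4], tag_dict))  # [0.  0.9 0.4]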
def parse_gt_json(gt_json, tag_dict):
gt_dict = {}
with open(gt_json, "r", encoding='utf-8') as f:
gts = json.load(f)
for key in gts:
x = []
for ann in gts[key]["annotations"]:
x.extend(ann['labels'])
x = list(set(x))
gt_dict[key] = convert_to_hot(x, np.ones(len(x)), tag_dict)
return gt_dict
def parse_input_json(input_json, tag_dict):
pred_dict = {}
videos_list = []
with open(input_json, "r", encoding='utf-8') as f:
pred_result = json.load(f)
for video in pred_result:
videos_list.append(video)
pred_dict[video] = convert_to_hot(pred_result[video]["result"][0]["labels"],
pred_result[video]["result"][0]["scores"],tag_dict)
return pred_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pred_json', type=str, default="test100_pred.json")
parser.add_argument('--tag_id_file', type=str, default="tag-id-tagging.txt")
parser.add_argument('--gt_json', type=str, default="test100.json")
parser.add_argument('--top_k', type=int, default=20)
args = parser.parse_args()
assert os.path.exists(args.tag_id_file), "dict file {} not found".format(args.tag_id_file)
tag_dict = get_tag_id_dict(args.tag_id_file)
pred_dict = parse_input_json(args.pred_json, tag_dict)
gt_dict = parse_gt_json(args.gt_json, tag_dict)
assert(pred_dict.keys() == gt_dict.keys())
preds, labels = [], []
for k in pred_dict:
preds.append(pred_dict[k])
labels.append(gt_dict[k])
preds = np.stack(preds)
labels = np.stack(labels)
gap = calculate_gap(preds, labels, top_k = args.top_k)
print("The GAP result is {:.3f}".format(gap))
| 11,485 | 34.125382 | 100 | py |
TAILOR | TAILOR-main/src/utils/pr_calculator_per_tag.py | #encoding: utf-8
#Author: [email protected]
from utils.metrics.pr_calculator import PRCalculator
import numpy as np
import time
def count_func_time(func):
def call_fun(*args, **kwargs):
start_time = time.time()
func(*args, **kwargs)
end_time = time.time()
print('{} cost {:.3f} sec'.format(func.__name__, end_time-start_time))
return call_fun
def map_func(obj, x1, x2):
obj.accumulate(x1, x2)
class PRCalculatorPerTag():
def __init__(self, tag_num):
self.tag_num = tag_num
self.pr_calculators = []
for i in range(self.tag_num):
self.pr_calculators.append(PRCalculator())
#@count_func_time
def accumulate(self, predictions, actuals):
"""
predictions: n_example X n_classes
actuals: n_example X n_classes
"""
#n_example X n_classes ==> n_classes * [n_example x 1]
pred_per_tag_list = np.expand_dims(predictions.transpose(), -1)
actuals_per_tag_list = np.expand_dims(actuals.transpose(), -1)
for i in range(self.tag_num):
self.pr_calculators[i].accumulate(pred_per_tag_list[i], actuals_per_tag_list[i])
#ret = list(map(map_func, self.pr_calculators, pred_per_tag_list, actuals_per_tag_list))
def get_precision_list(self, th=0.5):
return [self.pr_calculators[i].get_precision_at_conf(th) for i in range(self.tag_num)]
def get_recall_list(self, th=0.5):
return [self.pr_calculators[i].get_recall_at_conf(th) for i in range(self.tag_num)]
def clear(self):
for i in range(self.tag_num):
self.pr_calculators[i].clear()
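# Minimal usage sketch (toy numbers, not from the original training pipeline):
# feed one mini-batch of scores and 0/1 labels, then read back the per-tag
# precision/recall at one of the two supported confidence thresholds (0.5, 0.1).
def _demo_pr_calculator_per_tag():
    preds = np.array([[0.9, 0.2], [0.6, 0.8]])  # n_example x n_tags
    labels = np.array([[1, 0], [0, 1]])
    calc = PRCalculatorPerTag(tag_num=2)
    calc.accumulate(preds, labels)
    print(calc.get_precision_list(th=0.5), calc.get_recall_list(th=0.5))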
| 1,555 | 30.755102 | 92 | py |
TAILOR | TAILOR-main/src/utils/pr_calculator.py | #encoding: utf-8
#Author: [email protected]
import numpy as np
class PRCalculator():
def __init__(self):
        # use only two thresholds to save eval time
        self.threshold_dict={0.5:0, 0.1:1} #TODO(jefxiong, range from 0.9~0.01)
self.precision = np.zeros((len(self.threshold_dict)))
self.recall = np.zeros((len(self.threshold_dict)))
self.accumulate_count = np.zeros((len(self.threshold_dict)))
def accumulate(self, predictions, actuals):
"""
predictions: n_example X n_classes
actuals: n_example X n_classes
"""
#assert isinstance(predictions, np.ndarray)
#assert isinstance(actuals, np.ndarray)
n_example = predictions.shape[0]
precision_all = np.zeros((n_example, len(self.threshold_dict)))
recall_all = np.zeros((n_example, len(self.threshold_dict)))
for i in range(n_example):
gt_index = np.nonzero(actuals[i])[0]
for th, th_index in self.threshold_dict.items():
pred_index = np.nonzero(predictions[i]>th)[0]
tp = np.sum([actuals[i][k] for k in pred_index])
precision_all[i][th_index] = tp*1.0/len(pred_index) if len(pred_index)>0 else np.nan
recall_all[i][th_index] = tp*1.0/len(gt_index) if len(gt_index)>0 else np.nan
        valid_accumulate = (np.sum(~np.isnan(precision_all), axis=0)) != 0
        self.accumulate_count = self.accumulate_count + valid_accumulate
precision_all = np.nansum(precision_all,axis=0)/(np.sum(~np.isnan(precision_all), axis=0)+1e-10)
recall_all = np.nansum(recall_all,axis=0)/(np.sum(~np.isnan(recall_all), axis=0)+1e-10)
self.precision = precision_all + self.precision
self.recall = recall_all + self.recall
def get_precision_at_conf(self, th=0.5):
index = self.threshold_dict[th]
return self.precision[index]/(1e-10+self.accumulate_count[index])
def get_recall_at_conf(self, th=0.5):
index = self.threshold_dict[th]
return self.recall[index]/(1e-10+self.accumulate_count[index])
def clear(self):
self.accumulate_count = np.zeros((len(self.threshold_dict)))
self.precision = np.zeros((len(self.threshold_dict)))
self.recall = np.zeros((len(self.threshold_dict)))
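# Minimal usage sketch (toy numbers): accumulate one batch, then query the running
# precision/recall at confidence 0.5; repeated accumulate() calls are averaged.
def _demo_pr_calculator():
    preds = np.array([[0.9, 0.1, 0.7], [0.2, 0.8, 0.3]])
    labels = np.array([[1, 0, 1], [0, 1, 0]])
    calc = PRCalculator()
    calc.accumulate(preds, labels)
    print(calc.get_precision_at_conf(0.5), calc.get_recall_at_conf(0.5))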
| 2,223 | 40.185185 | 102 | py |
TAILOR | TAILOR-main/src/utils/mean_average_precision_calculator.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate the mean average precision.
It provides an interface for calculating mean average precision
for an entire list or the top-n ranked items.
Example usages:
We first call the function accumulate many times to process parts of the ranked
list. After processing all the parts, we call peek_map_at_n
to calculate the mean average precision.
```
import random
p = np.array([[random.random() for _ in range(50)] for _ in range(1000)])
a = np.array([[random.choice([0, 1]) for _ in range(50)]
              for _ in range(1000)])
# mean average precision for 50 classes.
calculator = mean_average_precision_calculator.MeanAveragePrecisionCalculator(
num_class=50)
calculator.accumulate(p, a)
aps = calculator.peek_map_at_n()
```
"""
import utils.metrics.average_precision_calculator as average_precision_calculator
class MeanAveragePrecisionCalculator(object):
"""This class is to calculate mean average precision.
"""
def __init__(self, num_class):
"""Construct a calculator to calculate the (macro) average precision.
Args:
num_class: A positive Integer specifying the number of classes.
top_n_array: A list of positive integers specifying the top n for each
class. The top n in each class will be used to calculate its average
precision at n.
The size of the array must be num_class.
Raises:
ValueError: An error occurred when num_class is not a positive integer;
or the top_n_array is not a list of positive integers.
"""
    if not isinstance(num_class, int) or num_class <= 1:
      raise ValueError("num_class must be an integer larger than 1.")
self._ap_calculators = [] # member of AveragePrecisionCalculator
self._num_class = num_class # total number of classes
for i in range(num_class):
self._ap_calculators.append(
average_precision_calculator.AveragePrecisionCalculator())
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
Args:
predictions: A list of lists storing the prediction scores. The outer
dimension corresponds to classes.
actuals: A list of lists storing the ground truth labels. The dimensions
should correspond to the predictions input. Any value
larger than 0 will be treated as positives, otherwise as negatives.
num_positives: If provided, it is a list of numbers representing the
number of true positives for each class. If not provided, the number of
true positives will be inferred from the 'actuals' array.
Raises:
ValueError: An error occurred when the shape of predictions and actuals
does not match.
"""
    if not num_positives:
      # one (unknown) positive count per class
      num_positives = [None for i in range(predictions.shape[1])]
calculators = self._ap_calculators
for i in range(len(predictions)):
calculators[i].accumulate(predictions[i], actuals[i], num_positives[i])
def clear(self):
for calculator in self._ap_calculators:
calculator.clear()
def is_empty(self):
return ([calculator.heap_size for calculator in self._ap_calculators] ==
[0 for _ in range(self._num_class)])
def peek_map_at_n(self):
"""Peek the non-interpolated mean average precision at n.
Returns:
An array of non-interpolated average precision at n (default 0) for each
class.
"""
aps = [self._ap_calculators[i].peek_ap_at_n()
for i in range(self._num_class)]
return aps
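# Minimal usage sketch (toy numbers; numpy is assumed to be available, it is not
# imported by this module). Note the layout accumulate() expects: rows are
# classes, columns are examples.
def _demo_mean_average_precision():
  import numpy as np
  preds = np.array([[0.9, 0.1, 0.8], [0.3, 0.7, 0.2]])  # n_classes x n_examples
  actuals = np.array([[1, 0, 1], [0, 1, 0]])
  calc = MeanAveragePrecisionCalculator(num_class=2)
  calc.accumulate(preds, actuals)
  print(calc.peek_map_at_n())  # one AP value per class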
| 4,098 | 35.598214 | 81 | py |
TAILOR | TAILOR-main/src/utils/train_utlis.py | """Provides functions to help with evaluating models."""
import numpy as np
import collections
import re
import glob
import os
import tensorflow as tf
from tensorflow import logging
import utils.metrics.mean_average_precision_calculator as map_calculator
import utils.metrics.average_precision_calculator as ap_calculator
from utils.metrics.pr_calculator import PRCalculator
from utils.metrics.pr_calculator_per_tag import PRCalculatorPerTag
###
###Training utils
###
def find_class_by_name(name, modules):
"""Searches the provided modules for the named class and returns it."""
modules = [getattr(module, name, None) for module in modules]
return next(a for a in modules if a)
def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
"""Dequantize the feature from the byte format to the float format.
Args:
feat_vector: the input 1-d vector.
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
A float vector which has the same shape as feat_vector.
"""
assert max_quantized_value > min_quantized_value
quantized_range = max_quantized_value - min_quantized_value
scalar = quantized_range / 255.0
bias = (quantized_range / 512.0) + min_quantized_value
return feat_vector * scalar + bias
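# Quick numeric sketch of Dequantize (toy input): with the default range [-2, 2],
# byte 0 maps to ~-1.99, 128 to ~0.02, and 255 to ~2.01.
def _demo_dequantize():
  print(Dequantize(np.array([0.0, 128.0, 255.0])))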
def MakeSummary(name, value):
"""Creates a tf.Summary proto with the given name and value."""
summary = tf.Summary()
val = summary.value.add()
val.tag = str(name)
val.simple_value = float(value)
return summary
#def FormatBatchInfo(global_step_val, global_step_info_dict):
#this_hit_at_one = global_step_info_dict["hit_at_one"]
#this_perr = global_step_info_dict["perr"]
#this_loss = global_step_info_dict["loss"]
#examples_per_second = global_step_info_dict.get("examples_per_second", -1)
#info = ("global_step {0} | Batch Hit@1: {1:.3f} | Batch PERR: {2:.3f} | Batch Loss: {3:.3f} "
# "| Examples_per_sec: {4:.3f}").format(
# global_step_val, this_hit_at_one, this_perr, this_loss,
# examples_per_second)
#return info
def FormatEvalInfo(summary_writer,
global_step_val,
epoch_info_dict,
prefix='eval_fusion',
AddSummary=True):
"""Add the epoch summary to the Tensorboard.
  Args:
    summary_writer: Tensorflow summary_writer.
    global_step_val: an int value of the global step.
    epoch_info_dict: a dictionary of the evaluation metrics calculated for the
      whole epoch.
    prefix: a string prepended to every summary tag, e.g. 'eval_fusion'.
    AddSummary: whether to write the summaries to Tensorboard.
  Returns:
    A string summarizing this global step.
  """
epoch_id = epoch_info_dict["epoch_id"]
avg_hit_at_one = epoch_info_dict["avg_hit_at_one"]
avg_perr = epoch_info_dict["avg_perr"]
avg_loss = epoch_info_dict["avg_loss"]
aps = epoch_info_dict["aps"]
gap = epoch_info_dict["gap"]
precision_at_1 = epoch_info_dict["precision_at_1"]
precision_at_5 = epoch_info_dict["precision_at_5"]
recall_at_1 = epoch_info_dict['recall_at_1']
recall_at_5 = epoch_info_dict['recall_at_5']
mean_ap = np.mean(aps)
if AddSummary:
summary_writer.add_summary(
MakeSummary(prefix + "/Avg_Hit@1", avg_hit_at_one), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix + "/Avg_Perr", avg_perr),global_step_val)
summary_writer.add_summary(
MakeSummary(prefix + "/Avg_Loss", avg_loss), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix + "/MAP", mean_ap), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix +"/GAP", gap), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix +"/[email protected]", precision_at_1), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix +"/[email protected]", precision_at_5), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix +"/[email protected]", recall_at_1), global_step_val)
summary_writer.add_summary(
MakeSummary(prefix +"/[email protected]", recall_at_5), global_step_val)
summary_writer.flush()
info = "epoch/eval number {} | MAP: {:.3f} | GAP: {:.3f} | [email protected]: {:.3f} | [email protected]:{:.3f} | [email protected]:{:.3f} | [email protected]: {:.3f} | Avg_Loss: {:3f}".format(epoch_id, mean_ap, gap, precision_at_1, precision_at_5, recall_at_1, recall_at_5, avg_loss)
return info
def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes):
"""Extract the list of feature names and the dimensionality of each feature
from string of comma separated values.
Args:
feature_names: string containing comma separated list of feature names
feature_sizes: string containing comma separated list of feature sizes
Returns:
List of the feature names and list of the dimensionality of each feature.
Elements in the first/second list are strings/integers.
"""
list_of_feature_names = [
feature_names.strip() for feature_names in feature_names.split(',')]
list_of_feature_sizes = [
int(feature_sizes) for feature_sizes in feature_sizes.split(',')]
if len(list_of_feature_names) != len(list_of_feature_sizes):
logging.error("length of the feature names (=" +
str(len(list_of_feature_names)) + ") != length of feature "
"sizes (=" + str(len(list_of_feature_sizes)) + ")")
return list_of_feature_names, list_of_feature_sizes
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
tmp = tf.clip_by_norm(grad.values, max_norm)
grad = tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = tf.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def combine_gradients(tower_grads):
"""Calculate the combined gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been summed
across all towers.
"""
filtered_grads = [[x for x in grad_list if x[0] is not None] for grad_list in tower_grads]
final_grads = []
for i in range(len(filtered_grads[0])):
grads = [filtered_grads[t][i] for t in range(len(filtered_grads))]
grad = tf.stack([x[0] for x in grads], 0)
grad = tf.reduce_mean(grad, 0)
final_grads.append((grad, filtered_grads[0][i][1],))
return final_grads
###
###Validate while training
###
def flatten(l):
""" Merges a list of lists into a single list. """
return [item for sublist in l for item in sublist]
def get_tag_stat(labels):
"""
get freq num of each tag
"""
num_classes = labels.shape[1]
num_stat = np.zeros(num_classes)
for i in range(num_classes):
num_stat[i] = np.sum(labels[:,i])
return num_stat
def get_tag_correlation(preds, labels, top_k=10):
n_example, n_class = preds.shape
tag_correlation = np.zeros((n_class, n_class))
top_k = min(n_class, top_k)
#convert pred to top_k index
pred_indx = np.zeros((n_example, n_class), dtype=np.int8)
for i in range(n_example):
for idx in np.argpartition(preds[i], -top_k)[-top_k:]:
pred_indx[i][idx] = 1
#get correlation matrix
for i in range(n_example):
label_index = np.nonzero(labels[i])[0]
pred_index = np.nonzero(pred_indx[i])[0]
for li in label_index:
for pi in pred_index:
tag_correlation[li][pi] +=1
return tag_correlation
def get_tag_confidence(predictions, labels):
n_example, n_class = predictions.shape
tag_confidence = np.zeros(n_class)
for i in range(n_example):
label_index = np.nonzero(labels[i])[0]
for j in label_index:
tag_confidence[j]+=predictions[i][j]
return tag_confidence
def calculate_hit_at_one(predictions, actuals):
"""Performs a local (numpy) calculation of the hit at one.
Args:
predictions: Matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels.
Dimensions are 'batch' x 'num_classes'.
Returns:
float: The average hit at one across the entire batch.
"""
top_prediction = np.argmax(predictions, 1)
hits = actuals[np.arange(actuals.shape[0]), top_prediction]
return np.average(hits)
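# Tiny worked example (toy batch): the argmax class is a true label for the first
# example only, so hit@1 is 0.5 here.
def _demo_hit_at_one():
  preds = np.array([[0.7, 0.3], [0.6, 0.4]])
  actuals = np.array([[1, 0], [0, 1]])
  print(calculate_hit_at_one(preds, actuals))  # 0.5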
def calculate_precision_at_equal_recall_rate(predictions, actuals):
"""Performs a local (numpy) calculation of the PERR.
Args:
predictions: Matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels.
Dimensions are 'batch' x 'num_classes'.
Returns:
float: The average precision at equal recall rate across the entire batch.
"""
aggregated_precision = 0.0
num_videos = actuals.shape[0]
for row in np.arange(num_videos):
num_labels = int(np.sum(actuals[row]))
top_indices = np.argpartition(predictions[row],
-num_labels)[-num_labels:]
item_precision = 0.0
for label_index in top_indices:
if predictions[row][label_index] > 0:
item_precision += actuals[row][label_index]
item_precision /= top_indices.size
aggregated_precision += item_precision
aggregated_precision /= num_videos
return aggregated_precision
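# Tiny worked example for PERR (toy batch): row 0 has two positives, so its top-2
# predictions are scored (precision 0.5); row 1 has one positive and its top-1 is
# correct (precision 1.0), giving an average of 0.75.
def _demo_perr():
  preds = np.array([[0.9, 0.8, 0.1], [0.2, 0.7, 0.6]])
  actuals = np.array([[1, 0, 1], [0, 1, 0]])
  print(calculate_precision_at_equal_recall_rate(preds, actuals))  # 0.75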
def calculate_gap(predictions, actuals, top_k=20):
"""Performs a local (numpy) calculation of the global average precision.
Only the top_k predictions are taken for each of the videos.
Args:
predictions: Matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels.
Dimensions are 'batch' x 'num_classes'.
top_k: How many predictions to use per video.
Returns:
float: The global average precision.
"""
gap_calculator = ap_calculator.AveragePrecisionCalculator()
sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, actuals, top_k)
gap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
return gap_calculator.peek_ap_at_n()
def top_k_by_class(predictions, labels, k=20):
"""Extracts the top k predictions for each video, sorted by class.
Args:
predictions: A numpy matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
k: the top k non-zero entries to preserve in each prediction.
Returns:
A tuple (predictions,labels, true_positives). 'predictions' and 'labels'
are lists of lists of floats. 'true_positives' is a list of scalars. The
length of the lists are equal to the number of classes. The entries in the
predictions variable are probability predictions, and
the corresponding entries in the labels variable are the ground truth for
those predictions. The entries in 'true_positives' are the number of true
positives for each class in the ground truth.
Raises:
ValueError: An error occurred when the k is not a positive integer.
"""
if k <= 0:
raise ValueError("k must be a positive integer.")
k = min(k, predictions.shape[1])
num_classes = predictions.shape[1]
prediction_triplets= []
for video_index in range(predictions.shape[0]):
prediction_triplets.extend(top_k_triplets(predictions[video_index],labels[video_index], k))
out_predictions = [[] for v in range(num_classes)]
out_labels = [[] for v in range(num_classes)]
for triplet in prediction_triplets:
out_predictions[triplet[0]].append(triplet[1])
out_labels[triplet[0]].append(triplet[2])
out_true_positives = [np.sum(labels[:,i]) for i in range(num_classes)]
return out_predictions, out_labels, out_true_positives
def top_k_triplets(predictions, labels, k=20):
"""Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in
(prediction, class) format"""
m = len(predictions)
k = min(k, m)
indices = np.argpartition(predictions, -k)[-k:]
return [(index, predictions[index], labels[index]) for index in indices]
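# Sketch of the sparse top-k layout used above (toy input): each triplet is
# (class_index, prediction_score, label); top_k_by_class then regroups the
# triplets into per-class prediction/label lists.
def _demo_top_k():
  preds = np.array([[0.9, 0.2, 0.4]])
  labels = np.array([[1, 0, 0]])
  print(top_k_triplets(preds[0], labels[0], k=2))  # e.g. [(2, 0.4, 0), (0, 0.9, 1)]
  print(top_k_by_class(preds, labels, k=2))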
class EvaluationMetrics(object):
"""A class to store the evaluation metrics."""
def __init__(self, num_class, top_k, accumulate_per_tag=False):
"""Construct an EvaluationMetrics object to store the evaluation metrics.
Args:
num_class: A positive integer specifying the number of classes.
top_k: A positive integer specifying how many predictions are considered per video.
Raises:
ValueError: An error occurred when MeanAveragePrecisionCalculator cannot
not be constructed.
"""
self.sum_hit_at_one = 0.0
self.sum_perr = 0.0
self.sum_loss = 0.0
self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)
self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()
self.pr_calculator = PRCalculator()
self.pr_calculator_per_tag = PRCalculatorPerTag(num_class)
self.accumulate_per_tag = accumulate_per_tag
self.top_k = top_k
self.num_examples = 0
self.nums_per_tag = np.zeros(num_class)
    self.tag_correlation = np.zeros((num_class, num_class))
self.tag_confidence = np.zeros(num_class)
def accumulate(self, predictions, labels, loss):
"""Accumulate the metrics calculated locally for this mini-batch.
Args:
predictions: A numpy matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
labels: A numpy matrix containing the ground truth labels.
Dimensions are 'batch' x 'num_classes'.
loss: A numpy array containing the loss for each sample.
Returns:
dictionary: A dictionary storing the metrics for the mini-batch.
Raises:
ValueError: An error occurred when the shape of predictions and actuals
does not match.
"""
batch_size = labels.shape[0]
mean_hit_at_one = calculate_hit_at_one(predictions, labels)
mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)
mean_loss = loss
self.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)
self.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, labels, self.top_k)
self.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)
self.pr_calculator.accumulate(predictions, labels)
if self.accumulate_per_tag:
self.pr_calculator_per_tag.accumulate(predictions, labels)
# Take the top 20 predictions.
sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)
self.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)
self.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
self.num_examples += batch_size
self.sum_hit_at_one += mean_hit_at_one * batch_size
self.sum_perr += mean_perr * batch_size
self.sum_loss += mean_loss * batch_size
return {"hit_at_one": mean_hit_at_one, "perr": mean_perr, "loss": mean_loss}
def get(self):
"""Calculate the evaluation metrics for the whole epoch.
Raises:
ValueError: If no examples were accumulated.
Returns:
dictionary: a dictionary storing the evaluation metrics for the epoch. The
dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and
aps (default nan).
"""
if self.num_examples <= 0:
raise ValueError("total_sample must be positive.")
avg_hit_at_one = self.sum_hit_at_one / self.num_examples
avg_perr = self.sum_perr / self.num_examples
avg_loss = self.sum_loss / self.num_examples
aps = self.map_calculator.peek_map_at_n()
gap = self.global_ap_calculator.peek_ap_at_n()
tag_confidence = self.tag_confidence/(self.nums_per_tag+1e-10)
precision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)
recall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)
precision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)
recall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)
tag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []
tag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []
epoch_info_dict= {"avg_hit_at_one": avg_hit_at_one, "avg_perr": avg_perr,
"avg_loss": avg_loss, "aps": aps, "gap": gap,
'num': self.nums_per_tag,
'tag_correlation': self.tag_correlation,
'tag_confidence': tag_confidence,
"precision_at_1": precision_at_1,
"recall_at_1": recall_at_1,
"precision_at_5": precision_at_5,
"recall_at_5": recall_at_5,
"tag_precision": tag_precision,
"tag_recall": tag_recall
}
return epoch_info_dict
def clear(self):
"""Clear the evaluation metrics and reset the EvaluationMetrics object."""
self.sum_hit_at_one = 0.0
self.sum_perr = 0.0
self.sum_loss = 0.0
self.map_calculator.clear()
self.global_ap_calculator.clear()
self.pr_calculator.clear()
self.pr_calculator_per_tag.clear()
self.num_examples = 0
self.tag_correlation = 0.0
self.nums_per_tag = 0.0
self.tag_confidence = 0.0
# Match variables against a pretrained model checkpoint and load them
def get_assignment_map_from_checkpoint(tvars, init_checkpoint,show=False, var_prefix='tower/text/'):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
#print(name)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if var_prefix+name not in name_to_variable:
if show:
print('not in variables: '+name)
continue
assignment_map[name] = var_prefix+name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
print("assign: ",name, var_prefix+name)
return (assignment_map, initialized_variable_names)
def get_latest_checkpoint(train_dir):
index_files = glob.glob(os.path.join(train_dir,'model.ckpt-*.index'))
if not index_files:
return None
# Index file path with the maximum step size.
latest_index_file = sorted(
[(int(os.path.basename(f).split("-")[-1].split(".")[0]), f)
for f in index_files])[-1][1]
# Chop off .index suffix and return
return latest_index_file[:-6]
def get_label_name_dict(tag_id_file, tag_max_num=5):
label_name_dict={}
with open(tag_id_file, 'r') as lnf:
for line in lnf:
tag, idx = line.strip().split('\t')
if int(idx) not in label_name_dict:
label_name_dict[int(idx)] = [tag]
else:
label_name_dict[int(idx)].append(tag)
for key in label_name_dict:
label_name_dict[key] = '-'.join(label_name_dict[key][:tag_max_num])
return label_name_dict
def get_tag_id_dict(tag_id_file):
tag_id_dict={}
with open(tag_id_file, 'r') as lnf:
for line in lnf:
tag, idx = line.strip().split('\t')
tag_id_dict[tag] = int(idx)
return tag_id_dict
def task_as_string(task):
return "/job:%s/task:%s" % (task.type, task.index)
class ParameterServer(object):
def __init__(self, cluster, task):
self.cluster = cluster
self.task = task
def run(self):
logging.info("%s: Starting parameter server within cluster %s.",
task_as_string(self.task), self.cluster.as_dict())
server = start_server(self.cluster, self.task)
server.join()
def start_server(cluster, task):
if not task.type:
raise ValueError("%s: The task type must be specified." %task_as_string(task))
if task.index is None:
raise ValueError("%s: The task index must be specified." %task_as_string(task))
return tf.train.Server(tf.train.ClusterSpec(cluster),protocol="grpc", job_name=task.type, task_index=task.index)
#inference utils
def format_lines(video_ids, predictions, top_k, label_name_dict):
batch_size = len(video_ids)
for video_index in range(batch_size):
top_indices = np.argpartition(predictions[video_index], -top_k)[-top_k:]
line = [(class_index, predictions[video_index][class_index])
for class_index in top_indices]
line = sorted(line, key=lambda p: -p[1])
yield video_ids[video_index] + "\t" + "\t".join(
"%s##%.3f" % (label_name_dict.get(int(label), 'NULL'), score) for (label, score) in line) + "\n"
| 21,337 | 38.296501 | 238 | py |
TAILOR | TAILOR-main/src/utils/eval.py | import codecs
from sklearn import metrics
import numpy as np
import os
def dense(y):
label_y = []
for i in range(len(y)):
for j in range(len(y[i])):
label_y.append(y[i][j])
return label_y
def get_accuracy(y, y_pre):
# print('metric_acc: ' + str(round(metrics.accuracy_score(y, y_pre),4)))
    samples = len(y)
    count = 0.0
    for i in range(samples):
        y_true = 0
        all_y = 0
        for j in range(len(y[i])):
            if y[i][j] > 0 and y_pre[i][j] > 0:
                y_true += 1
            if y[i][j] > 0 or y_pre[i][j] > 0:
                all_y += 1
        if all_y <= 0:
            all_y = 1
        count += float(y_true) / float(all_y)
    acc = float(count) / float(samples)
    acc = round(acc, 4)
return acc
# print('accuracy_hand:' + str(acc))
def get_metrics(y, y_pre):
"""
:param y:1071*6
:param y_pre: 1071*6
:return:
"""
y = y.cpu().detach().numpy()
y_pre = y_pre.cpu().detach().numpy()
test_labels = dense(y)
test_pred = dense(y_pre)
    # print(metrics.classification_report(test_labels, test_pred, digits=4))
# print(metrics.precision_recall_fscore_support(test_labels, test_pred, average='macro'))
# print("Micro average Test Precision, Recall and F1-Score...")
# print(metrics.precision_recall_fscore_support(test_labels,test_pred, average='micro'))
y=np.array(y)
y_pre=np.array(y_pre)
# print("hammloss: "+str(round(hamming_loss,4)))
macro_f1 = metrics.f1_score(y, y_pre, average='macro')
macro_precision = metrics.precision_score(y, y_pre, average='macro')
macro_recall = metrics.recall_score(y, y_pre, average='macro')
acc = get_accuracy(y, y_pre)
y = np.array(y)
y_pre = np.array(y_pre)
# print(metrics.classification_report(y, y_pre, digits=4))
# print("micro_precision, micro_precison,micro_recall")
micro_f1 = metrics.f1_score(y, y_pre, average='micro')
micro_precision = metrics.precision_score(y, y_pre, average='micro')
micro_recall = metrics.recall_score(y, y_pre, average='micro')
# print(""+str(round(micro_precision,4))+"\t"+str(round(micro_recall,4))+"\t"+str(round(micro_f1,4)))
return micro_f1, micro_precision, micro_recall, acc
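# Minimal usage sketch (toy tensors; torch is assumed available in this project):
# inputs are 0/1 multi-label matrices of shape n_examples x n_classes.
def _demo_get_metrics():
    import torch
    y = torch.tensor([[1, 0, 1], [0, 1, 0]])
    y_pre = torch.tensor([[1, 0, 0], [0, 1, 0]])
    print(get_metrics(y, y_pre))  # (micro_f1, micro_precision, micro_recall, acc)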
| 2,109 | 28.71831 | 102 | py |
TAILOR | TAILOR-main/src/utils/average_precision_calculator.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate or keep track of the interpolated average precision.
It provides an interface for calculating interpolated average precision for an
entire list or the top-n ranked items. For the definition of the
(non-)interpolated average precision:
http://trec.nist.gov/pubs/trec15/appendices/CE.MEASURES06.pdf
Example usages:
1) Use it as a static function call to directly calculate average precision for
a short ranked list in the memory.
```
import random
p = np.array([random.random() for _ in range(10)])
a = np.array([random.choice([0, 1]) for _ in range(10)])
ap = average_precision_calculator.AveragePrecisionCalculator.ap(p, a)
```
2) Use it as an object for long ranked list that cannot be stored in memory or
the case where partial predictions can be observed at a time (Tensorflow
predictions). In this case, we first call the function accumulate many times
to process parts of the ranked list. After processing all the parts, we call
peek_interpolated_ap_at_n.
```
p1 = np.array([random.random() for _ in range(5)])
a1 = np.array([random.choice([0, 1]) for _ in range(5)])
p2 = np.array([random.random() for _ in range(5)])
a2 = np.array([random.choice([0, 1]) for _ in range(5)])
# interpolated average precision at 10 using 1000 break points
calculator = average_precision_calculator.AveragePrecisionCalculator(10)
calculator.accumulate(p1, a1)
calculator.accumulate(p2, a2)
ap3 = calculator.peek_ap_at_n()
```
"""
import heapq
import random
import numbers
import numpy
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or
None to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
    if not ((isinstance(top_n, int) and top_n >= 0) or top_n is None):
      raise ValueError("top_n must be a nonnegative integer or None.")
self._top_n = top_n # average precision at n
    self._total_positives = 0  # total number of positives seen so far
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
actuals: a list storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
then it's possible some true positives were missed in them. In that case,
you can provide 'num_positives' in order to accurately track recall.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if not num_positives is None:
if not isinstance(num_positives, numbers.Number) or num_positives < 0:
raise ValueError("'num_positives' was provided but it wan't a nonzero number.")
if not num_positives is None:
self._total_positives += num_positives
else:
self._total_positives += numpy.size(numpy.where(actuals > 0))
topk = self._top_n
heap = self._heap
for i in range(numpy.size(predictions)):
if topk is None or len(heap) < topk:
heapq.heappush(heap, (predictions[i], actuals[i]))
else:
if predictions[i] > heap[0][0]: # heap[0] is the smallest
heapq.heappop(heap)
heapq.heappush(heap, (predictions[i], actuals[i]))
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
The non-interpolated average precision at n (default 0).
If n is larger than the length of the ranked list,
the average precision will be returned.
"""
if self.heap_size <= 0:
return 0
predlists = numpy.array(list(zip(*self._heap)))
ap = self.ap_at_n(predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
return AveragePrecisionCalculator.ap_at_n(predictions,
actuals,
n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
n: the top n items to be considered in ap@n.
      total_num_positives: (optionally) you can specify the total number of
        positives in the list. If specified, it will be used in the calculation.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when
1) the format of the input is not the numpy 1-D array;
2) the shape of predictions and actuals does not match;
3) the input n is not a positive integer.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if n is not None:
if not isinstance(n, int) or n <= 0:
raise ValueError("n must be 'None' or a positive integer."
" It was '%s'." % n)
ap = 0.0
predictions = numpy.array(predictions)
actuals = numpy.array(actuals)
# add a shuffler to avoid overestimating the ap
predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
actuals)
sortidx = sorted(
range(len(predictions)),
key=lambda k: predictions[k],
reverse=True)
if total_num_positives is None:
numpos = numpy.size(numpy.where(actuals > 0))
else:
numpos = total_num_positives
if numpos == 0:
return 0
if n is not None:
numpos = min(numpos, n)
delta_recall = 1.0 / numpos
poscount = 0.0
# calculate the ap
r = len(sortidx)
if n is not None:
r = min(r, n)
for i in range(r):
if actuals[sortidx[i]] > 0:
poscount += 1
ap += poscount / (i + 1) * delta_recall
return ap
@staticmethod
def _shuffle(predictions, actuals):
random.seed(0)
suffidx = random.sample(range(len(predictions)), len(predictions))
predictions = predictions[suffidx]
actuals = actuals[suffidx]
return predictions, actuals
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
"""Normalize the predictions to the range between 0.0 and 1.0.
    For some predictions like SVM predictions, we need to normalize them before
    calculating the interpolated average precision. The normalization will not
change the rank in the original list and thus won't change the average
precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
epsilon: a small constant to avoid denominator being zero.
Returns:
The normalized prediction.
"""
denominator = numpy.max(predictions) - numpy.min(predictions)
    # Use the elementwise maximum so a zero denominator falls back to epsilon.
    ret = (predictions - numpy.min(predictions)) / numpy.maximum(denominator, epsilon)
return ret
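# Worked example (toy scores): with predictions [0.9, 0.8, 0.4] and labels
# [1, 0, 1], the positives sit at ranks 1 and 3, so AP = (1/1 + 2/3) / 2 ~ 0.8333.
def _demo_average_precision():
  ap = AveragePrecisionCalculator.ap(numpy.array([0.9, 0.8, 0.4]),
                                     numpy.array([1, 0, 1]))
  print(ap)  # ~0.8333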
| 9,766 | 34.516364 | 87 | py |
learning-idk | learning-idk-main/temperature_scaling.py | """
Authors: Nicholas Kashani Motlagh, Aswathnarayan Radhakrishnan, Dr. Jim Davis
Affiliation: Ohio State University
Email: [email protected]
Date: 11/01/21
URL: https://github.com/osu-cvl/calibration/tree/main/temperature_scaling
Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger, "On Calibration of Modern Neural Networks,"
In ICML, pp. 2130-2143, 2017.
Available: https://arxiv.org/abs/1706.04599v2.
"""
# Import torch modules
import torch
from torch import nn, optim
from torch.nn import functional as F
# Import plot module
import matplotlib.pyplot as plt
class IdentityNet(nn.Module):
"""Simple network used when logits only are provided.
"""
def __init__(self):
super(IdentityNet, self).__init__()
def forward(self, input):
return input
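# Background sketch (toy logits, independent of any trained model): dividing
# logits by T > 1 softens the softmax distribution, T < 1 sharpens it, and T = 1
# leaves it unchanged; temperature_scale() below relies on exactly this effect.
def _demo_temperature_effect():
    logits = torch.tensor([[2.0, 0.5, 0.1]])
    for t in (0.5, 1.0, 2.0):
        print(t, torch.softmax(logits / t, dim=1))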
class ModelWithTemperature(nn.Module):
"""
A thin decorator, which wraps a model with temperature scaling
model (nn.Module):
Note: Output of the neural network should be the classification logits,
NOT the softmax (or log softmax)!
"""
def __init__(self, model=None, n_bins=15, strategy='learn', per_class=False, device='cpu', verbose=False):
"""
Args:
model: A torch nn.Module neural network. Defaults to identity network.
n_bins: The number of bins used in ECE. Default: 15.
            strategy: The strategy used to temperature scale, either 'learn' or 'grid'. Default: 'learn'.
per_class: Perform temperature scaling per-class. Default: False.
device: The device to perform computations. Default: 'cpu'.
verbose: Report updates on process. Default: False.
"""
super(ModelWithTemperature, self).__init__()
# Save parameters
# Make new model
if model is None:
self.model = IdentityNet()
else:
self.model = model
self.model.eval()
self.model.to(device)
self.strategy = strategy
self.device = device
self.per_class = per_class
self.verbose = verbose
self.n_bins = n_bins
# Use ece loss criterion
self.ece_criterion = ECE(n_bins=n_bins, device=device)
def forward(self, input):
"""Forward function for nn.module
Args:
input: A tensor of inputs (rows are examples, and columns are features)
Returns:
A tensor of temperature scaled logits (rows are examples, and columns are features)
"""
logits = self.model(input)
return self.temperature_scale(logits)
def temperature_scale(self, logits):
"""Perform temperature scaling on logits.
Args:
logits: A tensor of logits (rows are examples, and columns are features).
Returns:
A tensor of temperature scaled logits.
"""
# Expand temperature to match the size of logits (in global case)
if not self.per_class:
temperature = self.temperature.expand(logits.size(0), logits.size(1))
else:
# get argmax predictions to determine per-class temperatures
preds = torch.argmax(logits, dim=1)
# Get appropriate temperature values
temperature = self.temperature[preds].unsqueeze(1).expand(logits.size(0), logits.size(1))
# Divide logits by appropriate temperatures
return logits / temperature
def global_temperature_scale(self, logits, temp):
"""Static method which performs temperature scaling on logits with specified temperature.
Args:
logits: A tensor of logits (rows are examples, and columns are features).
temp: A scalar temperature value.
Returns:
A tensor of logits scaled using a scalar temperature.
"""
# Expand temperature to match the size of logits
return logits / temp.expand(logits.size(0), logits.size(1))
def set_temperature(self, valid_loader, t_vals=[1.0], lrs=[0.01], num_iters=[50]):
"""
Resets and tunes the temperature of the model (using the validation set).
We're going to set it to optimize NLL for the learning approach and ECE for grid search.
Args:
valid_loader (DataLoader): validation set loader.
t_vals (List of floats): list of temperature values to search over (must use 'grid' strategy).
lr (List of floats): Learning rate for learned temperature scaling (must use 'learn' strategy).
num_iters (List of ints): Maximum number of iterations for learned temperature scaling
(must use 'learn' strategy).
Returns:
Either a scalar float temperature, or a tensor of float temperatures.
"""
# Initialize nll criterion
self.nll_criterion = nn.CrossEntropyLoss().to(self.device)
# First: collect all the logits and labels for the validation set
logits_list = []
labels_list = []
with torch.no_grad():
for input, label in valid_loader:
input = input.to(self.device)
labels_list.append(label)
logits = self.model(input)
logits_list.append(logits)
logits = torch.cat(logits_list).to(self.device)
labels = torch.cat(labels_list).to(self.device)
# Save parameters
self.logits = logits
self.targets = labels
self.num_classes = logits.shape[1]
# Perform the appropriate strategy
if self.strategy == "grid":
return self.set_temperature_grid(logits, labels, t_vals=t_vals)
else:
return self.set_temperature_learn(logits, labels, lrs=lrs, num_iters=num_iters)
def set_temperature_learn(self, all_logits, all_labels, lrs=[0.01], num_iters=[50]):
"""Tune the temperature of the model (using the validation set) and learned temperature scaling.
We're going to user torch LBFGS optimizer to optimize NLL.
Args:
all_logits: A tensor of all the logits that were in the validation loader.
all_labels: A tensor of all the labels that were in the validation loader.
lrs: A list of float learning rates to search through. Default: [0.01].
num_iters: A list of int maximum number of iterations to search through. Default: [50].
Return:
The best temperature or tensor of temperatures.
"""
# Per-class learned temperature
if self.per_class:
# List of optimal temperatures
optim_temps = []
# Network predictions
preds = torch.argmax(all_logits, dim=1)
# For each class, learn temperature
for l in range(self.num_classes):
if self.verbose:
print(f'Searching optimal temperature for class {l}')
# Default temperature is 1
optim_temps.append(torch.ones((1), device=self.device))
# Get all logits which were predicted as class l (and their associated targets)
logits = all_logits[preds==l]
labels = all_labels[preds==l]
# Check that there are any such logits (skip if not)
if labels.shape[0] == 0:
continue
# Get class ece and nll
before_temperature_nll = self.nll_criterion(logits, labels).item()
before_temperature_ece = self.ece_criterion(logits, labels).item()
if self.verbose:
print('Before temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece))
# Initialize best_ece/best_nll for class as base ece/nll.
best_ece = before_temperature_ece
best_nll = before_temperature_nll
# Search through hyper parameters
for lr in lrs:
for num_iter in num_iters:
# Initialize temperature
temperature = nn.Parameter(torch.ones((1), device=self.device, requires_grad=True)* 1.)
def eval():
"""Perform forward and backward pass of nll.
Returns:
A loss from nll.
"""
loss = self.nll_criterion(self.global_temperature_scale(logits, temperature), labels)
loss.backward()
return loss
# Optimize the temperature w.r.t. NLL
optimizer = optim.LBFGS([temperature], lr=lr, max_iter=num_iter)
optimizer.step(eval)
# Calculate NLL and ECE after temperature scaling
after_temperature_nll = self.nll_criterion(self.global_temperature_scale(logits, temperature), labels).item()
after_temperature_ece = self.ece_criterion(self.global_temperature_scale(logits, temperature), labels).item()
# Save temperature if ece was improved
if after_temperature_ece < best_ece:
if self.verbose: print(f'New Optimal temperature: {temperature.data.item():.3f}')
if self.verbose: print('After temperature - NLL: %.3f, ECE: %.3f' % (after_temperature_nll, after_temperature_ece))
optim_temps[l] = temperature
best_ece = after_temperature_ece
best_nll = after_temperature_nll
if self.verbose: print(f'Optimal temperature: {optim_temps[-1].data.item():.3f}')
if self.verbose: print('After temperature - NLL: %.3f, ECE: %.3f' % (best_nll, best_ece))
# Save all optimal temperatures
self.temperature = torch.tensor(optim_temps).data.to(self.device)
else:
# No best temperature found yet
optim_temp = nn.Parameter(torch.ones((1), device=self.device, requires_grad=True)* 1.)
# Get global nll and ece before temperature scaling
before_temperature_nll = self.nll_criterion(all_logits, all_labels).item()
before_temperature_ece = self.ece_criterion(all_logits, all_labels).item()
if self.verbose:
print('Before temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece))
# Save as best ece and nll
best_ece = before_temperature_ece
best_nll = before_temperature_nll
# Search through hyper parameters
for lr in lrs:
for num_iter in num_iters:
# Initialize temperature
temperature = nn.Parameter(torch.ones((1), device=self.device, requires_grad=True)* 1.)
def eval():
"""Perform forward and backward pass of nll.
Returns:
A loss from nll.
"""
loss = self.nll_criterion(self.global_temperature_scale(all_logits, temperature), all_labels)
loss.backward()
return loss
# Optimize the temperature w.r.t. NLL
optimizer = optim.LBFGS([temperature], lr=lr, max_iter=num_iter)
optimizer.step(eval)
# Calculate NLL and ECE after temperature scaling
after_temperature_nll = self.nll_criterion(self.global_temperature_scale(all_logits, temperature), all_labels).item()
after_temperature_ece = self.ece_criterion(self.global_temperature_scale(all_logits, temperature), all_labels).item()
# Save if improved ECE
if after_temperature_ece < best_ece:
if self.verbose: print(f'Optimal temperature: {temperature.data.item():.3f}')
optim_temp = temperature
best_ece = after_temperature_ece
best_nll = after_temperature_nll
# Save the optimal temperature
self.temperature = optim_temp.data
if self.verbose: print(f'Optimal temperature: {optim_temp.data.item():.3f}')
if self.verbose: print('After temperature - NLL: %.3f, ECE: %.3f' % (best_nll, best_ece))
return self.temperature
    # Note: this grid search is self-contained and could live outside the class.
def set_temperature_grid(self, all_logits, all_labels, t_vals=[0.5, 1.0, 2.0]):
"""Tune the temperature of the model (using the validation set) and grid search temperature scaling.
Args:
all_logits: A tensor of all the logits that were in the validation loader.
all_labels: A tensor of all the labels that were in the validation loader.
t_vals: A list of float temperature values to search through. Default: [0.5, 1.0, 2.0].
Return:
The best temperature or tensor of temperatures.
"""
if self.per_class:
# Get network predictions
preds = torch.argmax(all_logits, dim=1)
# Initialize results
optim_temps = []
# Search over number of classes
for l in range(self.num_classes):
# Default temperature is 1
optim_temps.append(torch.ones((1,1)).to(self.device))
if self.verbose: print(f'Searching optimal temperature for label class: {l}')
# Get logits and corresponding labels of predicted class examples
c_idx = torch.where(preds == l)[0]
logits = all_logits[c_idx]
labels = all_labels[c_idx]
# Check that there are predictions of the current class
if labels.shape[0] == 0:
continue
# Get ece and nll before temperature scaling
before_temperature_nll = self.nll_criterion(logits, labels).item()
before_temperature_ece = self.ece_criterion(logits, labels).item()
if self.verbose: print('\tBefore temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece))
# Initialize best ece to base ece
best_ece = before_temperature_ece
# Get most-recently added temperature (1.0)
optim_temp = optim_temps[-1]
# Search over possible temperature values
for t in t_vals:
# Get current temperature value
temp = torch.ones((1,1)).to(self.device) * t
if self.verbose: print(f'\t\tTemperature values: {t}')
# Get ece score after temperature scaling with t
after_temperature_ece = self.ece_criterion(self.global_temperature_scale(logits, temp), labels).item()
if self.verbose: print(f'\t\tAfter temperature - ECE: {after_temperature_ece:.3f}')
# Save parameter if best ece found
if after_temperature_ece < best_ece:
best_ece = after_temperature_ece
optim_temp = temp
if self.verbose: print(f'\t\tCurrent best ECE: {best_ece}')
if self.verbose: print(f'\t\tCurrent optimum T: {optim_temp.item()}')
if self.verbose: print(f'\tFinal best ECE for label class {l}: {best_ece}')
if self.verbose: print(f'\tFinal optimum T for label class {l}: {optim_temp.item()}')
# Save optimal temperature
optim_temps[-1] = optim_temp
# Save as temperature
self.temperature = torch.tensor(optim_temps).data.to(self.device)
else:
if self.verbose: print(f'Searching optimal global temperature')
logits = all_logits
labels = all_labels
# Get base ece score
before_temperature_ece = self.ece_criterion(logits, labels).item()
if self.verbose: print(f'Before temperature - ECE: {before_temperature_ece:.3f}')
# Save base ece as best ece
best_ece = before_temperature_ece
# Initialize optimal temperature to 1
optim_temp = torch.ones((1,1)).to(self.device)
# Iterate through all temperature values
for t in t_vals:
if self.verbose: print(f'\tTemperature values: {t}')
# Set temperature to t
temp = torch.ones((1,1)).to(self.device) * t
# Compute ece using new temperature
after_temperature_ece = self.ece_criterion(self.global_temperature_scale(logits, temp), labels).item()
if self.verbose: print(f'\tAfter temperature - ECE: {after_temperature_ece:.3f}')
# If temperature reduced ece, save temperature and best ece
if after_temperature_ece<best_ece:
best_ece = after_temperature_ece
optim_temp = temp
if self.verbose: print(f'\tCurrent best ECE: {best_ece}')
if self.verbose: print(f'\tCurrent optimum T: {optim_temp.item()}')
# Save optimal temperature
if self.verbose: print(f'Final best ECE: {best_ece}')
if self.verbose: print(f'Final optimum T: {optim_temp.item()}')
self.temperature = optim_temp.data
return self.temperature
def reliability_diagram_and_bin_count(self):
"""Plots reliability and bin count diagrams
"""
if self.per_class:
preds = torch.argmax(self.logits, dim=1)
for c_idx in range(self.num_classes):
class_logits = self.logits[preds == c_idx]
class_targets = self.targets[preds == c_idx]
self.ece_criterion.reliability_diagram_and_bin_count(logits=class_logits, targets=class_targets,
title=f"Class-{c_idx}")
else:
self.ece_criterion.reliability_diagram_and_bin_count(logits=self.logits, targets=self.targets)
class ECE(nn.Module):
"""
ADAPTED FROM: https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py
Calculates the Expected Calibration Error of a model.
The input to this loss is the logits of a model, NOT the softmax scores.
This divides the confidence outputs into equally-sized interval bins.
In each bin, we compute the confidence gap:
bin_gap = | avg_confidence_in_bin - accuracy_in_bin |
We then return a weighted average of the gaps, based on the number
of samples in each bin.
See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
"Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI.
2015.
"""
def __init__(self, n_bins=15, device='cpu'):
"""
n_bins (int): number of confidence interval bins
"""
super(ECE, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
self.n_bins = n_bins
self.device = device
def compute_ece(self, model, val_loader):
"""Will compute the ECE of the given model on the data loader.
Args:
model: A model to compute ECE on.
val_loader: A pytorch data loader.
Returns:
ECE on data loader.
"""
logits_list = []
labels_list = []
with torch.no_grad():
for input, label in val_loader:
input = input.to(self.device)
labels_list.append(label)
logits = model(input)
logits_list.append(logits)
logits = torch.cat(logits_list).to(self.device)
labels = torch.cat(labels_list).to(self.device)
return self.forward(logits, labels)
def forward(self, logits, labels, sm=False):
if sm:
self.sms = logits
else:
self.sms = torch.softmax(logits, dim=1)
# Save for later plotting
self.targets = labels
# Get softmax scores and predictions
confidences, predictions = torch.max(self.sms, 1)
# Get accuracy
accuracies = predictions.eq(labels.int())
# Initialize ece
ece = torch.zeros(1, device=logits.device)
# Iterate through bins
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
# Calculated |confidence - accuracy| in each bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
# Save bin if there are elements
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece
def reliability_diagram_and_bin_count(self, logits=None, targets=None, sm=False, title=""):
"""Creates reliability diagram and bin count plots for saved logits. Logits are saved in the forward pass.
Can also optionally pass logits and targets to plot specific reliability diagrams and bin counts.
Args:
logits (optional): A tensor of logits. Defaults to None.
targets (optional): A tensor of targets. Defaults to None.
title (optional): A title to prepend to the default title. Defaults to "".
"""
if logits is not None:
if sm:
self.sms = logits
else:
self.sms = torch.softmax(logits, dim=1)
if targets is not None:
self.targets = targets
# Get bin precision and counts
bin_precision, count_in_bin = self.get_full_range_bin_precision()
# Get the number of bins
n_bin = len(bin_precision)
# Get the width and center of each bin
bin_width = 1./n_bin
bin_center = torch.linspace(0.0+0.5*bin_width,1.0+0.5*bin_width,n_bin+1)[:-1]
# Create plots
fig, (ax0, ax1) = plt.subplots(1,2, figsize=(12,5))
if title != "":
title = title + " "
fig.suptitle(title + 'Reliability Diagram and Bin Counts')
        # Create reliability diagram
ax0.bar(bin_center,bin_precision,align='center',width=bin_width*0.7,label=f'Bin precision',color='orange')
ax0.set_xlim(0,1)
ax0.set_ylim(0,1)
ax0.plot(bin_center,bin_center,label='ideal case',color='blue',linestyle='-.')
ax0.set_xlabel('Estimated label posterior')
ax0.set_ylabel('Actual precision')
ax0.legend()
# Create bin counts diagram
ax1.bar(bin_center,count_in_bin,align='center',width=bin_width*0.7,label=f'Bin counts',color='blue')
for k,c in enumerate(count_in_bin):
ax1.text(bin_center[k]-.005,count_in_bin[k]+.1,str(int(c)),color='black',fontsize='small',fontweight='bold')
ax1.set_xlim(0,1)
ax1.set_xlabel('Estimated label posterior')
ax1.set_ylabel('Example counts in bin')
ax1.legend()
plt.show()
def get_full_range_bin_precision(self):
conf, preds = torch.max(self.sms, dim=1)
acc = (preds == self.targets)
bin_precision = torch.zeros(self.n_bins)
prop_in_bin = torch.zeros(self.n_bins)
count_in_bin = torch.zeros(self.n_bins)
for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
            in_bin = conf.gt(bin_lower.item()) * conf.le(bin_upper.item())
# proportion of examples in bin over all examples
prop_in_bin[i] = in_bin.float().mean()
count_in_bin[i] = in_bin.sum()
if prop_in_bin[i]>0:
bin_precision[i] = (1.*acc[in_bin]).mean()
return bin_precision, count_in_bin
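# Minimal usage sketch (illustrative, kept as a comment so importing this
# module stays side-effect free; assumes torch tensors of raw model logits
# and integer labels):
#   criterion = ECE(n_bins=15)
#   ece = criterion.forward(torch.randn(1000, 10), torch.randint(0, 10, (1000,)))
#   print(ece.item())
#   criterion.reliability_diagram_and_bin_count()  # uses state saved by forward()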
class PerClassECE(nn.Module):
"""
    Calculates the per-class Expected Calibration Error of a model.
    The input to this loss is the logits of a model, NOT the softmax scores.
    This divides the confidence outputs into equally-sized interval bins.
    In each bin, we compute the confidence gap:
    bin_gap = | avg_confidence_in_bin - accuracy_in_bin |
    For each predicted class separately, we then return a weighted average of
    the gaps, based on the number of samples in each bin.
"""
def __init__(self, n_bins=15, device='cpu'):
"""
n_bins (int): number of confidence interval bins
"""
super(PerClassECE, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
self.n_bins = n_bins
self.device = device
def compute_ece(self, model, val_loader):
"""Will compute the per-class ECE of the given model on the data loader.
Args:
model: A model to compute per-class ECE on.
val_loader: A pytorch data loader.
Returns:
Tensor of ECE scores per-class on data loader.
"""
logits_list = []
labels_list = []
with torch.no_grad():
for input, label in val_loader:
input = input.to(self.device)
labels_list.append(label)
logits = model(input)
logits_list.append(logits)
logits = torch.cat(logits_list).to(self.device)
labels = torch.cat(labels_list).to(self.device)
return self.forward(logits, labels)
    def forward(self, logits, labels, sm=False):
        """Computes the per-class ECE of a batch of logits (or softmax scores if sm=True) against labels."""
self.num_classes = logits.shape[1]
if sm:
self.sms = logits
else:
self.sms = torch.softmax(logits, dim=1)
self.targets = labels
confidences, predictions = torch.max(self.sms, dim=1)
accuracies = predictions.eq(labels)
ece = torch.zeros(self.num_classes).to(self.device)
self.bin_accuracy = torch.zeros((self.num_classes,self.n_bins))
self.prop_in_bin = torch.zeros((self.num_classes,self.n_bins))
self.count_in_bin = torch.zeros((self.num_classes,self.n_bins))
for c in range(self.num_classes):
class_idx = torch.where(predictions == c)[0]
class_confidences = confidences[class_idx]
class_accuracies = accuracies[class_idx]
for i,(bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
                # Calculate |confidence - accuracy| in each bin
in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(bin_upper.item())
self.count_in_bin[c,i] = in_bin.sum()
self.prop_in_bin[c,i] = in_bin.float().mean()
if self.count_in_bin[c,i].item() > 0:
self.bin_accuracy[c,i] = class_accuracies[in_bin].float().mean()
avg_confidence_in_bin = class_confidences[in_bin].float().mean()
ece[c] += torch.abs(avg_confidence_in_bin - self.bin_accuracy[c,i]) * self.prop_in_bin[c,i]
return ece
def reliability_diagram_and_bin_count(self, logits=None, targets=None, sm=False):
"""Creates reliability diagram and bin count plots for saved logits. Logits are saved in the forward pass.
Can also optionally pass logits and targets to plot specific reliability diagrams and bin counts.
Args:
logits (optional): A tensor of logits. Defaults to None.
targets (optional): A tensor of targets. Defaults to None.
            sm (optional): If True, treat logits as softmax scores. Defaults to False.
"""
if logits is not None:
if sm:
self.sms = logits
else:
self.sms = torch.softmax(logits, dim=1)
if targets is not None:
self.targets = targets
self.num_classes = self.sms.shape[1]
for c_idx in range(self.num_classes):
# Get bin precision and counts
bin_precision, count_in_bin = self.get_full_range_bin_precision(c_idx)
# Get the number of bins
n_bin = len(bin_precision)
# Get the width and center of each bin
bin_width = 1./n_bin
bin_center = torch.linspace(0.0+0.5*bin_width,1.0+0.5*bin_width,n_bin+1)[:-1]
# Create plots
fig, (ax0, ax1) = plt.subplots(1,2, figsize=(12,5))
title = f"Class-{c_idx} "
fig.suptitle(title + 'Reliability Diagram and Bin Counts')
            # Create reliability diagram
ax0.bar(bin_center,bin_precision,align='center',width=bin_width*0.7,label=f'Bin precision',color='orange')
ax0.set_xlim(0,1)
ax0.set_ylim(0,1)
ax0.plot(bin_center,bin_center,label='ideal case',color='blue',linestyle='-.')
ax0.set_xlabel('Estimated label posterior')
ax0.set_ylabel('Actual precision')
ax0.legend()
# Create bin counts diagram
ax1.bar(bin_center,count_in_bin,align='center',width=bin_width*0.7,label=f'Bin counts',color='blue')
for k,c in enumerate(count_in_bin):
ax1.text(bin_center[k]-.005,count_in_bin[k]+.1,str(int(c)),color='black',fontsize='small',fontweight='bold')
ax1.set_xlim(0,1)
ax1.set_xlabel('Estimated label posterior')
ax1.set_ylabel('Example counts in bin')
ax1.legend()
plt.show()
def get_full_range_bin_precision(self, c_idx):
conf, preds = torch.max(self.sms, dim=1)
class_idx = torch.where(preds == c_idx)[0]
acc = (preds[class_idx] == self.targets[class_idx])
bin_precision = torch.zeros(self.n_bins)
prop_in_bin = torch.zeros(self.n_bins)
count_in_bin = torch.zeros(self.n_bins)
for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
            in_bin = conf[class_idx].gt(bin_lower.item()) * conf[class_idx].le(bin_upper.item())
# proportion of examples in bin over all examples
prop_in_bin[i] = in_bin.float().mean()
count_in_bin[i] = in_bin.sum()
if prop_in_bin[i]>0:
bin_precision[i] = acc[in_bin].float().mean()
return bin_precision, count_in_bin
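# Per-class variant, same calling convention (illustrative):
#   criterion = PerClassECE(n_bins=15)
#   pece = criterion.forward(logits, labels)  # one ECE value per predicted class
#   print(pece.mean().item(), pece.std().item())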
def load_model(path):
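    # Intentionally a stub; supply your own loader. A minimal sketch, assuming
    # the model was serialized with torch.save(model, path):
    #   return torch.load(path, map_location="cpu").eval()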
print("Load your torch model")
pass
if __name__ == "__main__":
"""An example using CIFAR10. Note that this code will not run correctly until load_model() is implemented.
For this code to work, load_model() should return a pytorch model suitable to classify CIFAR10. See
below.
"""
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
batch_size = 32
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# apply histogram binning approach --------------------------------------- #
# load base model
#####################################
# CHANGE THIS LINE
base_model = load_model("model_path")
#####################################
device = 'cuda'
# init class instance
n_bins = 15
ece_criterion = ECE(n_bins=n_bins, device=device)
pece_criterion = PerClassECE(n_bins=n_bins, device=device)
# run histogram binning on validation set
ece = ece_criterion.compute_ece(base_model, testloader)
print(f"ECE: {ece:.2f}")
pece = pece_criterion.compute_ece(base_model, testloader)
print(f"Per-class ECE: {pece.mean():.2f} +- {pece.std():.2f}")
model_temp_scaled = ModelWithTemperature(model=base_model, n_bins=n_bins, strategy="grid",
per_class=True, device=device)
# Setup values to iterate over during learning or grid search
# For the grid search approach
temps = torch.linspace(0.25, 4.0, 100)
temperature = model_temp_scaled.set_temperature(testloader, t_vals=list(temps))
print(f"Temperature: {temperature:.2f}")
ece = ece_criterion.compute_ece(model_temp_scaled, testloader)
print(f"ECE: {ece:.2f}")
pece = pece_criterion.compute_ece(model_temp_scaled, testloader)
print(f"Per-class ECE: {pece.mean():.2f} +- {pece.std():.2f}")
# viz of bin reliability and counts
ece_criterion.reliability_diagram_and_bin_count()
pece_criterion.reliability_diagram_and_bin_count() | 33,250 | 45.181944 | 143 | py |
learning-idk | learning-idk-main/threshold.py | """
Paper: Learning When to Say "I Don't Know"
arXiv Link: https://arxiv.org/abs/2209.04944
Authors: Nicholas Kashani Motlagh*, Jim Davis*,
Tim Anderson+, and Jeremy Gwinnup+
Affiliation: *Department of Computer Science & Engineering, Ohio State University
+Air Force Research Laboratory, Wright-Patterson AFB
Corresponding Email: [email protected] (First: Nicholas, Last: Kashani Motlagh)
Date: Sep 6, 2022
This research was supported by the U.S. Air Force Research Laboratory under Contract #GRT00054740 (Release #AFRL-2022-3339).
"""
# Built-in imports
import argparse
from pathlib import Path
# External imports
import numpy as np
from scipy import stats
from statsmodels.stats.proportion import proportion_confint
import torch
from torch.utils.data import Dataset, DataLoader
# Local imports
from temperature_scaling import ModelWithTemperature
# DEFAULTS
# Output Path for thresholds
DEFAULT_THRESHOLD_PATH = "thresholds.pt"
# Temperature scaling
N_BINS = 15
# Thresholding algorithm (b_cdf, wilson, wilson_cc, clopper_pearson, agresti_coull)
THRESH_FUNC = "b_cdf"
# Delta value used in thresholding algorithm
DELTA = 0.05
# Batch Size
NUM_IN_BATCH = 32
def _parse_args():
"""
Command-line arguments to the system.
:return: the parsed args bundle
"""
parser = argparse.ArgumentParser(description='threshold.py')
parser.add_argument('--threshold_path', type=str, default=DEFAULT_THRESHOLD_PATH, help='Path to save computed thresholds')
parser.add_argument('--data_path', type=str, default=None, help='Path to validation data (logits,targets)')
parser.add_argument('--test_data_path', type=str, default=None, help='Path to test data (logits,targets)')
parser.add_argument('--synth', action=argparse.BooleanOptionalAction, help="Boolean flag indicating data is synthetic")
parser.add_argument('--skip_ts', action=argparse.BooleanOptionalAction, help="Boolean flag indicating whether to skip temperature scaling.")
parser.add_argument('--delta', type=float, default=DELTA, help='User-provided significance level')
    parser.add_argument('--thresh_func', type=str, default=THRESH_FUNC, help='Method to compute thresholds (b_cdf, wilson, '
                        'wilson_cc, clopper_pearson, agresti_coull)')
return parser.parse_args()
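# Example invocation (file names are placeholders):
#   python threshold.py --data_path val_logits.pt --test_data_path test_logits.pt \
#       --delta 0.05 --thresh_func b_cdf
# Each .pt file is expected to hold a (num_samples x (num_classes + 1)) tensor of
# per-row logits with the integer target appended as the last column (plus the
# ideal decision as a final extra column when --synth is set); see load_data().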
class LogitDataset(Dataset):
"""Simple torch Dataset with examples as logits.
"""
def __init__(self, samples, targets):
"""Instantiates the Logit Dataset.
Args:
samples (torch.tensor): Tensor of logits (samples x classes)
targets (torch.tensor): Tensor of targets (samples)
"""
self.samples = samples
self.targets = targets
self.num_classes = torch.unique(targets).numel()
def __len__(self):
return self.targets.numel()
def __getitem__(self, idx):
return self.samples[idx,:], self.targets[idx]
def load_data(data_path, synth=False):
"""Loads the data from data_path and returns tensors for logits and targets.
Args:
data_path (str or Path): Path to data (logits, targets)
synth (bool, optional): Flag indicating whether data is synthetic. Defaults to False.
Returns:
Tuple: logits, targets
"""
    if not isinstance(data_path, Path):
data_path = Path(data_path)
data = torch.load(data_path)
# Synthetic data has ground truth decision in last position
if synth:
return data[:, :-2].to(torch.float32), data[:, -2].to(torch.int64)
else:
return data[:, :-1].to(torch.float32), data[:, -1].to(torch.int64)
def load_decisions(data_path):
"""Loads the data from data_path and returns tensors for logits and targets.
Args:
data_path (str or Path): Path to data (logits, targets)
synth (bool, optional): Flag indicating whether data is synthetic. Defaults to False.
Returns:
Tuple: logits, targets
"""
    if not isinstance(data_path, Path):
data_path = Path(data_path)
data = torch.load(data_path)
return data[:, -1].to(torch.int64)
def get_logits_loader(logits, targets):
"""Generates a dataloader from a path of logits, targets.
Args:
logits (torch.tensor): Tensor of logits.
targets (torch.tensor): Tensor of targets.
Returns:
DataLoader: DataLoader for data in data_path.
"""
dataset = LogitDataset(logits, targets)
return DataLoader(dataset, batch_size=NUM_IN_BATCH, shuffle=False)
def learn_temp(dataloader):
"""Learns per-class temperatures on logits.
Args:
dataloader (torch DataLoader): DataLoader for logits.
Returns:
ModelWithTemperature: A torch.nn.Module which simply temperature scales inputs (per-class).
"""
model_ts = ModelWithTemperature(n_bins=N_BINS, strategy="grid", per_class=True)
model_ts.set_temperature(dataloader, t_vals=list(torch.linspace(0.25, 4.0, 100)))
model_ts.eval()
return model_ts
def get_ts_data(model_ts, dataloader):
"""Extracts temperature scaled logits from a dataloader.
Args:
model_ts (ModelWithTemperature): A temperature scaled model.
dataloader (DataLoader): A dataloader of logits.
Returns:
Tuple: ts_logits, targets
"""
all_ts_logits = []
all_targets = []
for (inputs,targets) in dataloader:
ts_logits = model_ts(inputs)
all_ts_logits.append(ts_logits)
all_targets.append(targets)
all_ts_logits = torch.cat(all_ts_logits, dim=0)
all_targets = torch.cat(all_targets, dim=0)
return all_ts_logits, all_targets
def wilson_cc_bound(k, n, delta=DELTA):
"""Generates a (1-delta) upper bound using the Wilson interval with continuity correction.
This strategy is approximately equivalent to the Binomial CDF with delta area in the tail.
Args:
        k (int): Number of successes
n (int): Number of trials
delta (float, optional): User defined significance level. Defaults to DELTA (0.05).
Returns:
float: The upper bound.
"""
p = k/n
q = 1.-p
z = stats.norm.isf((1-delta))
z2 = z**2
denom = 2*(n+z2)
num = 2.*n*p+z2+1.+z*np.sqrt(z2+2-1./n+4*p*(n*q-1))
bound = num/denom
if p == 0:
bound = 0.
elif p == 1:
bound = 1.
return bound
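# Example (illustrative): bound for 5 successes in 20 trials at the default delta
#   ub = wilson_cc_bound(5, 20, delta=0.05)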
def learn_thresholds(logits, targets, delta=DELTA, thresh_func=THRESH_FUNC):
"""Learns per-class thresholds on logits using the proposed approach in the paper. The method
validates the reject region using thresh_func at a user-provided significance level.
Args:
logits (torch.tensor): Tensor of logits.
targets (torch.tensor): Tensor of targets.
delta (float, optional): User defined significance level. Defaults to DELTA.
thresh_func (str, optional): Implementation used to validate reject region. Options
are (b_cdf, wilson, wilson_cc, clopper_pearson, agresti_coull). Defaults to THRESH_FUNC.
Returns:
torch.tensor: A tensor of per-class thresholds.
"""
num_classes = torch.unique(targets).numel()
thresholds = torch.zeros(num_classes)
# Extract softmax scores
sm_scores = torch.softmax(logits, dim=1)
max_sms, preds = torch.max(sm_scores, dim=1)
# Compute per-class thresholds
for c in range(num_classes):
class_idx = torch.where(preds == c)[0]
thresholds[c] = learn_class_threshold(preds[class_idx], max_sms[class_idx], targets[class_idx], delta, thresh_func)
return thresholds
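# Minimal sketch (illustrative; any torch tensors of logits/targets work):
#   logits = torch.randn(1000, 10)
#   targets = torch.randint(0, 10, (1000,))
#   thresholds = learn_thresholds(logits, targets, delta=0.05, thresh_func="b_cdf")
#   # thresholds[c] is the max-softmax cutoff at or below which class-c
#   # predictions are rejected (see evaluate()).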
def accuracy(is_correct):
"""Computes accuracy from a binary tensor.
Args:
is_correct (bool): Binary tensor of successes and failures.
Returns:
float: Accuracy of the trials.
"""
return torch.sum(is_correct) / is_correct.numel()
def check_reject(preds, targets, delta, thresh_func):
"""Validate whether the reject region is viable using thresh_func at a delta significance level.
Args:
preds (torch.tensor): Tensor of predictions.
targets (torch.tensor): Tensor of targets.
delta (float, optional): User defined significance level. Defaults to DELTA.
thresh_func (str, optional): Implementation used to validate reject region. Options
are (b_cdf, wilson, wilson_cc, clopper_pearson, agresti_coull). Defaults to THRESH_FUNC.
Returns:
bool: Whether the reject region is viable using thresh_func at a user defined significance level.
"""
is_correct = (preds == targets)
k, n = torch.sum(is_correct), is_correct.numel()
if thresh_func == "b_cdf":
return stats.binom.cdf(k, n, 0.5) <= 1-delta
elif thresh_func == "wilson_cc":
return wilson_cc_bound(k, n, delta=delta) <= 0.5
else:
if thresh_func == "clopper_pearson":
thresh_func = "beta"
# We need a 1-delta single tail upper bound so alpha=2*(1-delta)
_, ci_u = proportion_confint(k, n, alpha=2*(1-delta), method=thresh_func)
return ci_u <= 0.5
def learn_class_threshold(preds, max_sms, targets, delta, thresh_func):
"""Learns a threshold for a single class using thresh_func at a user-defined significance level delta.
Args:
preds (torch.tensor): Tensor of predictions.
max_sms (torch.tensor): Tensor of softmax scores corresponding to predictions
targets (torch.tensor): Tensor of targets.
delta (float, optional): User defined significance level. Defaults to DELTA.
thresh_func (str, optional): Implementation used to validate reject region. Options
are (b_cdf, wilson, wilson_cc, clopper_pearson, agresti_coull). Defaults to THRESH_FUNC.
Returns:
float: Threshold that optimizes select accuracy while adhering to constraint.
"""
# Only need to check thresholds that optimize select accuracy
incorrect_idx = torch.where(preds != targets)[0]
possible_thresholds = torch.unique(max_sms[incorrect_idx])
best_thresh, best_cov = 0, -1
# Select accuracy
best_sacc = accuracy(preds == targets)
# Check possible thresholds
for thresh in possible_thresholds:
select_idx = torch.where(max_sms > thresh)[0]
reject_idx = torch.where(max_sms <= thresh)[0]
if select_idx.numel() > 0:
sacc = accuracy(preds[select_idx] == targets[select_idx])
else:
# The select accuracy is undefined so reject all
sacc = 1.1
# Get coverage
cov = select_idx.numel() / (select_idx.numel() + reject_idx.numel())
# Check reject region
if check_reject(preds[reject_idx], targets[reject_idx], delta, thresh_func):
# Optimize select accuracy / coverage
if sacc > best_sacc or (sacc == best_sacc and cov > best_cov):
best_thresh = thresh
best_sacc = sacc
best_cov = cov
return best_thresh
def sanity_check(logits, targets):
"""A quick sanity check that prints the accuracy of logits against targets. This ensures logits and targets
were loaded in correctly.
Args:
logits (torch.tensor): Tensor of logits.
targets (torch.tensor): Tensor of targets.
"""
print(f"Base accuracy: {accuracy(torch.argmax(logits, dim=1) == targets)}")
def evaluate(logits, targets, thresholds, decisions=None):
"""Computes the select accuracy, reject accuracy, and coverage of the logits/targets using thresholds.
If decisions are provided (in the case of equal-density synthetic data), then IDA will also be computed.
Args:
logits (torch.tensor): Tensor of logits.
targets (torch.tensor): Tensor of targets.
thresholds (torch.tensor): Tensor of per-class thresholds.
decisions (torch.tensor, optional): Tensor of ideal decisions used to compute IDA.
Only applies for synthetic equal-density datasets. Defaults to None.
"""
# Get softmax scores
sm_scores = torch.softmax(logits, dim=1)
max_sms, preds = torch.max(sm_scores, dim=1)
# Get tensor of corresponding thresholds for each prediction
class_thresholds = thresholds[preds]
select_idx = torch.where(max_sms > class_thresholds)[0]
reject_idx = torch.where(max_sms <= class_thresholds)[0]
# Compute select accuracy
if select_idx.numel() > 0:
sacc = accuracy(preds[select_idx] == targets[select_idx])
else:
sacc = -1
# Compute reject accuracy
if reject_idx.numel() > 0:
racc = accuracy(preds[reject_idx] == targets[reject_idx])
else:
racc = -1
# Compute coverage
cov = select_idx.numel() / (select_idx.numel() + reject_idx.numel())
print(f"Select Accuracy: {sacc * 100 :.1f}")
print(f"Reject Accuracy: {racc * 100 :.1f}")
print(f"Coverage: {cov * 100 :.1f}")
# Compute IDA
if decisions is not None:
selected = (max_sms > class_thresholds)
ida = accuracy(selected == decisions)
print(f"IDA: {ida * 100 :.1f}")
def main(data_path, threshold_path=DEFAULT_THRESHOLD_PATH, synth=False, delta=DELTA,
skip_ts=False, thresh_func=THRESH_FUNC, test_data_path=None):
# Load data to learn thresh
print("Loading Data")
logits, targets = load_data(data_path, synth=synth)
decisions = None
# Load data to evaluate thresholds
test_logits, test_targets, test_decisions = None, None, None
if test_data_path:
test_logits, test_targets = load_data(test_data_path, synth=synth)
# Get decisions if synthetic
if synth:
decisions = load_decisions(data_path)
if test_data_path:
test_decisions = load_decisions(test_data_path)
# Print accuracy
sanity_check(logits, targets)
# Temperature scale data
model_ts = None
if not skip_ts:
print("Temperature Scaling")
data_loader = get_logits_loader(logits, targets)
model_ts = learn_temp(data_loader)
logits, targets = get_ts_data(model_ts, data_loader)
# Temp scale test data
if test_data_path:
test_data_loader = get_logits_loader(test_logits, test_targets)
test_logits, test_targets = get_ts_data(model_ts, test_data_loader)
# Learn thresholds
print("Learning Thresholds")
thresholds = learn_thresholds(logits, targets, delta=delta, thresh_func=thresh_func)
torch.save(thresholds, threshold_path)
# Evaluate thresholds on validation data
print(f"Evaluating {data_path}")
evaluate(logits, targets, thresholds, decisions=decisions)
# Evaluate on test data
if test_data_path:
print(f"Evaluating {test_data_path}")
evaluate(test_logits, test_targets, thresholds, test_decisions)
if __name__ == '__main__':
args = _parse_args()
main(args.data_path, threshold_path=args.threshold_path, synth=args.synth, delta=args.delta,
skip_ts=args.skip_ts, thresh_func=args.thresh_func, test_data_path=args.test_data_path) | 15,103 | 37.927835 | 144 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tools/generate_config.py | #!/usr/bin/env python3
import argparse
from pathlib import Path
import json
import os
from collections import OrderedDict
THIS_DIR = Path(__file__).parent
if (THIS_DIR / "schemas").exists():
SCHEMAS_DIR = THIS_DIR / "schemas"
else:
SCHEMAS_DIR = THIS_DIR.parent / "share" / "schemas"
def post_type(jtype):
if "type" in jtype:
jtt = jtype["type"]
if "maxLength" in jtype:
if jtt == "string":
return "[" + str(jtype["maxLength"] + 1) + "]"
elif jtt == "array":
return "[" + str(jtype["maxLength"]) + "]"
return ""
else:
return ""
def pre_type(jtype):
if "type" in jtype:
jtt = jtype["type"]
if jtt == "array" or (
isinstance(jtt, list) and "array" in jtt and "null" in jtt
):
items = jtype["items"]
item_type = pre_type(items)
if "maxLength" in jtype:
return item_type
else:
return item_type + "*"
elif jtt == "boolean":
return "bool"
elif jtt == "none":
return "null"
elif jtt == "string" or (
isinstance(jtt, list) and "string" in jtt and "null" in jtt
):
return "char" if "maxLength" in jtype else "char*"
else:
raise Exception("unhandled json type: %s" % jtype)
elif "$ref" in jtype:
rtype = jtype["$ref"][jtype["$ref"].rfind("/") + 1 :]
if rtype.startswith("safe_"):
return rtype[5:]
elif rtype == "hex_string":
return "uint8_t*"
else:
return rtype
else:
raise Exception("unknown json type: %s" % jtype)
return jtype
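# Illustrative schema-to-C mappings produced by pre_type()/post_type():
#   {"type": "string"}                              -> "char*"
#   {"type": "string", "maxLength": 15}             -> "char" + "[16]" (room for NUL)
#   {"type": "array", "items": {"type": "string"}}  -> "char**"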
def need_size_var(jtype):
if "type" in jtype:
jtt = jtype["type"]
return (jtt == "array" and "maxLength" not in jtype) or (
isinstance(jtt, list) and "array" in jtt and "null" in jtt
)
else:
return "$ref" in jtype and jtype["$ref"] == "#/definitions/hex_string"
num_settings = 0
header_includes = """
#include "shared/oe_compat.h"
"""
def generate_header(schema_file_name, root, args):
global num_settings
with open(str(args.header), "w") as header:
h = (
os.path.basename(schema_file_name)
.upper()
.replace(".", "_")
.replace("-", "_")
)
header.write("#ifndef _%s_H_\n" % h)
header.write("#define _%s_H_\n" % h)
header.write(
"\n/* Automatically generated from %s; do not modify. */\n\n"
% schema_file_name
)
header.write(header_includes)
header.write("\n")
top = root["$ref"].rsplit("/")[-1]
header.write("#define %s_VERSION 1UL\n\n" % top.upper())
num_settings = 0
for typename, typedef in root["definitions"].items():
if typename.startswith("sgxlkl_"):
if "enum" in typedef:
names = (
typedef["c_enum"] if "c_enum" in typedef else typedef["enum"]
)
header.write("typedef enum\n{\n")
i = 0
num_vals = len(names)
for value in names:
header.write(" %s = %d" % (value.upper(), i))
if i < num_vals - 1:
header.write(",")
if "description" in typedef:
header.write(" /* %s */" % typedef["description"])
header.write("\n")
i += 1
header.write("} %s;\n\n" % typename)
if "c_enum" in typedef:
header.write(
"const char* %s_to_string(%s e);\n" % (typename, typename)
)
header.write(
"%s string_to_%s(const char *e);\n\n" % (typename, typename)
)
else:
header.write("typedef struct %s\n{\n" % (typename[:-2]))
for name, jtype in typedef["properties"].items():
if name == "format_version":
continue
elif need_size_var(jtype):
var_name = "num_" + name
if name == "key":
var_name = "key_len"
header.write(" size_t %s;\n" % var_name)
header.write(
" %s %s%s;\n" % (pre_type(jtype), name, post_type(jtype))
)
num_settings += 1
header.write("} %s;\n\n" % typename)
header.write("extern const %s %s_default;\n\n" % (top, top[:-2]))
header.write(
"typedef struct {\n"
" char* scope;\n"
" char* type;\n"
" char* description;\n"
" char* default_value;\n"
" char* override_var;\n"
"} %s_setting_t;\n\n" % top[:-2]
)
header.write(
"extern const %s_setting_t %s_settings[%d];\n\n"
% (top[:-2], top[:-2], num_settings)
)
header.write("#endif /* _%s_H_ */" % h)
source_includes = """
#include "shared/oe_compat.h"
#ifdef SGXLKL_ENCLAVE
# include "enclave/enclave_util.h"
# define FAIL sgxlkl_fail
#else
# include "host/sgxlkl_util.h"
# define FAIL sgxlkl_host_fail
#endif
"""
def generate_source(schema_file_name, root, args):
with open(str(args.source), "w") as source:
source.write(
"/* Automatically generated from %s; do not modify. */\n" % schema_file_name
)
source.write(source_includes)
source.write("\n")
source.write('#include "%s"\n\n' % args.header)
top = root["$ref"].rsplit("/")[-1]
# enum conversions
for typename, typedef in root["definitions"].items():
if "enum" in typedef:
if "c_enum" in typedef:
names = typedef["enum"]
c_names = typedef["c_enum"]
if len(names) != len(c_names):
raise Exception(
"ERROR: length of c_enum does not match enum in %s"
% typename
)
source.write(
"const char* %s_to_string(%s e)\n{\n" % (typename, typename)
)
source.write(" switch(e) {\n")
for i in range(len(names)):
name = names[i]
c_name = c_names[i]
source.write(' case %s: return "%s";\n' % (c_name, name))
source.write(' default: return ""; /* Unreachable */\n')
source.write(" }\n")
source.write("}\n\n")
source.write(
"%s string_to_%s(const char *e)\n{\n" % (typename, typename)
)
for i in range(len(names)):
name = names[i]
c_name = c_names[i]
source.write(
' if (strcmp(e, "%s") == 0) return %s;\n' % (name, c_name)
)
source.write(" FAIL(\"unknown enum value '%s'\\n\", e);\n")
source.write(" return %s;\n\n" % (c_names[0]))
source.write("}\n\n")
# default config
source.write("const %s %s_default = {\n" % (top, top[:-2]))
scope = []
def initialize(scope, elem):
indent = " " * (len(scope) + 1)
typedef = root["definitions"][elem]
if "enum" not in typedef:
for name, jtype in typedef["properties"].items():
if name == "format_version":
continue
tname = pre_type(jtype)
if tname.startswith("sgxlkl_") and tname.endswith("*"):
source.write("%s.%s=NULL,\n" % (indent, name))
elif (
tname.startswith("sgxlkl_")
and "enum" not in root["definitions"][tname]
):
tdef = root["definitions"][tname]
if "type" in jtype and jtype["type"] == "array":
if "default" in jtype:
dflt = jtype["default"]
t = "{"
for i in dflt:
if type(i) is OrderedDict:
t += "{"
for k, v in i.items():
t += "." + str(k) + '="' + v + '"'
t += "},"
t += "}"
source.write("%s.%s = %s,\n" % (indent, name, t))
else:
source.write("%s.%s = {0},\n" % (indent, name))
elif "enum" not in tdef:
scope.append(name)
source.write("%s.%s = {\n" % (indent, name))
initialize(scope, tname)
source.write("%s},\n" % indent)
scope = scope[:-1]
else:
scope.append(name)
sname = ".".join(scope)
ctype = pre_type(jtype) + post_type(jtype)
if "default" not in jtype:
raise Exception("ERROR: no default provided for %s" % sname)
dflt = jtype["default"]
if ctype == "bool":
dflt = "true" if dflt else "false"
elif ctype == "char*" or ctype.startswith("char["):
if dflt is None or dflt == []:
dflt = "NULL"
else:
dflt = '"' + dflt + '"'
elif (
tname.startswith("sgxlkl_")
and "enum" in root["definitions"][tname]
):
etype = root["definitions"][tname]
names = etype["enum"]
c_names = etype["c_enum"]
i = 0
for n in names:
if n == dflt:
break
else:
i += 1
if i >= len(c_names):
raise Exception(
"ERROR: missing C enum type name for '%s'" % dflt
)
dflt = c_names[i]
if dflt is None or dflt == [] or dflt == "":
dflt = "NULL"
if need_size_var(jtype):
size_var_name = "num_" + name
if name == "key":
size_var_name = "key_len"
source.write("%s.%s=%s,\n" % (indent, size_var_name, 0))
source.write("%s.%s=%s,\n" % (indent, name, dflt))
scope = scope[:-1]
scope = scope[:-1]
initialize(scope, top)
source.write("};\n\n")
source.write("// clang-format off\n")
source.write(
"const %s_setting_t %s_settings[%d] = {\n"
% (top[:-2], top[:-2], num_settings)
)
scope = []
def describe(scope, elem):
typedef = root["definitions"][elem]
if "enum" not in typedef:
for name, jtype in typedef["properties"].items():
if name == "format_version":
continue
tname = pre_type(jtype)
if tname.endswith("*"):
tname = tname[:-1]
if (
tname.startswith("sgxlkl_")
and "enum" not in root["definitions"][tname]
):
scope.append(name)
describe(scope, tname)
scope = scope[:-1]
else:
scope.append(name)
sname = ".".join(scope)
ctype = pre_type(jtype) + post_type(jtype)
desc = jtype["description"] if "description" in jtype else ""
desc = desc.replace('"', '\\"')
dflt = "NULL"
if "default" in jtype:
dflt = jtype["default"]
                        if dflt == [] or dflt is None:
dflt = "NULL"
if ctype == "bool":
dflt = "true" if dflt else "false"
override_var = "NULL"
if "overridable" in jtype:
override_var = '"' + jtype["overridable"] + '"'
source.write(
' {"%s", "%s", "%s", "%s", %s},\n'
% (sname, ctype, desc, dflt, override_var)
)
scope = scope[:-1]
scope = scope[:-1]
describe(scope, top)
source.write("};\n")
source.write("// clang-format on\n")
def generate(args):
with open(args.schema_file, "r") as schema_file:
root = json.load(schema_file, object_pairs_hook=OrderedDict)
generate_header(args.schema_file, root, args)
generate_source(args.schema_file, root, args)
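# Typical use (schema path is illustrative; output names match the defaults below):
#   ./generate_config.py sgxlkl-config.schema.json \
#       --header sgxlkl_config_gen.h --source sgxlkl_config_gen.c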
def main():
parser = argparse.ArgumentParser(
description="Generator for SGX-LKL configuration sources"
)
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument(
"schema_file", type=Path, help="Schema file path", metavar="PATH"
)
parser.add_argument(
"--header",
type=Path,
help="Header file to generate",
default="sgxlkl_config_gen.h",
)
parser.add_argument(
"--source",
type=Path,
help="Source file to generate",
default="sgxlkl_config_gen.c",
)
parser.set_defaults(func=generate)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 15,010 | 35.791667 | 88 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tools/gdb/sgx-lkl-gdb.py | # To use, add source /path/to/gdb.py to your $HOME/.gdbinit file.
import atexit
import os
import re
import subprocess
import tempfile
import textwrap as tw
# The set of loaded modules.
g_modules = set()
def add_symbol_file(filename, baseaddr):
sections = []
textaddr = "0"
p = subprocess.Popen(["readelf", "-SW", filename], stdout=subprocess.PIPE)
for line in p.stdout.readlines():
line = line.decode("utf-8").strip()
if not line.startswith("[") or line.startswith("[Nr]"):
continue
line = re.sub(r"\[ *(\d+)\]", "\\1", line)
sec = dict(zip(["nr", "name", "type", "addr"], line.split()))
if sec["nr"] == "0":
continue
if sec["name"] == ".text":
textaddr = sec["addr"]
elif int(sec["addr"], 16) != 0:
sections.append(sec)
module_address = int(textaddr, 16) + baseaddr
cmd = "add-symbol-file %s 0x%08x" % (filename, module_address)
for s in sections:
addr = int(s["addr"], 16)
if s["name"] == ".text" or addr == 0:
continue
cmd += " -s %s 0x%x" % (s["name"], int(baseaddr + addr))
gdb.execute(cmd)
global g_modules
g_modules.add(module_address)
# Remove symbol information for loaded modules.
def remove_modules(event):
global g_modules
for module_address in g_modules:
cmd = "remove-symbol-file -a 0x%08x" % (module_address)
gdb.execute(cmd, from_tty=False, to_string=True)
g_modules.clear()
class StarterExecBreakpoint(gdb.Breakpoint):
STARTER_HAS_LOADED = "__gdb_hook_starter_ready"
def __init__(self):
super(StarterExecBreakpoint, self).__init__(
self.STARTER_HAS_LOADED, internal=True
)
self.inited = False
def stop(self):
base_addr = gdb.parse_and_eval("base_addr")
in_hw_mode = gdb.parse_and_eval("mode != SW_DEBUG_MODE")
if in_hw_mode:
gdb.write("Running on hardware... skipping simulation load.\n")
else:
libsgxlkl = gdb.execute('printf "%s", libsgxlkl_path', to_string=True)
gdb.write(
"Loading symbols for %s at base 0x%x...\n" % (libsgxlkl, int(base_addr))
)
add_symbol_file(libsgxlkl, int(base_addr))
load_flag = gdb.lookup_global_symbol("__gdb_load_debug_symbols_alive")
if load_flag and not load_flag.value():
gdb.write("Enabled loading in-enclave debug symbols\n")
gdb.execute("set __gdb_load_debug_symbols_alive = 1")
if not self.inited:
self.inited = True
LoadLibraryBreakpoint()
LoadLibraryFromFileBreakpoint()
return False
class LoadLibraryBreakpoint(gdb.Breakpoint):
LDSO_LOAD_LIBRARY = "__gdb_hook_load_debug_symbols"
def __init__(self):
super(LoadLibraryBreakpoint, self).__init__(
self.LDSO_LOAD_LIBRARY, internal=True
)
def stop(self):
# dump symbols out to disk
uintptr_t = gdb.lookup_type("uintptr_t")
ssize_t = gdb.lookup_type("ssize_t")
dso = "((struct dso*)$rdi)"
symmem = "$rsi"
symsz = "$rdx"
mem_loc = int(gdb.parse_and_eval(symmem).cast(uintptr_t))
mem_sz = int(gdb.parse_and_eval(symsz).cast(ssize_t))
memvw = gdb.selected_inferior().read_memory(mem_loc, mem_sz)
# work out where new library is loaded
base_addr = int(gdb.parse_and_eval(dso + "->base").cast(uintptr_t))
fn = None
with tempfile.NamedTemporaryFile(suffix=".so", delete=False) as f:
f.write(memvw)
fn = f.name
gdb.write("Loading symbols at base 0x%x...\n" % (int(base_addr)))
add_symbol_file(fn, int(base_addr))
atexit.register(os.unlink, fn)
return False
class LoadLibraryFromFileBreakpoint(gdb.Breakpoint):
LDSO_LOAD_LIBRARY_FROM_FILE = "__gdb_hook_load_debug_symbols_from_file"
def __init__(self):
super(LoadLibraryFromFileBreakpoint, self).__init__(
self.LDSO_LOAD_LIBRARY_FROM_FILE, internal=True
)
def stop(self):
uintptr_t = gdb.lookup_type("uintptr_t")
libpath = gdb.execute('printf "%s", libpath', to_string=True)
base_addr = int(gdb.parse_and_eval("dso->base").cast(uintptr_t))
gdb.write("Loading symbols at base 0x%x...\n" % (int(base_addr)))
add_symbol_file(libpath, int(base_addr))
return False
class LthreadBacktrace(gdb.Command):
"""
Print backtrace for an lthread
Param 1: Address of lthread
Param 2: Backtrace depth (optional)
"""
def __init__(self):
super(LthreadBacktrace, self).__init__("lthread-bt", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if not argv:
gdb.write(
"No lthread address provided. Usage: lthread-bt <addr> [<btdepth>]\n"
)
gdb.flush()
return False
lt_addr = argv[0]
if len(argv) > 1:
btdepth = argv[1]
else:
btdepth = ""
old_fp = gdb.execute("p/x $rbp", to_string=True).split("=")[1].strip()
old_sp = gdb.execute("p/x $rsp", to_string=True).split("=")[1].strip()
old_ip = gdb.execute("p/x $rip", to_string=True).split("=")[1].strip()
gdb.execute("set $rbp = ((struct lthread *)%s)->ctx.ebp" % lt_addr)
gdb.execute("set $rsp = ((struct lthread *)%s)->ctx.esp" % lt_addr)
gdb.execute("set $rip = ((struct lthread *)%s)->ctx.eip" % lt_addr)
gdb.execute("bt %s" % btdepth)
# Restore registers
gdb.execute("set $rbp = %s" % old_fp)
gdb.execute("set $rsp = %s" % old_sp)
gdb.execute("set $rip = %s" % old_ip)
return False
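# Usage from the GDB prompt (address and depth are illustrative):
#   (gdb) lthread-bt 0x7f1234567890 20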
class LthreadStats(gdb.Command):
"""
Prints the number of lthreads in the futex, scheduler, and syscall queues.
"""
def __init__(self):
super(LthreadStats, self).__init__("lthread-stats", gdb.COMMAND_USER)
    def invoke(self, arg, from_tty):
        schedq_lts = self.count_queue_elements("__scheduler_queue")
        syscall_req_lts = self.count_queue_elements("__syscall_queue")
        syscall_ret_lts = self.count_queue_elements("__return_queue")
        fxq_lts = 0
fxq = (
gdb.execute("p/x futex_queues->slh_first", to_string=True)
.split("=")[1]
.strip()
)
while int(fxq, 16) != 0:
fxq_lts = fxq_lts + 1
fxq = (
gdb.execute(
"p/x ((struct futex_q*)%s)->entries.sle_next" % fxq, to_string=True
)
.split("=")[1]
.strip()
)
waiting_total = schedq_lts + syscall_req_lts + syscall_ret_lts + fxq_lts
gdb.write("Waiting lthreads:\n")
gdb.write(" scheduler queue: %s\n" % schedq_lts)
gdb.write(" syscall request queue: %s\n" % syscall_req_lts)
gdb.write(" syscall return queue: %s\n" % syscall_ret_lts)
gdb.write(" waiting for futex: %s\n" % fxq_lts)
gdb.write(" Total: %s\n" % waiting_total)
gdb.flush()
return False
def count_queue_elements(self, queue):
enqueue_pos = int(
gdb.execute("p %s->enqueue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
dequeue_pos = int(
gdb.execute("p %s->dequeue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
return enqueue_pos - dequeue_pos
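# Usage from the GDB prompt:
#   (gdb) lthread-stats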
class LogAllLts(gdb.Command):
"""
Do a backtrace of all active lthreads.
Param: Depth of backtrace (optional)
"""
def __init__(self):
super(LogAllLts, self).__init__("bt-lts", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if argv and len(argv) > 0:
btdepth = argv[0]
else:
btdepth = ""
ltq = gdb.execute("p/x __active_lthreads", to_string=True).split("=")[1].strip()
no = 1
while int(ltq, 16) != 0:
lt = (
gdb.execute("p/x ((struct lthread_queue*)%s)->lt" % ltq, to_string=True)
.split("=")[1]
.strip()
)
lt_tid = (
gdb.execute(
"p/d ((struct lthread_queue*)%s)->lt->tid" % ltq, to_string=True
)
.split("=")[1]
.strip()
)
lt_name = (
gdb.execute(
"p/s ((struct lthread_queue*)%s)->lt->funcname" % ltq,
to_string=True,
)
.split("=")[1]
.strip()
.split(",")[0]
)
gdb.write(
"#%3d Lthread: TID: %3s, Addr: %s, Name: %s\n"
% (no, lt_tid, lt, lt_name)
)
gdb.execute("lthread-bt %s %s" % (lt, btdepth))
gdb.write("\n")
gdb.flush()
ltq = (
gdb.execute(
"p/x ((struct lthread_queue*)%s)->next" % ltq, to_string=True
)
.split("=")[1]
.strip()
)
no = no + 1
return False
class LogFxWaiters(gdb.Command):
"""
Do a backtrace of all lthreads waiting on a futex
Param: Depth of backtrace (optional)
"""
def __init__(self):
super(LogFxWaiters, self).__init__("bt-fxq", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if argv and len(argv) > 0:
btdepth = argv[0]
else:
btdepth = ""
fxq = (
gdb.execute("p/x futex_queues->slh_first", to_string=True)
.split("=")[1]
.strip()
)
while int(fxq, 16) != 0:
ft_lt = (
gdb.execute("p/x ((struct futex_q*)%s)->futex_lt" % fxq, to_string=True)
.split("=")[1]
.strip()
)
ft_key = (
gdb.execute("p ((struct futex_q*)%s)->futex_key" % fxq, to_string=True)
.split("=")[1]
.strip()
)
ft_deadline = (
gdb.execute(
"p ((struct futex_q*)%s)->futex_deadline" % fxq, to_string=True
)
.split("=")[1]
.strip()
)
gdb.write(
"FX entry: key: %s, lt: %s, deadline: %s\n"
% (ft_key, ft_lt, ft_deadline)
)
gdb.execute("lthread-bt %s %s" % (ft_lt, btdepth))
gdb.write("\n")
gdb.flush()
fxq = (
gdb.execute(
"p/x ((struct futex_q*)%s)->entries.sle_next" % fxq, to_string=True
)
.split("=")[1]
.strip()
)
return False
class LogSchedQueueTids(gdb.Command):
"""
Print thread id of each lthread in scheduler queue.
"""
def __init__(self):
super(LogSchedQueueTids, self).__init__("schedq-tids", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
enqueue_pos = int(
gdb.execute("p __scheduler_queue->enqueue_pos", to_string=True)
.split("=")[1]
.strip()
)
dequeue_pos = int(
gdb.execute("p __scheduler_queue->dequeue_pos", to_string=True)
.split("=")[1]
.strip()
)
if enqueue_pos < dequeue_pos:
raise Exception("Logic error: %d < %d" % (enqueue_pos, dequeue_pos))
buffer_mask = int(
gdb.execute("p __scheduler_queue->buffer_mask", to_string=True)
.split("=")[1]
.strip()
)
tids = []
for i in range(dequeue_pos, enqueue_pos):
gdb.write(
"p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid\n"
% (i, buffer_mask)
)
tid = int(
gdb.execute(
"p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid"
% (i, buffer_mask),
to_string=True,
)
.split("=")[1]
.strip()
)
tids.append(tid)
gdb.write("\nScheduler queue lthreads:\n" + tw.fill(str(tids)) + "\n")
gdb.flush()
class LogSyscallBacktraces(gdb.Command):
"""
Print backtraces for all lthreads waiting in the syscall queues.
Param: Depth of backtrace (optional)
"""
def __init__(self):
super(LogSyscallBacktraces, self).__init__("bt-syscallqueues", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if argv and len(argv) > 0:
btdepth = argv[0]
else:
btdepth = ""
gdb.write("Lthreads in system call request queue:\n")
self.print_bts_for_queue("__syscall_queue", btdepth)
gdb.write("\nLthreads in system call return queue:\n")
self.print_bts_for_queue("__return_queue", btdepth)
return False
def print_bts_for_queue(self, queue, btdepth):
enqueue_pos = int(
gdb.execute("p %s->enqueue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
dequeue_pos = int(
gdb.execute("p %s->dequeue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
if enqueue_pos < dequeue_pos:
raise Exception("Logic error: %d < %d" % (enqueue_pos, dequeue_pos))
buffer_mask = int(
gdb.execute("p %s->buffer_mask" % queue, to_string=True)
.split("=")[1]
.strip()
)
for i in range(dequeue_pos, enqueue_pos):
lt = (
gdb.execute(
"p/x slotlthreads[%s->buffer[%d & %d].data]"
% (queue, i, buffer_mask),
to_string=True,
)
.split("=")[1]
.strip()
)
if lt != "0x0":
tid = int(
gdb.execute("p ((struct lthread*)%s)->tid" % lt, to_string=True)
.split("=")[1]
.strip()
)
gdb.write("Lthread [tid=%d]\n" % tid)
gdb.execute("lthread-bt %s %s" % (lt, btdepth))
gdb.write("\n")
else:
gdb.write("Queue entry without associated lthread...\n")
gdb.flush()
class LogSyscallTids(gdb.Command):
"""
Print tids of lthreads in syscall and return queues.
"""
def __init__(self):
super(LogSyscallTids, self).__init__("syscall-tids", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
gdb.write("\nSlot tids:\n" + tw.fill(str(self.slot_tids())))
gdb.write("\nSlot syscallnos:\n" + tw.fill(str(self.syscall_nos())))
gdb.write("\nSyscall tids:\n" + tw.fill(str(self.queue_tids("syscall"))))
gdb.write("\nReturn tids:\n" + tw.fill(str(self.queue_tids("return"))))
gdb.flush()
def slot_tids(self):
maxsyscalls = int(
gdb.execute("p maxsyscalls", to_string=True).split("=")[1].strip()
)
slot_tids = {}
for i in range(0, maxsyscalls):
if (
int(
gdb.execute("p (int)slotlthreads[%d]" % i, to_string=True)
.split("=")[1]
.strip()
)
!= 0
):
tid = int(
gdb.execute("p slotlthreads[%d]->tid" % i, to_string=True)
.split("=")[1]
.strip()
)
slot_tids[i] = tid
return slot_tids
def queue_tids(self, queue):
enqueue_pos = int(
gdb.execute("p __%s_queue->enqueue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
dequeue_pos = int(
gdb.execute("p __%s_queue->dequeue_pos" % queue, to_string=True)
.split("=")[1]
.strip()
)
if enqueue_pos < dequeue_pos:
raise Exception("Logic error: %d < %d" % (enqueue_pos, dequeue_pos))
buffer_mask = int(
gdb.execute("p __%s_queue->buffer_mask" % queue, to_string=True)
.split("=")[1]
.strip()
)
tids = []
for i in range(dequeue_pos, enqueue_pos):
slot = int(
gdb.execute(
"p ((int)__%s_queue->buffer[%d & %d].data)"
% (queue, i, buffer_mask),
to_string=True,
)
.split("=")[1]
.strip()
)
if (
int(
gdb.execute("p (int)slotlthreads[%d]" % slot, to_string=True)
.split("=")[1]
.strip()
)
!= 0
):
tid = int(
gdb.execute("p slotlthreads[%d]->tid" % slot, to_string=True)
.split("=")[1]
.strip()
)
tids.append(tid)
else:
gdb.write(
"\nNo lthread found for queue slot %d in slotlthreads\n" % slot
)
return tids
def syscall_nos(self):
maxsyscalls = int(
gdb.execute("p maxsyscalls", to_string=True).split("=")[1].strip()
)
slot_syscallnos = {}
for i in range(0, maxsyscalls):
if (
int(
gdb.execute("p (int)slotlthreads[%d]" % i, to_string=True)
.split("=")[1]
.strip()
)
!= 0
):
sno = int(
gdb.execute("p S[%d].syscallno" % i, to_string=True)
.split("=")[1]
.strip()
)
slot_syscallnos[i] = sno
return slot_syscallnos
if __name__ == "__main__":
StarterExecBreakpoint()
LthreadBacktrace()
LthreadStats()
LogAllLts()
LogFxWaiters()
LogSchedQueueTids()
LogSyscallBacktraces()
LogSyscallTids()
gdb.events.exited.connect(remove_modules)
| 18,949 | 29.713128 | 88 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tools/gdb/gdbcommands.py | # Copyright (c) Open Enclave SDK contributors.
# Licensed under the MIT License.
import sys
# Execute given command.
def gdb_execute(command):
output = gdb.execute(command, from_tty=False, to_string=True)
if output:
print(output)
gdb.execute("refresh")
class LKLFinishBreakpoint(gdb.FinishBreakpoint):
"""
Replacement for FinishBreakpoint that works with the
LKL thread scheduler.
"""
def __init__(self, frame):
super(LKLFinishBreakpoint, self).__init__(frame, internal=True)
# Set the breakpoint as having no specific thread so
# that it is robust to LKL thread switching.
self.thread = None
# Don't display messages when hit.
self.silent = True
# Correct hit count
self.correct_hit_count = 0
        # Even if the LThread is transferred between different EThreads,
# the stack frames and hence RSP will be preserved. Thus the
# caller RSP acts as a good unique identifier for this
# finish breakpoint.
self.caller_rsp = frame.older().read_register("rsp")
# For host/enclave transition boundary, this seems to be needed.
self.caller_frame_id = str(frame.older())
def stop(self):
try:
# Stop only if we have returned back to the caller.
# If another thread hits this breakpoint, its caller RSP
# will be different.
frame = gdb.newest_frame()
current_rsp = frame.read_register("rsp")
current_frame_id = str(frame)
if (
current_rsp == self.caller_rsp
or current_frame_id == self.caller_frame_id
):
# TODO: Better return value printing.
print("Value returned is " + str(self.return_value))
self.correct_hit_count = 1
return True
except:
pass
return False
class LKLFinish(gdb.Command):
"""
    A drop-in replacement for GDB's 'finish' command that works
with the LKL thread scheduler.
"""
def __init__(self):
# Override the 'finish' command. Change this to 'lkl-finish' if you
# want to retain the original implementation.
command = "finish"
print(
"Overriding 'finish' with LKL compatible 'lkl-finish'. "
"finish will now work with LKL."
)
super(LKLFinish, self).__init__(command, gdb.COMMAND_USER)
@staticmethod
def do_finish(frame):
        # When functions are inlined but have debug info,
        # both the current and older frames have the same pc.
# Skip over all such frames and set the breakpoint.
while frame.pc() == frame.older().pc():
frame = frame.older()
        # TODO: We need to skip this sequence too.
        # Document this.
caller = frame.older()
while caller.older() and caller.older().pc() == caller.pc():
frame = caller
caller = frame.older()
# After having figured out correct frame, set LKL compatible
# breakpoint.
bp = LKLFinishBreakpoint(frame)
# Continue execution
gdb_execute("continue")
# Check if we stopped due to finish breakpoint being hit
# or due to some other reason
hit = bp.correct_hit_count > 0
if bp.is_valid():
bp.delete()
return hit
def print_advice(self):
print(
"lkl-finish could not determine what to do. "
"It is recommended that you manually place a breakpoint "
"and continue at this time."
)
def invoke(self, arg, from_tty):
try:
# Fetch the current frame
current_frame = gdb.newest_frame()
if not current_frame:
self.print_advice()
return
LKLFinish.do_finish(current_frame)
except:
_, ex, _ = sys.exc_info()
print(ex)
self.print_advice()
class LKLBreakpoint(gdb.Breakpoint):
"""
    Thread-specific breakpoint that works with the LKL thread scheduler.
"""
def __init__(self, where, frame):
super(LKLBreakpoint, self).__init__(where, internal=True)
# Set the breakpoint as having no specific thread so
# that it is robust to LKL context switching.
self.thread = None
# Don't display messages when hit.
self.silent = True
# Correct hit count
self.correct_hit_count = 0
        # Even if the LThread is transferred between different EThreads,
        # the stack frames are preserved. Thus the frame id (and the
        # caller's frame id) act as a good unique identifier for this
        # breakpoint.
self.frame_id = str(frame)
# Use the caller as a secondary id
caller_frame = frame.older()
self.caller_frame_id = str(caller_frame) if caller_frame else None
def stop(self):
try:
            # Stop only in the frame that planted this breakpoint.
            # If another thread hits it, its frame id (and caller
            # frame id) will be different.
frame = gdb.newest_frame()
frame_id = str(frame)
caller_frame = frame.older()
caller_frame_id = str(caller_frame) if caller_frame else None
if frame_id == self.frame_id or caller_frame_id == self.caller_frame_id:
self.correct_hit_count = 1
return True
except:
pass
return False
class LKLNext(gdb.Command):
"""
    A drop-in replacement for GDB's 'next' command that works
with the LKL thread scheduler.
"""
def __init__(self):
# Override the 'next' command. Change this to 'lkl-next' if you
# want to retain the original implementation.
command = "next"
print(
"Overriding 'next' with LKL compatible 'lkl-next'. "
"next and n will now work with LKL."
)
super(LKLNext, self).__init__(command, gdb.COMMAND_USER)
def current_frame(self):
try:
frame = gdb.newest_frame()
return frame
except:
print("lkl-next could not determine current frame.")
return None
def current_sal(self):
try:
# In some cases, the object returned by frame.find_sal()
# has empty pc and last. Whereas the object returned by
# gdb.find_pc_line(frame.pc()) has accurate pc and last.
# Weird.
# E.g: arch/lkl/kernel/syscalls.c:122:
            # ret = run_syscall(no, params)
# Reproduction:
# $make run-hw-gdb # helloworld sample
# (gdb) b syscalls.c:120
# (gdb) n
# (gdb) python print(gdb.newest_frame().find_sal().pc)
# 0 <-------- incorrect
# (gdb) python print(gdb.find_pc_line(gdb.newest_frame().pc()).pc)
# 140733194943702 <-------- correct
# The above issue happens on GDB 8.3, but does not happen on
# GDB 8.1.
frame = self.current_frame()
sal = gdb.find_pc_line(frame.pc())
return sal
except:
return None
def print_advice(self):
print(
"lkl-next could not determine what to do. "
"It is recommended that you manually place a breakpoint "
"and continue at this time."
)
def intelligent_step(self, frame, sal):
# Disassemble instructions at current pc.
asm = frame.architecture().disassemble(start_pc=frame.pc(), end_pc=sal.last)
# We want to put a breakpoint and then 'continue' execution
# till that breakpoint. It is safe to 'continue' execution
# till we hit a branch or return instruction. Even calls are ok
# since we will return from the call.
# We don't know where branches will jump to; therefore we search
# for jumps (all start with 'j') or ret instruction.
bp_addr = None
for a in asm[1:]:
ins = a["asm"]
if ins.startswith("j") or ins.startswith("ret"):
bp_addr = a["addr"]
break
# gcc 8 and above generate endbr64 as the first instruction in
# a function. An address breakpoint set immediately after it does
# not work. It is better to do a step.
# E.g: b lkl_poststart_net
if len(asm) == 1 and asm[0]["asm"].startswith("endbr64"):
return False
# Check if the current source line has a jump or return.
if bp_addr:
# If yes, set breakpoint. Handle case where the first
# instruction itself is a jump. In that case, we will
# step.
if bp_addr == frame.pc():
return False
            bp = LKLBreakpoint("*" + hex(bp_addr), frame)
else:
# The source line does not have branches or returns.
# Set breakpoint at beyond the last instruction.
last_insn = asm[-1]
location = last_insn["addr"] + last_insn["length"]
bp = LKLBreakpoint("*" + hex(location), frame)
# Continue execution till the breakpoint. But we could
# stop due to some other reason (another breakpoint or exception)
# before our breakpoint is hit.
interrupted = True
if bp:
gdb_execute("continue")
if bp.is_valid():
# If the breakpoint's hit count is zero, then we stopped
# due to some other reason.
interrupted = bp.correct_hit_count == 0
bp.delete()
return interrupted
return interrupted
def invoke(self, arg, from_tty):
try:
# Fetch the current frame
start_frame = self.current_frame()
if not start_frame:
self.print_advice()
return
# Fetch the symbol and line. We will keep stepping until
# the current line number changes
start_sal = self.current_sal()
if not start_sal:
self.print_advice()
return
# Intelligently do the first step
# In lines of code without branches, we will be done
# after the intelligent step
done = self.intelligent_step(start_frame, start_sal)
# TODO: See if we can avoid stepping and use only
# breakpoints and continue.
while not done:
# Check if the current location has a frame
cur_frame = self.current_frame()
if not cur_frame:
gdb_execute("step")
continue
# Check if the current location has line information.
cur_sal = self.current_sal()
if not cur_sal:
gdb_execute("step")
continue
# If we are still in the starting line, step again.
if cur_sal.line == start_sal.line:
gdb_execute("step")
continue
# The line number is different.
# Line number will change when
# a) we step over a line
                # b) if we have returned from the current function
# c) if we have stepped into another function.
# There is nothing to be done for (a) and (b).
# For (c), we need to return to the caller via
# the equivalent of a finish command.
if cur_frame.older() == start_frame:
finished = LKLFinish.do_finish(cur_frame)
if finished:
continue
else:
# Another breakpoint was hit, quit stepping
return
else:
# We have stepped over a line or returned from the
# current function
break
except:
# Cannot reliably do a 'next'
_, ex, _ = sys.exc_info()
print(ex)
self.print_advice()
class LKLNexti(gdb.Command):
"""
    A drop-in replacement for GDB's 'nexti' command that works
with the LKL thread scheduler.
"""
def __init__(self):
# Override the 'nexti' command. Change this to 'lkl-nexti' if you
# want to retain the original implementation.
command = "nexti"
print(
"Overriding 'nexti' with LKL compatible 'lkl-nexti'. "
"nexti and ni will now work with LKL."
)
super(LKLNexti, self).__init__(command, gdb.COMMAND_USER)
def print_advice(self):
print(
"lkl-nexti could not determine what to do. "
"It is recommended that you manually place a breakpoint "
"and continue at this time."
)
def invoke(self, arg, from_tty):
try:
# Disassemble two instructions from current pc.
frame = gdb.newest_frame()
asm = frame.architecture().disassemble(start_pc=frame.pc(), count=2)
# Determine if the current instruction is a call.
# If false, invoke stepi.
curr_ins = asm[0]
if curr_ins["asm"].find("call") == -1:
gdb.execute("stepi")
return
# Special case: the very last instruction is a call. Invoke stepi.
# Note that this should be a rare case.
if len(asm) < 2:
gdb.execute("stepi")
return
# If true, set a break point at the next instruction and continue.
next_ins = asm[1]
bp = LKLBreakpoint("*" + hex(next_ins["addr"]), frame)
if bp:
gdb.execute("continue")
if bp.is_valid():
bp.delete()
            # Invoke the display to be consistent with normal behavior of ni.
gdb.execute("display")
except:
# Cannot reliably do a 'nexti'
_, ex, _ = sys.exc_info()
print(ex)
self.print_advice()
def register():
LKLFinish()
LKLNext()
LKLNexti()
if __name__ == "__main__":
register()
| 14,512 | 34.225728 | 85 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/scripts/generate_syscall_remap.py | #!/usr/bin/python3
# This script is used to generate the fixup table mapping x86-64 syscalls
# onto their LKL equivalents.
#
# The output is part of src/misc/syscall.c in sgx-musl-lkl
LKL_UNISTD_PATH = "../lkl/tools/lkl/include/lkl/asm-generic/unistd.h"
NATIVE_TBL_PATH = "../lkl/arch/x86/entry/syscalls/syscall_64.tbl"
class Syscall:
def __init__(self, name):
self.name = name
self.lkl_num = None
self.native_num = None
def __repr__(self):
return "<{}: lkl{}, native{}>".format(self.name, self.lkl_num, self.native_num)
def parse_table(f, syscall_tab, attr):
for ln in f:
ln = ln.strip()
if ln.startswith("#") or len(ln) == 0:
continue
num, abi, name, *extra = ln.split()
        if abi not in ("common", "64"):
            continue
num = int(num)
if name not in syscall_tab:
syscall_tab[name] = Syscall(name)
setattr(syscall_tab[name], attr, num)
def parse_unistd(f, syscall_tab, attr):
for ln in f:
for prefix in ("#define __lkl__NR_", "#define __lkl__NR3264_"):
if not ln.startswith(prefix):
continue
name, _, num = ln[len(prefix) :].strip().partition(" ")
if name == "syscalls":
# this is the syscall count!
continue
try:
num = int(num)
except ValueError:
# we've probably reached the end...
break
if name not in syscall_tab:
syscall_tab[name] = Syscall(name)
setattr(syscall_tab[name], attr, num)
for prefix in ("__LKL__SC_3264(__lkl__NR3264_",):
if not ln.startswith(prefix):
continue
name, _, new_name = ln[len(prefix) :].strip().rstrip(")").split(", sys_")
num = getattr(syscall_tab[name], attr)
if new_name not in syscall_tab:
syscall_tab[new_name] = Syscall(new_name)
setattr(syscall_tab[new_name], attr, num)
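# Shape of the emitted C table (entries are illustrative; real numbers come from
# the parsed kernel sources -- e.g. x86-64 'read' is 0 while asm-generic uses 63):
#   static const short syscall_remap[] = {
#       63, /* read - x86-64 syscall: 0 */
#       ...
#   };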
syscall_tab = {}
with open(LKL_UNISTD_PATH, "r") as f:
parse_unistd(f, syscall_tab, "lkl_num")
with open(NATIVE_TBL_PATH, "r") as f:
parse_table(f, syscall_tab, "native_num")
syscall_nums = [
f
for f in syscall_tab.values()
if f.native_num is not None and f.lkl_num is not None
]
syscall_nums.sort(key=lambda f: f.native_num)
print("static const short syscall_remap_len = {};".format(syscall_nums[-1].native_num))
print("static const short syscall_remap[] = {")
x = 0
for n in range(syscall_nums[-1].native_num + 1):
if syscall_nums[x].native_num != n:
print("\t-1, /* not implemented in x86-64 */")
continue
e = syscall_nums[x]
print("\t{}, /* {} - x86-64 syscall: {} */".format(e.lkl_num, e.name, e.native_num))
x += 1
print("};")
| 2,855 | 31.827586 | 88 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tests/tools/sgx-lkl-cfg/create/src/app.py | import os
import sys
# Check metadata originally coming from the Dockerfile.
cwd = os.getcwd()
assert cwd == "/src", cwd
name = sys.argv[1]
assert name == "John", name
# TODO uncomment once https://github.com/lsds/sgx-lkl/issues/207 is fixed
# greeting = os.environ["GREETING"]
# assert greeting == 'Hello', greeting
greeting = "Hello"
print(f"{greeting} {name}!")
# Check naming of extra disk.
assert os.path.exists("/data_1")
| 433 | 20.7 | 73 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tests/tools/sgx-lkl-docker/app/python-helloworld.py | import os
print("Hello world!")
# Check that second disk was embedded in Docker image.
assert os.path.exists("/data/app/python-helloworld.py")
| 145 | 19.857143 | 55 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tests/languages/python/app/python-helloworld.py | import numpy as np
print("Confidential Computing using Intel SGX in Python with NumPy... ")
print(np.arange(10000).reshape(100, 100))
print("TEST_PASSED")
| 157 | 21.571429 | 72 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tests/containers/cc/app/python-helloworld.py | import requests
print("Hello world!")
response = requests.get("https://www.microsoft.com/", timeout=5)
response.raise_for_status()
print("Network test successful!")
| 167 | 20 | 64 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/tests/virtio/python_read/app/keyboard_read.py | def input_loop():
line = ""
while line != "stop":
line = input('Prompt ("stop" to quit): ')
print("User input is : %s" % line)
# Prompt the user for text
input_loop()
| 193 | 18.4 | 49 | py |