prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and spawn one training process per GPU.

    The master rank (rank <= 0) also starts the distributed rendezvous
    server.  The GPU count is queried in a short-lived subprocess so this
    parent process never initializes the CUDA driver before spawning the
    worker processes.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="resnet50",
        help="model architecture (default: resnet50)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "--save",
        metavar="DIR",
        default="output",
        help="path to save checkpoint and log",
    )
    parser.add_argument(
        "--epochs",
        default=10,
        type=int,
        help="number of total epochs to run (default: 10)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        metavar="SIZE",
        default=64,
        type=int,
        help="batch size for single GPU (default: 64)",
    )
    parser.add_argument(
        "--lr",
        "--learning-rate",
        metavar="LR",
        default=0.025,
        type=float,
        help="learning rate for single GPU (default: 0.025)",
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
    )
    parser.add_argument(
        "--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    parser.add_argument(
        "--enable-dtr",
        dest="enable_dtr",
        action="store_true",
        help="Enable DTR")
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # get device count in a throwaway subprocess (keeps this process CUDA-free)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local GPU
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                # global rank / world size span all nodes, not just this one
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Train and periodically validate one model replica on a single GPU.

    Builds the data pipeline and model, synchronizes parameters across
    ranks, and runs SGD with a warmup + multi-step LR schedule for
    ``args.epochs`` epochs.  After every epoch the model is evaluated and a
    checkpoint is written.
    """
    # pylint: disable=too-many-statements
    # enable DTR
    if args.enable_dtr:
        from megengine.utils.dtr import DTR
        # NOTE(review): `ds` is never referenced again — constructing DTR
        # with a 5 GiB budget presumably enables dynamic tensor
        # rematerialization as a side effect; confirm against the
        # megengine.utils.dtr documentation.
        ds = DTR(memory_budget=5*1024**3)
    if rank == 0:
        os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
        megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    train_dataloader, valid_dataloader = build_dataset(args)
    train_queue = iter(train_dataloader)  # infinite
    # 1280000 approximates the ImageNet train-set size; the epoch length is
    # derived from this constant rather than from len(dataset)
    steps_per_epoch = 1280000 // (world_size * args.batch_size)
    # build model
    model = resnet_model.__dict__[args.arch]()
    # Sync parameters
    if world_size > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)
    # Autodiff gradient manager (gradients are all-reduced with "SUM" in
    # distributed mode)
    gm = autodiff.GradManager().attach(
        model.parameters(),
        callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
    )
    # Optimizer
    opt = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * world_size,  # scale weight decay in "SUM" mode
    )
    # train and valid func
    def train_step(image, label):
        # One mini-batch: forward, backward, parameter update.
        with gm:
            logits = model(image)
            loss = F.nn.cross_entropy(logits, label)
            acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
            gm.backward(loss)
            opt.step().clear_grad()
        return loss, acc1, acc5
    def valid_step(image, label):
        # Forward-only evaluation step; no gradients are recorded.
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values across ranks
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5
    # multi-step learning rate scheduler with warmup
    def adjust_learning_rate(step):
        # 10x decay at epoch boundaries 30/60/80; linear warmup over the
        # first 5 epochs overrides the step schedule.
        lr = args.lr * 0.1 ** bisect.bisect_right(
            [30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
        )
        if step < 5 * steps_per_epoch:  # warmup
            lr = args.lr * (step / (5 * steps_per_epoch))
        for param_group in opt.param_groups:
            param_group["lr"] = lr
        return lr
    # start training
    # NOTE(review): AverageMeter is not defined in this file — presumably
    # provided elsewhere in the project; verify before running.
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    for step in range(0, args.epochs * steps_per_epoch):
        lr = adjust_learning_rate(step)
        t = time.time()
        image, label = next(train_queue)
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        loss, acc1, acc5 = train_step(image, label)
        objs.update(loss.item())
        top1.update(100 * acc1.item())
        top5.update(100 * acc5.item())
        clck.update(time.time() - t)
        # only rank 0 logs; meters are reset after each report
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Epoch %d Step %d, LR %.4f, %s %s %s %s",
                step // steps_per_epoch,
                step,
                lr,
                objs,
                top1,
                top5,
                clck,
            )
            objs.reset()
            top1.reset()
            top5.reset()
            clck.reset()
        # end of epoch: evaluate and checkpoint
        if (step + 1) % steps_per_epoch == 0:
            model.eval()
            _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
            model.train()
            logging.info(
                "Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
                (step + 1) // steps_per_epoch,
                valid_acc1,
                valid_acc5,
            )
            # NOTE(review): there is no rank guard here, so every process
            # writes the same checkpoint path — confirm this is intended.
            megengine.save(
                {
                    "epoch": (step + 1) // steps_per_epoch,
                    "state_dict": model.state_dict(),
                },
                os.path.join(args.save, args.arch, "checkpoint.pkl"),
            )
def valid(func, data_queue, args):
    """Run one full validation pass and return average (loss, top-1, top-5)."""
    meter_loss = AverageMeter("Loss")
    meter_acc1 = AverageMeter("Acc@1")
    meter_acc5 = AverageMeter("Acc@5")
    meter_time = AverageMeter("Time")
    tic = time.time()
    for step, (image, label) in enumerate(data_queue):
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        batch = image.shape[0]
        loss, acc1, acc5 = func(image, label)
        # all meters are weighted by the batch size
        meter_loss.update(loss.item(), batch)
        meter_acc1.update(100 * acc1.item(), batch)
        meter_acc5.update(100 * acc5.item(), batch)
        meter_time.update(time.time() - tic, batch)
        tic = time.time()
        if dist.get_rank() == 0 and step % args.print_freq == 0:
            logging.info("Test step %d, %s %s %s %s", step, meter_loss, meter_acc1, meter_acc5, meter_time)
    return meter_loss.avg, meter_acc1.avg, meter_acc5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
| T.RandomHorizontalFlip() | megengine.data.transform.RandomHorizontalFlip |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and spawn one training process per GPU.

    The master rank (rank <= 0) also starts the distributed rendezvous
    server.  The GPU count is queried in a short-lived subprocess so this
    parent process never initializes the CUDA driver before spawning the
    worker processes.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="resnet50",
        help="model architecture (default: resnet50)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "--save",
        metavar="DIR",
        default="output",
        help="path to save checkpoint and log",
    )
    parser.add_argument(
        "--epochs",
        default=10,
        type=int,
        help="number of total epochs to run (default: 10)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        metavar="SIZE",
        default=64,
        type=int,
        help="batch size for single GPU (default: 64)",
    )
    parser.add_argument(
        "--lr",
        "--learning-rate",
        metavar="LR",
        default=0.025,
        type=float,
        help="learning rate for single GPU (default: 0.025)",
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
    )
    parser.add_argument(
        "--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    parser.add_argument(
        "--enable-dtr",
        dest="enable_dtr",
        action="store_true",
        help="Enable DTR")
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # get device count in a throwaway subprocess (keeps this process CUDA-free)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local GPU
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                # global rank / world size span all nodes, not just this one
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Train and periodically validate one model replica on a single GPU.

    Builds the data pipeline and model, synchronizes parameters across
    ranks, and runs SGD with a warmup + multi-step LR schedule for
    ``args.epochs`` epochs.  After every epoch the model is evaluated and a
    checkpoint is written.
    """
    # pylint: disable=too-many-statements
    # enable DTR
    if args.enable_dtr:
        from megengine.utils.dtr import DTR
        # NOTE(review): `ds` is never referenced again — constructing DTR
        # with a 5 GiB budget presumably enables dynamic tensor
        # rematerialization as a side effect; confirm against the
        # megengine.utils.dtr documentation.
        ds = DTR(memory_budget=5*1024**3)
    if rank == 0:
        os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
        megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    train_dataloader, valid_dataloader = build_dataset(args)
    train_queue = iter(train_dataloader)  # infinite
    # 1280000 approximates the ImageNet train-set size; the epoch length is
    # derived from this constant rather than from len(dataset)
    steps_per_epoch = 1280000 // (world_size * args.batch_size)
    # build model
    model = resnet_model.__dict__[args.arch]()
    # Sync parameters
    if world_size > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)
    # Autodiff gradient manager (gradients are all-reduced with "SUM" in
    # distributed mode)
    gm = autodiff.GradManager().attach(
        model.parameters(),
        callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
    )
    # Optimizer
    opt = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * world_size,  # scale weight decay in "SUM" mode
    )
    # train and valid func
    def train_step(image, label):
        # One mini-batch: forward, backward, parameter update.
        with gm:
            logits = model(image)
            loss = F.nn.cross_entropy(logits, label)
            acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
            gm.backward(loss)
            opt.step().clear_grad()
        return loss, acc1, acc5
    def valid_step(image, label):
        # Forward-only evaluation step; no gradients are recorded.
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values across ranks
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5
    # multi-step learning rate scheduler with warmup
    def adjust_learning_rate(step):
        # 10x decay at epoch boundaries 30/60/80; linear warmup over the
        # first 5 epochs overrides the step schedule.
        lr = args.lr * 0.1 ** bisect.bisect_right(
            [30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
        )
        if step < 5 * steps_per_epoch:  # warmup
            lr = args.lr * (step / (5 * steps_per_epoch))
        for param_group in opt.param_groups:
            param_group["lr"] = lr
        return lr
    # start training
    # NOTE(review): AverageMeter is not defined in this file — presumably
    # provided elsewhere in the project; verify before running.
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    for step in range(0, args.epochs * steps_per_epoch):
        lr = adjust_learning_rate(step)
        t = time.time()
        image, label = next(train_queue)
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        loss, acc1, acc5 = train_step(image, label)
        objs.update(loss.item())
        top1.update(100 * acc1.item())
        top5.update(100 * acc5.item())
        clck.update(time.time() - t)
        # only rank 0 logs; meters are reset after each report
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Epoch %d Step %d, LR %.4f, %s %s %s %s",
                step // steps_per_epoch,
                step,
                lr,
                objs,
                top1,
                top5,
                clck,
            )
            objs.reset()
            top1.reset()
            top5.reset()
            clck.reset()
        # end of epoch: evaluate and checkpoint
        if (step + 1) % steps_per_epoch == 0:
            model.eval()
            _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
            model.train()
            logging.info(
                "Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
                (step + 1) // steps_per_epoch,
                valid_acc1,
                valid_acc5,
            )
            # NOTE(review): there is no rank guard here, so every process
            # writes the same checkpoint path — confirm this is intended.
            megengine.save(
                {
                    "epoch": (step + 1) // steps_per_epoch,
                    "state_dict": model.state_dict(),
                },
                os.path.join(args.save, args.arch, "checkpoint.pkl"),
            )
def valid(func, data_queue, args):
    """Run one full validation pass and return average (loss, top-1, top-5)."""
    meter_loss = AverageMeter("Loss")
    meter_acc1 = AverageMeter("Acc@1")
    meter_acc5 = AverageMeter("Acc@5")
    meter_time = AverageMeter("Time")
    tic = time.time()
    for step, (image, label) in enumerate(data_queue):
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        batch = image.shape[0]
        loss, acc1, acc5 = func(image, label)
        # all meters are weighted by the batch size
        meter_loss.update(loss.item(), batch)
        meter_acc1.update(100 * acc1.item(), batch)
        meter_acc5.update(100 * acc5.item(), batch)
        meter_time.update(time.time() - tic, batch)
        tic = time.time()
        if dist.get_rank() == 0 and step % args.print_freq == 0:
            logging.info("Test step %d, %s %s %s %s", step, meter_loss, meter_acc1, meter_acc5, meter_time)
    return meter_loss.avg, meter_acc1.avg, meter_acc5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
| T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4) | megengine.data.transform.ColorJitter |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and spawn one training process per GPU.

    The master rank (rank <= 0) also starts the distributed rendezvous
    server.  The GPU count is queried in a short-lived subprocess so this
    parent process never initializes the CUDA driver before spawning the
    worker processes.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="resnet50",
        help="model architecture (default: resnet50)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "--save",
        metavar="DIR",
        default="output",
        help="path to save checkpoint and log",
    )
    parser.add_argument(
        "--epochs",
        default=10,
        type=int,
        help="number of total epochs to run (default: 10)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        metavar="SIZE",
        default=64,
        type=int,
        help="batch size for single GPU (default: 64)",
    )
    parser.add_argument(
        "--lr",
        "--learning-rate",
        metavar="LR",
        default=0.025,
        type=float,
        help="learning rate for single GPU (default: 0.025)",
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
    )
    parser.add_argument(
        "--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    parser.add_argument(
        "--enable-dtr",
        dest="enable_dtr",
        action="store_true",
        help="Enable DTR")
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # get device count in a throwaway subprocess (keeps this process CUDA-free)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local GPU
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                # global rank / world size span all nodes, not just this one
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Train and periodically validate one model replica on a single GPU.

    Builds the data pipeline and model, synchronizes parameters across
    ranks, and runs SGD with a warmup + multi-step LR schedule for
    ``args.epochs`` epochs.  After every epoch the model is evaluated and a
    checkpoint is written.
    """
    # pylint: disable=too-many-statements
    # enable DTR
    if args.enable_dtr:
        from megengine.utils.dtr import DTR
        # NOTE(review): `ds` is never referenced again — constructing DTR
        # with a 5 GiB budget presumably enables dynamic tensor
        # rematerialization as a side effect; confirm against the
        # megengine.utils.dtr documentation.
        ds = DTR(memory_budget=5*1024**3)
    if rank == 0:
        os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
        megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    train_dataloader, valid_dataloader = build_dataset(args)
    train_queue = iter(train_dataloader)  # infinite
    # 1280000 approximates the ImageNet train-set size; the epoch length is
    # derived from this constant rather than from len(dataset)
    steps_per_epoch = 1280000 // (world_size * args.batch_size)
    # build model
    model = resnet_model.__dict__[args.arch]()
    # Sync parameters
    if world_size > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)
    # Autodiff gradient manager (gradients are all-reduced with "SUM" in
    # distributed mode)
    gm = autodiff.GradManager().attach(
        model.parameters(),
        callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
    )
    # Optimizer
    opt = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * world_size,  # scale weight decay in "SUM" mode
    )
    # train and valid func
    def train_step(image, label):
        # One mini-batch: forward, backward, parameter update.
        with gm:
            logits = model(image)
            loss = F.nn.cross_entropy(logits, label)
            acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
            gm.backward(loss)
            opt.step().clear_grad()
        return loss, acc1, acc5
    def valid_step(image, label):
        # Forward-only evaluation step; no gradients are recorded.
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values across ranks
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5
    # multi-step learning rate scheduler with warmup
    def adjust_learning_rate(step):
        # 10x decay at epoch boundaries 30/60/80; linear warmup over the
        # first 5 epochs overrides the step schedule.
        lr = args.lr * 0.1 ** bisect.bisect_right(
            [30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
        )
        if step < 5 * steps_per_epoch:  # warmup
            lr = args.lr * (step / (5 * steps_per_epoch))
        for param_group in opt.param_groups:
            param_group["lr"] = lr
        return lr
    # start training
    # NOTE(review): AverageMeter is not defined in this file — presumably
    # provided elsewhere in the project; verify before running.
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    for step in range(0, args.epochs * steps_per_epoch):
        lr = adjust_learning_rate(step)
        t = time.time()
        image, label = next(train_queue)
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        loss, acc1, acc5 = train_step(image, label)
        objs.update(loss.item())
        top1.update(100 * acc1.item())
        top5.update(100 * acc5.item())
        clck.update(time.time() - t)
        # only rank 0 logs; meters are reset after each report
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Epoch %d Step %d, LR %.4f, %s %s %s %s",
                step // steps_per_epoch,
                step,
                lr,
                objs,
                top1,
                top5,
                clck,
            )
            objs.reset()
            top1.reset()
            top5.reset()
            clck.reset()
        # end of epoch: evaluate and checkpoint
        if (step + 1) % steps_per_epoch == 0:
            model.eval()
            _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
            model.train()
            logging.info(
                "Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
                (step + 1) // steps_per_epoch,
                valid_acc1,
                valid_acc5,
            )
            # NOTE(review): there is no rank guard here, so every process
            # writes the same checkpoint path — confirm this is intended.
            megengine.save(
                {
                    "epoch": (step + 1) // steps_per_epoch,
                    "state_dict": model.state_dict(),
                },
                os.path.join(args.save, args.arch, "checkpoint.pkl"),
            )
def valid(func, data_queue, args):
    """Run one full validation pass and return average (loss, top-1, top-5)."""
    meter_loss = AverageMeter("Loss")
    meter_acc1 = AverageMeter("Acc@1")
    meter_acc5 = AverageMeter("Acc@5")
    meter_time = AverageMeter("Time")
    tic = time.time()
    for step, (image, label) in enumerate(data_queue):
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        batch = image.shape[0]
        loss, acc1, acc5 = func(image, label)
        # all meters are weighted by the batch size
        meter_loss.update(loss.item(), batch)
        meter_acc1.update(100 * acc1.item(), batch)
        meter_acc5.update(100 * acc5.item(), batch)
        meter_time.update(time.time() - tic, batch)
        tic = time.time()
        if dist.get_rank() == 0 and step % args.print_freq == 0:
            logging.info("Test step %d, %s %s %s %s", step, meter_loss, meter_acc1, meter_acc5, meter_time)
    return meter_loss.avg, meter_acc1.avg, meter_acc5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
| T.ToMode("CHW") | megengine.data.transform.ToMode |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
    """Sample RoIs and build RCNN training targets for an FPN detector.

    For each image: proposals (plus the GT boxes themselves) are matched to
    ground truth by overlap, split into foreground/background by IoU
    thresholds, subsampled via Bernoulli masks, and converted into
    (rois, labels, bbox_targets) tensors detached from the autodiff graph.

    Args:
        rpn_rois: (N, 5) proposals as [batch_idx, x1, y1, x2, y2].
        im_info: per-image metadata; column 5 holds the number of valid GT
            boxes for that image — presumably; confirm against the loader.
        gt_boxes: (B, M, >=5) ground-truth boxes, class label in column 4.
        fg_threshold: IoU at or above which a proposal counts as foreground.
        top_k: number of best-overlap GT matches kept per proposal.

    Returns:
        Tuple of (rois, labels, bbox_targets), all detached.
    """
    return_rois, return_labels = [], []
    return_bbox_targets = []
    # get per image proposals and gt_boxes
    batch_per_gpu = im_info.shape[0]
    sampling = True
    # is_sample = True if top_k < 2 else False
    for bid in range(batch_per_gpu):
        # slice off only the valid GT rows for this image
        gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
        # dummy row guards against images with zero ground-truth boxes
        dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
        batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
        #if config.proposal_append_gt:
        gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
        # select proposals belonging to this image (column 0 is batch idx)
        batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
        _, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
        # batch_roi_mask = rpn_rois[:, 0] == bid
        # batch_roi_inds = mask_to_inds(batch_roi_mask)
        # during training (sampling), append the GT boxes to the proposals
        all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
            else rpn_rois[batch_rois_index]
        # all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
        # overlaps with normal GTs vs. ignore-region GTs, computed separately
        overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
            all_rois[:, 1:5], gt_boxes_perimg)
        # overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        # overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
        # sort each proposal's overlaps in descending order
        overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
        # overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
        overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
        # overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
        # gt max and indices, ignore max and indices
        max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
        gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
        max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
        gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # cons masks: prefer the ignore match only when the normal match is
        # below fg_threshold and the ignore overlap is larger
        ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
            max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
        max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
            max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
            gt_assignment_ignore * ignore_assign_mask
        gt_assignment = gt_assignment.astype(np.int32)
        labels = gt_boxes_perimg[gt_assignment, 4]
        # fg: high overlap and not an ignore-labelled GT; bg: overlap within
        # [bg_threshold_low, bg_threshold_high)
        fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
        bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
            max_overlaps >= config.bg_threshold_low).astype(np.float32)
        fg_mask = fg_mask.reshape(-1, top_k)
        bg_mask = bg_mask.reshape(-1, top_k)
        pos_max = config.num_rois * config.fg_ratio
        # subsample fg/bg to the configured budget (Bernoulli when training)
        fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
        neg_max = config.num_rois - fg_inds_mask.sum()
        bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
        labels = labels * fg_mask.reshape(-1)
        keep_mask = fg_inds_mask + bg_inds_mask
        # avoid an empty selection: if nothing was kept, keep everything
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        # keep_inds = mask_to_inds(keep_mask)
        _, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
        #keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
        # labels
        labels = labels.reshape(-1, top_k)[keep_inds]
        gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
        target_boxes = gt_boxes_perimg[gt_assignment, :4]
        # rois = all_rois.ai[keep_inds]
        rois = all_rois[keep_inds]
        # target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
        n, c = rois.shape[0], rois.shape[1]
        # repeat each kept roi top_k times to pair with its top_k GT matches
        target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
        # target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
        bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
        if config.rcnn_bbox_normalize_targets:
            # normalize regression targets with configured mean/std
            std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
            mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
            minus_opr = mean_opr / std_opr
            bbox_targets = bbox_targets / std_opr - minus_opr
        bbox_targets = bbox_targets.reshape(-1, top_k * 4)
        return_rois.append(rois)
        return_labels.append(labels)
        return_bbox_targets.append(bbox_targets)
    if config.batch_per_gpu == 1:
        # single-image batch: loop ran once, reuse its locals directly
        rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
        return rois, labels, bbox_targets
        # return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
    else:
        return_rois = F.concat(return_rois, axis=0)
        return_labels = F.concat(return_labels, axis=0)
        return_bbox_targets = F.concat(return_bbox_targets, axis=0)
        return_rois = return_rois.detach()
        return_labels = return_labels.detach()
        return_bbox_targets = return_bbox_targets.detach()
        return return_rois, return_labels, return_bbox_targets
        # rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
        # return rois, labels, bbox_targets
        # return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = | F.equal(masks, sample_value) | megengine.functional.equal |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
    """Sample RoIs and build RCNN training targets for an FPN detector.

    For each image: proposals (plus the GT boxes themselves) are matched to
    ground truth by overlap, split into foreground/background by IoU
    thresholds, subsampled via Bernoulli masks, and converted into
    (rois, labels, bbox_targets) tensors detached from the autodiff graph.

    Args:
        rpn_rois: (N, 5) proposals as [batch_idx, x1, y1, x2, y2].
        im_info: per-image metadata; column 5 holds the number of valid GT
            boxes for that image — presumably; confirm against the loader.
        gt_boxes: (B, M, >=5) ground-truth boxes, class label in column 4.
        fg_threshold: IoU at or above which a proposal counts as foreground.
        top_k: number of best-overlap GT matches kept per proposal.

    Returns:
        Tuple of (rois, labels, bbox_targets), all detached.
    """
    return_rois, return_labels = [], []
    return_bbox_targets = []
    # get per image proposals and gt_boxes
    batch_per_gpu = im_info.shape[0]
    sampling = True
    # is_sample = True if top_k < 2 else False
    for bid in range(batch_per_gpu):
        # slice off only the valid GT rows for this image
        gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
        # dummy row guards against images with zero ground-truth boxes
        dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
        batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
        #if config.proposal_append_gt:
        gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
        # select proposals belonging to this image (column 0 is batch idx)
        batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
        _, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
        # batch_roi_mask = rpn_rois[:, 0] == bid
        # batch_roi_inds = mask_to_inds(batch_roi_mask)
        # during training (sampling), append the GT boxes to the proposals
        all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
            else rpn_rois[batch_rois_index]
        # all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
        # overlaps with normal GTs vs. ignore-region GTs, computed separately
        overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
            all_rois[:, 1:5], gt_boxes_perimg)
        # overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        # overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
        # sort each proposal's overlaps in descending order
        overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
        # overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
        overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
        # overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
        # gt max and indices, ignore max and indices
        max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
        gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
        max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
        gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # cons masks: prefer the ignore match only when the normal match is
        # below fg_threshold and the ignore overlap is larger
        ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
            max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
        max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
            max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
            gt_assignment_ignore * ignore_assign_mask
        gt_assignment = gt_assignment.astype(np.int32)
        labels = gt_boxes_perimg[gt_assignment, 4]
        # fg: high overlap and not an ignore-labelled GT; bg: overlap within
        # [bg_threshold_low, bg_threshold_high)
        fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
        bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
            max_overlaps >= config.bg_threshold_low).astype(np.float32)
        fg_mask = fg_mask.reshape(-1, top_k)
        bg_mask = bg_mask.reshape(-1, top_k)
        pos_max = config.num_rois * config.fg_ratio
        # subsample fg/bg to the configured budget (Bernoulli when training)
        fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
        neg_max = config.num_rois - fg_inds_mask.sum()
        bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
        labels = labels * fg_mask.reshape(-1)
        keep_mask = fg_inds_mask + bg_inds_mask
        # avoid an empty selection: if nothing was kept, keep everything
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        # keep_inds = mask_to_inds(keep_mask)
        _, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
        #keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
        # labels
        labels = labels.reshape(-1, top_k)[keep_inds]
        gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
        target_boxes = gt_boxes_perimg[gt_assignment, :4]
        # rois = all_rois.ai[keep_inds]
        rois = all_rois[keep_inds]
        # target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
        n, c = rois.shape[0], rois.shape[1]
        # repeat each kept roi top_k times to pair with its top_k GT matches
        target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
        # target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
        bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
        if config.rcnn_bbox_normalize_targets:
            # normalize regression targets with configured mean/std
            std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
            mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
            minus_opr = mean_opr / std_opr
            bbox_targets = bbox_targets / std_opr - minus_opr
        bbox_targets = bbox_targets.reshape(-1, top_k * 4)
        return_rois.append(rois)
        return_labels.append(labels)
        return_bbox_targets.append(bbox_targets)
    if config.batch_per_gpu == 1:
        # single-image batch: loop ran once, reuse its locals directly
        rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
        return rois, labels, bbox_targets
        # return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
    else:
        return_rois = F.concat(return_rois, axis=0)
        return_labels = F.concat(return_labels, axis=0)
        return_bbox_targets = F.concat(return_bbox_targets, axis=0)
        return_rois = return_rois.detach()
        return_labels = return_labels.detach()
        return_bbox_targets = return_bbox_targets.detach()
        return return_rois, return_labels, return_bbox_targets
        # rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
        # return rois, labels, bbox_targets
        # return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = F.equal(masks, sample_value)
num_mask = sample_mask.sum()
num_final_samples = | F.minimum(num_mask, num_samples) | megengine.functional.minimum |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
    """Assign training targets (class labels and bbox regression targets) to RPN proposals.

    For each image in the batch: take that image's proposals (plus its gt boxes,
    appended as extra proposals), match them against the gt boxes by overlap
    (ignore-region-aware), sample foreground/background rois, and build the
    per-roi regression targets. Per-image results are concatenated over the batch.

    Args:
        rpn_rois: proposals; column 0 is the batch index, columns 1:5 are box
            coordinates (x1, y1, x2, y2) -- assumed layout, TODO confirm with caller.
        im_info: per-image meta info; ``im_info[bid, 5]`` holds the number of
            valid gt boxes for image ``bid`` (rows beyond that are padding).
        gt_boxes: padded per-image gt boxes; columns :4 are coordinates and
            column 4 is the class label.
        fg_threshold: overlap threshold at/above which a roi is foreground.
        top_k: number of best-matching gt boxes kept per roi.

    Returns:
        (rois, labels, bbox_targets), all detached from the autodiff graph.
        ``bbox_targets`` has shape (num_kept_rois, top_k * 4).
    """
    return_rois, return_labels = [], []
    return_bbox_targets = []
    # get per image proposals and gt_boxes
    batch_per_gpu = im_info.shape[0]
    sampling = True
    # is_sample = True if top_k < 2 else False
    for bid in range(batch_per_gpu):
        # Valid gt rows only; im_info[bid, 5] is the gt count for this image.
        gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
        # Dummy all-ones gt row appended below so the gt tensor is never empty
        # and out-of-range assignments index a harmless placeholder.
        dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
        # Batch-index column so gt boxes can be appended in roi format.
        batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
        #if config.proposal_append_gt:
        gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
        # Select the rois belonging to this image (column 0 == bid).
        batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
        _, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
        # batch_roi_mask = rpn_rois[:, 0] == bid
        # batch_roi_inds = mask_to_inds(batch_roi_mask)
        # When sampling, append the gt boxes themselves as extra proposals.
        all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
            else rpn_rois[batch_rois_index]
        # all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
        # Overlaps split into "normal" gt matches and "ignore"-region matches.
        overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
                all_rois[:, 1:5], gt_boxes_perimg)
        # overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        # overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
        # Sort each roi's overlaps descending; gather reorders the values to match.
        overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
        # overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
        overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
        # overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
        # gt max and indices, ignore max and indices
        max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
        gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
        max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
        gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # cons masks
        # Prefer the ignore match only when the roi is below fg threshold on
        # normal gts AND overlaps an ignore region more strongly.
        ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
                max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
        # Blend normal/ignore overlaps and assignments via the 0/1 mask.
        max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
                max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
                gt_assignment_ignore * ignore_assign_mask
        gt_assignment = gt_assignment.astype(np.int32)
        labels = gt_boxes_perimg[gt_assignment, 4]
        # Foreground: high overlap and not labeled as "ignore".
        fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
        # Background: overlap inside [bg_threshold_low, bg_threshold_high).
        bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
                max_overlaps >= config.bg_threshold_low).astype(np.float32)
        fg_mask = fg_mask.reshape(-1, top_k)
        bg_mask = bg_mask.reshape(-1, top_k)
        pos_max = config.num_rois * config.fg_ratio
        # Bernoulli-subsample fg rois down to pos_max; the else-branch uses
        # F.equal(fg_mask[:, 0], 0) which selects the ZERO entries.
        # NOTE(review): that looks inverted relative to the sampling branch -- confirm.
        fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
        # Remaining roi budget goes to background samples.
        neg_max = config.num_rois - fg_inds_mask.sum()
        bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
        # Zero out labels of non-foreground rois (background label becomes 0).
        labels = labels * fg_mask.reshape(-1)
        keep_mask = fg_inds_mask + bg_inds_mask
        # Guard against an empty selection: if nothing was kept, keep everything
        # (adds 1 to every entry when the sum is 0).
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        # keep_inds = mask_to_inds(keep_mask)
        _, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
        #keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
        # labels
        labels = labels.reshape(-1, top_k)[keep_inds]
        gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
        target_boxes = gt_boxes_perimg[gt_assignment, :4]
        # rois = all_rois.ai[keep_inds]
        rois = all_rois[keep_inds]
        # target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
        n, c = rois.shape[0], rois.shape[1]
        # Repeat each kept roi top_k times so it pairs with its top_k gt targets.
        target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
        # target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
        # Regression deltas from roi coords (columns 1:5 skip the batch index).
        bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
        if config.rcnn_bbox_normalize_targets:
            # Normalize targets: (t - mean) / std, computed as t/std - mean/std.
            std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
            mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
            minus_opr = mean_opr / std_opr
            bbox_targets = bbox_targets / std_opr - minus_opr
        bbox_targets = bbox_targets.reshape(-1, top_k * 4)
        return_rois.append(rois)
        return_labels.append(labels)
        return_bbox_targets.append(bbox_targets)
        # Single-image batch: return this image's targets directly (detached so
        # no gradient flows into the target-assignment graph).
        if config.batch_per_gpu == 1:
            rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
            return rois, labels, bbox_targets
            # return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
        else:
            # Multi-image batch: concatenate per-image results and detach.
            return_rois = F.concat(return_rois, axis=0)
            return_labels = F.concat(return_labels, axis=0)
            return_bbox_targets = F.concat(return_bbox_targets, axis=0)
            return_rois = return_rois.detach()
            return_labels = return_labels.detach()
            return_bbox_targets = return_bbox_targets.detach()
            return return_rois, return_labels, return_bbox_targets
            # rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
            # return rois, labels, bbox_targets
            # return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = F.equal(masks, sample_value)
num_mask = sample_mask.sum()
num_final_samples = F.minimum(num_mask, num_samples)
# here, we use the bernoulli probability to sample the anchors
sample_prob = num_final_samples / num_mask
# uniform_rng = rand.uniform(sample_mask.shapeof()[0])
uniform_rng = | rand.uniform(0, 1, sample_mask.shape) | megengine.random.uniform |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = | F.ones([1, gt_boxes_perimg.shape[1]]) | megengine.functional.ones |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = | F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = | F.cond_take(batch_rois_mask, batch_rois_mask) | megengine.functional.cond_take |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = | F.concat([gt_boxes_perimg, dummy_gt],axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = | F.argsort(overlaps_normal, descending=True) | megengine.functional.argsort |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = | F.gather(overlaps_normal, 1, overlaps_normal_indices) | megengine.functional.gather |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = | F.gather(overlaps_ignore, 1, overlaps_ignore_indices) | megengine.functional.gather |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = | F.cond_take(keep_mask > 0, keep_mask) | megengine.functional.cond_take |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
bbox_targets = bbox_targets.reshape(-1, top_k * 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
if config.batch_per_gpu == 1:
rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
return rois, labels, bbox_targets
# return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
else:
return_rois = | F.concat(return_rois, axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
bbox_targets = bbox_targets.reshape(-1, top_k * 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
if config.batch_per_gpu == 1:
rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
return rois, labels, bbox_targets
# return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
else:
return_rois = F.concat(return_rois, axis=0)
return_labels = | F.concat(return_labels, axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
bbox_targets = bbox_targets.reshape(-1, top_k * 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
if config.batch_per_gpu == 1:
rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
return rois, labels, bbox_targets
# return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
else:
return_rois = F.concat(return_rois, axis=0)
return_labels = F.concat(return_labels, axis=0)
return_bbox_targets = | F.concat(return_bbox_targets, axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = | F.ones((gt_boxes_perimg.shape[0], 1)) | megengine.functional.ones |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = | F.equal(rpn_rois[:, 0], bid) | megengine.functional.equal |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= | F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else | F.equal(fg_mask[:, 0], 0) | megengine.functional.equal |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else | F.equal(bg_mask[:, 0], 0) | megengine.functional.equal |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - | F.equal(labels, config.ignore_label) | megengine.functional.equal |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to( | F.expand_dims(rois, 1) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = | mge.tensor(config.bbox_normalize_stds[None, :]) | megengine.tensor |
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
# cons masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = | mge.tensor(config.bbox_normalize_means[None, :]) | megengine.tensor |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return | T.Compose(transforms=transforms, order=["image", "image_category"]) | megengine.data.transform.Compose |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
"""Transform to transfer color space.
Args:
color_space: color space, supports ``"BGR"``, ``"RGB"`` and ``"GRAY"``.
"""
def __init__(self, color_space: str, *, order: Sequence = None):
super().__init__(order)
if color_space not in ("BGR", "RGB", "GRAY"):
raise ValueError(f"Color space '{color_space}' not supported")
self.color_space = color_space
def _apply_image(self, image: np.ndarray) -> np.ndarray:
if self.color_space == "BGR":
return image
elif self.color_space == "RGB":
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.color_space == "GRAY":
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
else:
raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
"""Simple augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return | T.PseudoTransform() | megengine.data.transform.PseudoTransform |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
| T.RandomHorizontalFlip() | megengine.data.transform.RandomHorizontalFlip |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
| T.ToMode() | megengine.data.transform.ToMode |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
| T.CenterCrop(cfg.test.img_size) | megengine.data.transform.CenterCrop |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
| T.ToMode() | megengine.data.transform.ToMode |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
"""Transform to transfer color space.
Args:
color_space: color space, supports ``"BGR"``, ``"RGB"`` and ``"GRAY"``.
"""
def __init__(self, color_space: str, *, order: Sequence = None):
super().__init__(order)
if color_space not in ("BGR", "RGB", "GRAY"):
raise ValueError(f"Color space '{color_space}' not supported")
self.color_space = color_space
def _apply_image(self, image: np.ndarray) -> np.ndarray:
if self.color_space == "BGR":
return image
elif self.color_space == "RGB":
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.color_space == "GRAY":
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
else:
raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
"""Simple augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.PseudoTransform()
@registers.augments.register()
class ColorAugment:
"""Color augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
aug_args = cfg.augments.color_aug.to_dict()
lighting_scale = aug_args.pop("lighting")
return T.Compose([ | T.ColorJitter(**aug_args) | megengine.data.transform.ColorJitter |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
"""Transform to transfer color space.
Args:
color_space: color space, supports ``"BGR"``, ``"RGB"`` and ``"GRAY"``.
"""
def __init__(self, color_space: str, *, order: Sequence = None):
super().__init__(order)
if color_space not in ("BGR", "RGB", "GRAY"):
raise ValueError(f"Color space '{color_space}' not supported")
self.color_space = color_space
def _apply_image(self, image: np.ndarray) -> np.ndarray:
if self.color_space == "BGR":
return image
elif self.color_space == "RGB":
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.color_space == "GRAY":
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
else:
raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
"""Simple augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.PseudoTransform()
@registers.augments.register()
class ColorAugment:
"""Color augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
aug_args = cfg.augments.color_aug.to_dict()
lighting_scale = aug_args.pop("lighting")
return T.Compose([T.ColorJitter(**aug_args), | T.Lighting(lighting_scale) | megengine.data.transform.Lighting |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
r"""
Base class for a generic unconditional generator model.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
super().__init__(**kwargs)
self.nz = nz
self.ngf = ngf
self.bottom_width = bottom_width
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netD=None,
optG=None):
# Produce fake images
fake_images = self._infer_step_implementation(real_batch)
# Compute output logit of D thinking image real
output = netD(fake_images)
# Compute loss
errG = self.compute_gan_loss(output=output)
optG.zero_grad()
optG.backward(errG)
optG.step()
return errG
def _infer_step_implementation(self, batch):
# Get only batch size from real batch
batch_size = batch.shape[0]
noise = | R.gaussian(shape=[batch_size, self.nz]) | megengine.random.gaussian |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
r"""
Base class for a generic unconditional generator model.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
super().__init__(**kwargs)
self.nz = nz
self.ngf = ngf
self.bottom_width = bottom_width
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netD=None,
optG=None):
# Produce fake images
fake_images = self._infer_step_implementation(real_batch)
# Compute output logit of D thinking image real
output = netD(fake_images)
# Compute loss
errG = self.compute_gan_loss(output=output)
optG.zero_grad()
optG.backward(errG)
optG.step()
return errG
def _infer_step_implementation(self, batch):
# Get only batch size from real batch
batch_size = batch.shape[0]
noise = R.gaussian(shape=[batch_size, self.nz])
fake_images = self.forward(noise)
return fake_images
def compute_gan_loss(self, output):
if self.loss_type == "ns":
errG = losses.ns_loss_gen(output)
elif self.loss_type == "wasserstein":
errG = losses.wasserstein_loss_gen(output)
else:
raise ValueError("Invalid loss_type {} selected.".format(
self.loss_type))
return errG
def generate_images(self, num_images):
"""Generate images of shape [`num_images`, C, H, W].
Depending on the final activation function, pixel values are NOT guarenteed
to be within [0, 1].
"""
return self.infer_step(np.empty(num_images, dtype="float32"))
class BaseDiscriminator(BaseModel):
r"""
Base class for a generic unconditional discriminator model.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, loss_type, **kwargs):
super().__init__(**kwargs)
self.ndf = ndf
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netG=None,
optD=None):
# Produce logits for real images
output_real = self._infer_step_implementation(real_batch)
# Produce fake images
fake_images = netG._infer_step_implementation(real_batch)
fake_images = | F.zero_grad(fake_images) | megengine.functional.zero_grad |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
r"""
Base class for a generic unconditional generator model.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
super().__init__(**kwargs)
self.nz = nz
self.ngf = ngf
self.bottom_width = bottom_width
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netD=None,
optG=None):
# Produce fake images
fake_images = self._infer_step_implementation(real_batch)
# Compute output logit of D thinking image real
output = netD(fake_images)
# Compute loss
errG = self.compute_gan_loss(output=output)
optG.zero_grad()
optG.backward(errG)
optG.step()
return errG
def _infer_step_implementation(self, batch):
# Get only batch size from real batch
batch_size = batch.shape[0]
noise = R.gaussian(shape=[batch_size, self.nz])
fake_images = self.forward(noise)
return fake_images
def compute_gan_loss(self, output):
if self.loss_type == "ns":
errG = losses.ns_loss_gen(output)
elif self.loss_type == "wasserstein":
errG = losses.wasserstein_loss_gen(output)
else:
raise ValueError("Invalid loss_type {} selected.".format(
self.loss_type))
return errG
def generate_images(self, num_images):
"""Generate images of shape [`num_images`, C, H, W].
Depending on the final activation function, pixel values are NOT guarenteed
to be within [0, 1].
"""
return self.infer_step(np.empty(num_images, dtype="float32"))
class BaseDiscriminator(BaseModel):
r"""
Base class for a generic unconditional discriminator model.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, loss_type, **kwargs):
super().__init__(**kwargs)
self.ndf = ndf
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netG=None,
optD=None):
# Produce logits for real images
output_real = self._infer_step_implementation(real_batch)
# Produce fake images
fake_images = netG._infer_step_implementation(real_batch)
fake_images = F.zero_grad(fake_images)
# Produce logits for fake images
output_fake = self._infer_step_implementation(fake_images)
# Compute loss for D
errD = self.compute_gan_loss(output_real=output_real,
output_fake=output_fake)
D_x, D_Gz = self.compute_probs(output_real=output_real,
output_fake=output_fake)
# Backprop and update gradients
optD.zero_grad()
optD.backward(errD)
optD.step()
return errD, D_x, D_Gz
def _infer_step_implementation(self, batch):
return self.forward(batch)
def compute_gan_loss(self, output_real, output_fake):
r"""
Computes GAN loss for discriminator.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
errD (Tensor): A batch of GAN losses for the discriminator.
"""
# Compute loss for D
if self.loss_type == "gan" or self.loss_type == "ns":
errD = losses.minimax_loss_dis(output_fake=output_fake,
output_real=output_real)
elif self.loss_type == "wasserstein":
errD = losses.wasserstein_loss_dis(output_fake=output_fake,
output_real=output_real)
else:
raise ValueError("Invalid loss_type selected.")
return errD
def compute_probs(self, output_real, output_fake):
r"""
Computes probabilities from real/fake images logits.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
tuple: Average probabilities of real/fake image considered as real for the batch.
"""
D_x = | F.sigmoid(output_real) | megengine.functional.sigmoid |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
r"""
Base class for a generic unconditional generator model.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
super().__init__(**kwargs)
self.nz = nz
self.ngf = ngf
self.bottom_width = bottom_width
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netD=None,
optG=None):
# Produce fake images
fake_images = self._infer_step_implementation(real_batch)
# Compute output logit of D thinking image real
output = netD(fake_images)
# Compute loss
errG = self.compute_gan_loss(output=output)
optG.zero_grad()
optG.backward(errG)
optG.step()
return errG
def _infer_step_implementation(self, batch):
# Get only batch size from real batch
batch_size = batch.shape[0]
noise = R.gaussian(shape=[batch_size, self.nz])
fake_images = self.forward(noise)
return fake_images
def compute_gan_loss(self, output):
if self.loss_type == "ns":
errG = losses.ns_loss_gen(output)
elif self.loss_type == "wasserstein":
errG = losses.wasserstein_loss_gen(output)
else:
raise ValueError("Invalid loss_type {} selected.".format(
self.loss_type))
return errG
def generate_images(self, num_images):
"""Generate images of shape [`num_images`, C, H, W].
Depending on the final activation function, pixel values are NOT guarenteed
to be within [0, 1].
"""
return self.infer_step(np.empty(num_images, dtype="float32"))
class BaseDiscriminator(BaseModel):
r"""
Base class for a generic unconditional discriminator model.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, loss_type, **kwargs):
super().__init__(**kwargs)
self.ndf = ndf
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netG=None,
optD=None):
# Produce logits for real images
output_real = self._infer_step_implementation(real_batch)
# Produce fake images
fake_images = netG._infer_step_implementation(real_batch)
fake_images = F.zero_grad(fake_images)
# Produce logits for fake images
output_fake = self._infer_step_implementation(fake_images)
# Compute loss for D
errD = self.compute_gan_loss(output_real=output_real,
output_fake=output_fake)
D_x, D_Gz = self.compute_probs(output_real=output_real,
output_fake=output_fake)
# Backprop and update gradients
optD.zero_grad()
optD.backward(errD)
optD.step()
return errD, D_x, D_Gz
def _infer_step_implementation(self, batch):
return self.forward(batch)
def compute_gan_loss(self, output_real, output_fake):
r"""
Computes GAN loss for discriminator.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
errD (Tensor): A batch of GAN losses for the discriminator.
"""
# Compute loss for D
if self.loss_type == "gan" or self.loss_type == "ns":
errD = losses.minimax_loss_dis(output_fake=output_fake,
output_real=output_real)
elif self.loss_type == "wasserstein":
errD = losses.wasserstein_loss_dis(output_fake=output_fake,
output_real=output_real)
else:
raise ValueError("Invalid loss_type selected.")
return errD
def compute_probs(self, output_real, output_fake):
r"""
Computes probabilities from real/fake images logits.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
tuple: Average probabilities of real/fake image considered as real for the batch.
"""
D_x = F.sigmoid(output_real).mean()
D_Gz = | F.sigmoid(output_fake) | megengine.functional.sigmoid |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = | M.Sequential(*branch_main) | megengine.module.Sequential |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        """Build one ShuffleNet-V2 style block.

        Args:
            inp: number of input channels routed to the shortcut path.
            oup: total output channels (shortcut + main branch concatenated).
            mid_channels: hidden width of the main branch.
            ksize: depthwise kernel size; padding is ``ksize // 2``.
            stride: 1 (channel-shuffle shortcut) or 2 (conv projection shortcut).
        """
        super().__init__()
        self.stride = stride
        assert stride in [1, 2]
        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp
        # Main branch emits oup - inp channels; the shortcut supplies the other inp.
        outputs = oup - inp
        branch_main = [
            # pw
            M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            M.ReLU(),
            # dw
            M.Conv2d(
                mid_channels, mid_channels, ksize, stride, pad,
                groups=mid_channels, bias=False,
            ),
            # pw-linear
            M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            M.ReLU(),
        ]
        self.branch_main = M.Sequential(*branch_main)
        if stride == 2:
            # Downsampling block: shortcut needs its own depthwise + pointwise convs.
            branch_proj = [
                # dw
                M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                M.BatchNorm2d(inp),
                # pw-linear
                M.Conv2d(inp, inp, 1, 1, 0, bias=False),
                M.BatchNorm2d(inp),
                M.ReLU(),
            ]
            self.branch_proj = M.Sequential(*branch_proj)
        else:
            self.branch_proj = None
        self.init_weights()
def forward(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
return F.concat((x_proj, self.branch_main(x)), 1)
elif self.stride == 2:
x_proj = old_x
x = old_x
return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
else:
raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.shape
# assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = | F.transpose(x, (1, 0, 2)) | megengine.functional.transpose |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
| M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
| M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
| M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
| M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
M.ReLU(),
]
self.branch_proj = | M.Sequential(*branch_proj) | megengine.module.Sequential |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
| M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
| M.BatchNorm2d(inp) | megengine.module.BatchNorm2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
| M.Conv2d(inp, inp, 1, 1, 0, bias=False) | megengine.module.Conv2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
| M.BatchNorm2d(inp) | megengine.module.BatchNorm2d |
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged; other axes keep order."""
    perm = list(range(inp.ndim))
    perm[b], perm[a] = perm[a], perm[b]
    return inp.transpose(perm)
def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # tanh approximation of the Gaussian CDF, split into named steps.
    scale = F.sqrt(2 / math.pi)
    inner = scale * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map activation names used in configs to their callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every key of the JSON file as a config attribute.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: original message read "(int)or" (missing space).
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not a hard-coded class name) so subclasses build
        # instances of themselves.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters applied after normalization.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        # Token-id, position-id and segment-id lookup tables, all -> hidden_size.
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum the three embeddings, then LayerNorm and dropout.

        Args:
            input_ids: integer token ids; ``shape[1]`` is read as the sequence length.
            token_type_ids: optional segment ids; defaults to all zeros.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions 0..seq_length-1, broadcast to every row of the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention over a hidden-state sequence."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Separate linear projections for queries, keys and values.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last dim into (heads, head_size) and move heads to axis 1."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over ``hidden_states``; ``attention_mask`` is added to the raw scores
        (expected to be ~0 for kept positions and a large negative value for masked ones).
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Move heads back next to head_size so they can be merged again.
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Project attention output, apply dropout, then residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Self-attention followed by its output (projection + residual) sublayer."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion: hidden_size -> intermediate_size + activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # Activation may be given as a name (looked up in ACT2FN) or a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Feed-forward contraction back to hidden_size with residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(Module):
    """One transformer encoder layer: attention sublayer then feed-forward sublayer."""

    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        return self.output(self.intermediate(attn_out), attn_out)
class BertEncoder(Module):
    """Stack of ``num_hidden_layers`` BertLayer blocks applied sequentially."""

    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Return a list of hidden states: every layer's output, or only the last."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            # Single-element list keeps the return type uniform for callers.
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
    """Pool a sequence by projecting the first token's hidden state through tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh

    def forward(self, hidden_states):
        # "Pooling" = take the hidden state of the first token only.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class BertModel(Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary
            (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape
            [batch_size, sequence_length] with the token types indices selected in [0, 1].
            Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1]. It's a mask to be used if the input sequence length
            is smaller than the max input sequence length in the current batch.
            It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
            output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of
                encoded-hidden-states at the end of each attention block
                (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size
                [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of
                hidden-states corresponding to the last attention block of shape
                [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
            which is the output of classifier pretrained on top of the hidden state
            associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super().__init__()
        # Embedding lookup -> transformer stack -> first-token pooler.
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        output_all_encoded_layers=True,
    ):
        # Default masks: attend to every position; single-segment input.
        if attention_mask is None:
            attention_mask = F.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = F.expand_dims(attention_mask, (1, 2))
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.astype(
            next(self.parameters()).dtype
        )  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output,
            extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
        )
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-element list into a bare tensor for convenience.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.
        `bert`: an optional pre-built BertModel to reuse; a new one is created from
            `config` when omitted.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary.
            Items in the batch should begin with the special "CLS" token.
            (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1]. It's a mask to be used if the input sequence length
            is smaller than the max input sequence length in the current batch. It's the mask
            that we typically use for attention when a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels].
    Outputs:
        A tuple `(logits, loss)`:
        if `labels` is not `None`:
            `loss` is the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            `loss` is `None`; `logits` has shape [batch_size, num_labels].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    num_labels = 2
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels, bert=None):
        super().__init__()
        if bert is None:
            self.bert = BertModel(config)
        else:
            self.bert = bert
        self.num_labels = num_labels
        self.dropout = Dropout(config.hidden_dropout_prob)
        # Classification head on top of the pooled [CLS] representation.
        self.classifier = Linear(config.hidden_size, num_labels)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Only the pooled output is used; per-token states are discarded.
        _, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False
        )
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        if labels is not None:
            loss = cross_entropy(
                logits.reshape(-1, self.num_labels), labels.reshape(-1)
            )
            return logits, loss
        else:
            return logits, None
# Base URL hosting pretrained BERT weights, configs and vocab files.
DATA_URL = "https://data.megengine.org.cn/models/weights/bert"
CONFIG_NAME = "bert_config.json"
VOCAB_NAME = "vocab.txt"
# Maps public checkpoint names to the identifiers used on the model hub.
MODEL_NAME = {
    "wwm_cased_L-24_H-1024_A-16": "wwm_cased_L_24_H_1024_A_16",
    "wwm_uncased_L-24_H-1024_A-16": "wwm_uncased_L_24_H_1024_A_16",
    "cased_L-12_H-768_A-12": "cased_L_12_H_768_A_12",
    "cased_L-24_H-1024_A-16": "cased_L_24_H_1024_A_16",
    "uncased_L-12_H-768_A-12": "uncased_L_12_H_768_A_12",
    "uncased_L-24_H-1024_A-16": "uncased_L_24_H_1024_A_16",
    "chinese_L-12_H-768_A-12": "chinese_L_12_H_768_A_12",
    "multi_cased_L-12_H-768_A-12": "multi_cased_L_12_H_768_A_12",
}
def download_file(url, filename):
    """Download *url* and save it to the local path *filename*.

    Args:
        url: source URL of the file to fetch.
        filename: destination path on the local filesystem.
    """
    # Removed dead commented-out urllib.URLopener code (deprecated API).
    urllib.request.urlretrieve(url, filename)
def create_hub_bert(model_name, pretrained):
assert model_name in MODEL_NAME, "{} not in the valid models {}".format(
model_name, MODEL_NAME
)
data_dir = "./{}".format(model_name)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
vocab_url = "{}/{}/{}".format(DATA_URL, model_name, VOCAB_NAME)
config_url = "{}/{}/{}".format(DATA_URL, model_name, CONFIG_NAME)
vocab_file = "./{}/{}".format(model_name, VOCAB_NAME)
config_file = "./{}/{}".format(model_name, CONFIG_NAME)
download_file(vocab_url, vocab_file)
download_file(config_url, config_file)
config = BertConfig(config_file)
model = | hub.load("megengine/models", MODEL_NAME[model_name], pretrained=pretrained) | megengine.hub.load |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    gelu(x) = x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results). Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant: compute it with math.sqrt instead
    # of F.sqrt so no throwaway tensor is allocated on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration for `BertModel`.

    Built either from an integer vocabulary size (plus keyword overrides) or
    from a JSON file such as the one written by :meth:`to_json_file`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `input_ids`
                (int), or path to a JSON config file (str). When a path is
                given, the JSON contents win and all other arguments are
                ignored.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the encoder.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: size of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler; "gelu" or "relu"
                (see ACT2FN), or a callable.
            hidden_dropout_prob: dropout for all fully connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use cls (not a hardcoded class name) so subclasses round-trip too.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a (deep-copied) Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Affine parameters start out as the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last = len(x.shape) - 1
        mean = F.mean(x, last, True)
        var = F.mean((x - mean) ** 2, last, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = | Embedding(config.vocab_size, config.hidden_size) | megengine.module.Embedding |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    gelu(x) = x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results). Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant: compute it with math.sqrt instead
    # of F.sqrt so no throwaway tensor is allocated on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration for `BertModel`.

    Built either from an integer vocabulary size (plus keyword overrides) or
    from a JSON file such as the one written by :meth:`to_json_file`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `input_ids`
                (int), or path to a JSON config file (str). When a path is
                given, the JSON contents win and all other arguments are
                ignored.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the encoder.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: size of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler; "gelu" or "relu"
                (see ACT2FN), or a callable.
            hidden_dropout_prob: dropout for all fully connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use cls (not a hardcoded class name) so subclasses round-trip too.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a (deep-copied) Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Affine parameters start out as the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last = len(x.shape) - 1
        mean = F.mean(x, last, True)
        var = F.mean((x - mean) ** 2, last, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = | Dropout(config.hidden_dropout_prob) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    gelu(x) = x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results). Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant: compute it with math.sqrt instead
    # of F.sqrt so no throwaway tensor is allocated on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration for `BertModel`.

    Built either from an integer vocabulary size (plus keyword overrides) or
    from a JSON file such as the one written by :meth:`to_json_file`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `input_ids`
                (int), or path to a JSON config file (str). When a path is
                given, the JSON contents win and all other arguments are
                ignored.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the encoder.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: size of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler; "gelu" or "relu"
                (see ACT2FN), or a callable.
            hidden_dropout_prob: dropout for all fully connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use cls (not a hardcoded class name) so subclasses round-trip too.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a (deep-copied) Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Affine parameters start out as the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last = len(x.shape) - 1
        mean = F.mean(x, last, True)
        var = F.mean((x - mean) ** 2, last, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm keeps its TF-checkpoint capitalization so weights from
        # TensorFlow checkpoints load without renaming.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        seq_length = input_ids.shape[1]
        # Positions 0..seq_length-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = | Linear(config.hidden_size, self.all_head_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    gelu(x) = x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results). Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant: compute it with math.sqrt instead
    # of F.sqrt so no throwaway tensor is allocated on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration for `BertModel`.

    Built either from an integer vocabulary size (plus keyword overrides) or
    from a JSON file such as the one written by :meth:`to_json_file`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `input_ids`
                (int), or path to a JSON config file (str). When a path is
                given, the JSON contents win and all other arguments are
                ignored.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the encoder.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: size of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler; "gelu" or "relu"
                (see ACT2FN), or a callable.
            hidden_dropout_prob: dropout for all fully connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use cls (not a hardcoded class name) so subclasses round-trip too.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a (deep-copied) Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Affine parameters start out as the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last = len(x.shape) - 1
        mean = F.mean(x, last, True)
        var = F.mean((x - mean) ** 2, last, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm keeps its TF-checkpoint capitalization so weights from
        # TensorFlow checkpoints load without renaming.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        seq_length = input_ids.shape[1]
        # Positions 0..seq_length-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = | Linear(config.hidden_size, self.all_head_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with dimensions *a* and *b* exchanged (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    gelu(x) = x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results). Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant: compute it with math.sqrt instead
    # of F.sqrt so no throwaway tensor is allocated on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration for `BertModel`.

    Built either from an integer vocabulary size (plus keyword overrides) or
    from a JSON file such as the one written by :meth:`to_json_file`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `input_ids`
                (int), or path to a JSON config file (str). When a path is
                given, the JSON contents win and all other arguments are
                ignored.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the encoder.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: size of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler; "gelu" or "relu"
                (see ACT2FN), or a callable.
            hidden_dropout_prob: dropout for all fully connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Adopt every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use cls (not a hardcoded class name) so subclasses round-trip too.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a (deep-copied) Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = x.ndim - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = F.zeros_like(input_ids)
        seq_len = input_ids.shape[1]
        # Position ids 0..seq_len-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_len - 1, seq_len).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        total = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(total))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = | Linear(config.hidden_size, self.all_head_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped; all other axes keep their order."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as in the original BERT).

    Note: OpenAI GPT's gelu is slightly different and gives slightly
    different results.  See https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map from activation name (as used in BertConfig.hidden_act) to the callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field straight from the JSON file; the keyword
            # arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
                for key, value in json_config.items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # BUGFIX: the two adjacent string literals used to concatenate
            # without a separating space, yielding "(int)or the path ...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        # Delegate to the JSON serialization for a readable dump of all fields.
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string (with trailing newline)."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = x.ndim - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = F.zeros_like(input_ids)
        seq_len = input_ids.shape[1]
        # Position ids 0..seq_len-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_len - 1, seq_len).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        total = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(total))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = | Dropout(config.attention_probs_dropout_prob) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped; all other axes keep their order."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as in the original BERT).

    Note: OpenAI GPT's gelu is slightly different and gives slightly
    different results.  See https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map from activation name (as used in BertConfig.hidden_act) to the callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field straight from the JSON file; the keyword
            # arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
                for key, value in json_config.items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # BUGFIX: the two adjacent string literals used to concatenate
            # without a separating space, yielding "(int)or the path ...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        # Delegate to the JSON serialization for a readable dump of all fields.
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string (with trailing newline)."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = x.ndim - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = F.zeros_like(input_ids)
        seq_len = input_ids.shape[1]
        # Position ids 0..seq_len-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_len - 1, seq_len).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        total = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(total))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
# using symbolic shapes to make trace happy
x_shape = | mge.tensor(x.shape) | megengine.tensor |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped; all other axes keep their order."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as in the original BERT).

    Note: OpenAI GPT's gelu is slightly different and gives slightly
    different results.  See https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map from activation name (as used in BertConfig.hidden_act) to the callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field straight from the JSON file; the keyword
            # arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
                for key, value in json_config.items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # BUGFIX: the two adjacent string literals used to concatenate
            # without a separating space, yielding "(int)or the path ...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        # Delegate to the JSON serialization for a readable dump of all fields.
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string (with trailing newline)."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = x.ndim - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = F.zeros_like(input_ids)
        seq_len = input_ids.shape[1]
        # Position ids 0..seq_len-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_len - 1, seq_len).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        total = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(total))
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into heads and move heads forward.

        Takes x of shape (batch, seq, all_head_size) and returns
        (batch, num_heads, seq, head_size); the final transpose(0, 2, 1, 3)
        is hard-coded for the resulting 4 axes.
        """
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = | F.matmul(attention_probs, value_layer) | megengine.functional.matmul |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped; all other axes keep their order."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as in the original BERT).

    Note: OpenAI GPT's gelu is slightly different and gives slightly
    different results.  See https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map from activation name (as used in BertConfig.hidden_act) to the callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field straight from the JSON file; the keyword
            # arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
                for key, value in json_config.items():
                    self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # BUGFIX: the two adjacent string literals used to concatenate
            # without a separating space, yielding "(int)or the path ...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        # Delegate to the JSON serialization for a readable dump of all fields.
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string (with trailing newline)."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to the identity map.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        # Lookup tables for token ids, absolute positions, and segment (A/B) ids.
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token ids, shape (batch, seq_len).
            token_type_ids: optional segment ids of the same shape; defaults to zeros.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Position ids 0..seq_len-1, broadcast to every row of the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head width; hidden_size splits evenly across the heads (checked above).
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Q/K/V projections all map hidden_size -> num_heads * head_size.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into (heads, head_size) and move heads to axis 1,
        i.e. (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = F.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(0, 2, 1, 3)
# using symbolic shapes to make trace happy
context_shape = | mge.tensor(context_layer.shape) | megengine.tensor |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices allowed)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map config `hidden_act` strings to the corresponding activation callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file directly into the instance dict.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # NOTE: fixed missing space between the two string fragments
            # (previously rendered as "...(int)or the path...").
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` so subclasses round-trip through from_dict correctly.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a sorted, indented JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to the identity map.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        # Lookup tables for token ids, absolute positions, and segment (A/B) ids.
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token ids, shape (batch, seq_len).
            token_type_ids: optional segment ids of the same shape; defaults to zeros.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Position ids 0..seq_len-1, broadcast to every row of the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head width; hidden_size splits evenly across the heads (checked above).
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Q/K/V projections all map hidden_size -> num_heads * head_size.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into (heads, head_size) and move heads to axis 1,
        i.e. (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = F.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(0, 2, 1, 3)
# using symbolic shapes to make trace happy
context_shape = mge.tensor(context_layer.shape)
new_context_layer_shape = | F.concat([context_shape[:-2], self.all_head_size]) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices allowed)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map config `hidden_act` strings to the corresponding activation callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file directly into the instance dict.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # NOTE: fixed missing space between the two string fragments
            # (previously rendered as "...(int)or the path...").
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` so subclasses round-trip through from_dict correctly.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a sorted, indented JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to the identity map.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        # Lookup tables for token ids, absolute positions, and segment (A/B) ids.
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token ids, shape (batch, seq_len).
            token_type_ids: optional segment ids of the same shape; defaults to zeros.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Position ids 0..seq_len-1, broadcast to every row of the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head width; hidden_size splits evenly across the heads (checked above).
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Q/K/V projections all map hidden_size -> num_heads * head_size.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into (heads, head_size) and move heads to axis 1."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over *hidden_states*. *attention_mask* is added to the raw
        scores before the softmax (presumably large negative values at masked
        positions — precomputed in BertModel.forward; verify there)."""
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, heads, head_size).
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        # Merge the trailing (heads, head_size) axes back into all_head_size.
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = | Linear(config.hidden_size, config.hidden_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices allowed)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))
# Map config `hidden_act` strings to the corresponding activation callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file directly into the instance dict.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # NOTE: fixed missing space between the two string fragments
            # (previously rendered as "...(int)or the path...").
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` so subclasses round-trip through from_dict correctly.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a sorted, indented JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """TF-style layer normalization (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to the identity map.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        # Lookup tables for token ids, absolute positions, and segment (A/B) ids.
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token ids, shape (batch, seq_len).
            token_type_ids: optional segment ids of the same shape; defaults to zeros.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Position ids 0..seq_len-1, broadcast to every row of the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head width; hidden_size splits evenly across the heads (checked above).
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Q/K/V projections all map hidden_size -> num_heads * head_size.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into (heads, head_size) and move heads to axis 1."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over *hidden_states*. *attention_mask* is added to the raw
        scores before the softmax (presumably large negative values at masked
        positions — precomputed in BertModel.forward; verify there)."""
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, heads, head_size).
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        # Merge the trailing (heads, head_size) axes back into all_head_size.
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = | Dropout(config.hidden_dropout_prob) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Swap axes *a* and *b* of *inp* (torch.transpose-like semantics)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a Python scalar constant; compute it with math.sqrt once
    # per call instead of launching a tensor op (F.sqrt) on a constant.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * (x ** 3))))
# Map activation-name strings (used by BertConfig.hidden_act) to callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Activation in encoder/pooler; callable or one of
                "gelu", "relu", "swish".
            hidden_dropout_prob: Dropout probability for fully connected layers
                in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length this model might
                ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal initializer for
                weight matrices.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key/value pair from the JSON file as an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use ``cls`` rather than hard-coding BertConfig so subclasses
        # round-trip correctly through from_json_file()/from_dict().
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine map.
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        variance = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((variance + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names, so TensorFlow checkpoints can be loaded directly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        # Token types default to a single segment (all zeros).
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions 0..seq_len-1, broadcast across the batch dimension.
        seq_length = input_ids.shape[1]
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (..., all_head_size) -> (batch, heads, seq, head_size)."""
        # Symbolic shapes keep the reshape traceable under megengine trace.
        sym_shape = mge.tensor(x.shape)
        split_shape = F.concat(
            [sym_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        return x.reshape(split_shape).transpose(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Raw attention scores: scaled dot product between queries and keys.
        scores = F.matmul(q, transpose(k, -1, -2)) / math.sqrt(self.attention_head_size)
        # attention_mask is precomputed for all layers in BertModel.forward().
        scores = scores + attention_mask
        # Softmax over the key axis; dropping whole tokens to attend to is
        # unusual-looking but follows the original Transformer paper.
        probs = self.dropout(Softmax(len(scores.shape) - 1)(scores))
        context = F.matmul(probs, v).transpose(0, 2, 1, 3)
        # Merge the head axes back; symbolic shape keeps trace happy.
        ctx_shape = mge.tensor(context.shape)
        merged_shape = F.concat([ctx_shape[:-2], self.all_head_size])
        return context.reshape(merged_shape)
class BertSelfOutput(Module):
    """Project attention output, then residual-add and layer-normalize."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Attention sub-layer: self-attention followed by its output block."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
def __init__(self, config):
super().__init__()
self.dense = | Linear(config.hidden_size, config.intermediate_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Swap axes *a* and *b* of *inp* (torch.transpose-like semantics)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a Python scalar constant; compute it with math.sqrt once
    # per call instead of launching a tensor op (F.sqrt) on a constant.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * (x ** 3))))
# Map activation-name strings (used by BertConfig.hidden_act) to callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Activation in encoder/pooler; callable or one of
                "gelu", "relu", "swish".
            hidden_dropout_prob: Dropout probability for fully connected layers
                in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length this model might
                ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal initializer for
                weight matrices.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key/value pair from the JSON file as an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use ``cls`` rather than hard-coding BertConfig so subclasses
        # round-trip correctly through from_json_file()/from_dict().
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine map.
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        variance = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((variance + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names, so TensorFlow checkpoints can be loaded directly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        # Token types default to a single segment (all zeros).
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions 0..seq_len-1, broadcast across the batch dimension.
        seq_length = input_ids.shape[1]
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (..., all_head_size) -> (batch, heads, seq, head_size)."""
        # Symbolic shapes keep the reshape traceable under megengine trace.
        sym_shape = mge.tensor(x.shape)
        split_shape = F.concat(
            [sym_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        return x.reshape(split_shape).transpose(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Raw attention scores: scaled dot product between queries and keys.
        scores = F.matmul(q, transpose(k, -1, -2)) / math.sqrt(self.attention_head_size)
        # attention_mask is precomputed for all layers in BertModel.forward().
        scores = scores + attention_mask
        # Softmax over the key axis; dropping whole tokens to attend to is
        # unusual-looking but follows the original Transformer paper.
        probs = self.dropout(Softmax(len(scores.shape) - 1)(scores))
        context = F.matmul(probs, v).transpose(0, 2, 1, 3)
        # Merge the head axes back; symbolic shape keeps trace happy.
        ctx_shape = mge.tensor(context.shape)
        merged_shape = F.concat([ctx_shape[:-2], self.all_head_size])
        return context.reshape(merged_shape)
class BertSelfOutput(Module):
    """Project attention output, then residual-add and layer-normalize."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Attention sub-layer: self-attention followed by its output block."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion layer of the Transformer block."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a registered name ("gelu"/"relu") or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = | Linear(config.intermediate_size, config.hidden_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Swap axes *a* and *b* of *inp* (torch.transpose-like semantics)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a Python scalar constant; compute it with math.sqrt once
    # per call instead of launching a tensor op (F.sqrt) on a constant.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * (x ** 3))))
# Map activation-name strings (used by BertConfig.hidden_act) to callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Activation in encoder/pooler; callable or one of
                "gelu", "relu", "swish".
            hidden_dropout_prob: Dropout probability for fully connected layers
                in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length this model might
                ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal initializer for
                weight matrices.

        Raises:
            ValueError: if the first argument is neither int nor str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key/value pair from the JSON file as an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a config from a Python dictionary of parameters."""
        # Use ``cls`` rather than hard-coding BertConfig so subclasses
        # round-trip correctly through from_json_file()/from_dict().
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine map.
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        variance = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((variance + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names, so TensorFlow checkpoints can be loaded directly.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        # Token types default to a single segment (all zeros).
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions 0..seq_len-1, broadcast across the batch dimension.
        seq_length = input_ids.shape[1]
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (BERT style)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (..., all_head_size) -> (batch, heads, seq, head_size)."""
        # Symbolic shapes keep the reshape traceable under megengine trace.
        sym_shape = mge.tensor(x.shape)
        split_shape = F.concat(
            [sym_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        return x.reshape(split_shape).transpose(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Raw attention scores: scaled dot product between queries and keys.
        scores = F.matmul(q, transpose(k, -1, -2)) / math.sqrt(self.attention_head_size)
        # attention_mask is precomputed for all layers in BertModel.forward().
        scores = scores + attention_mask
        # Softmax over the key axis; dropping whole tokens to attend to is
        # unusual-looking but follows the original Transformer paper.
        probs = self.dropout(Softmax(len(scores.shape) - 1)(scores))
        context = F.matmul(probs, v).transpose(0, 2, 1, 3)
        # Merge the head axes back; symbolic shape keeps trace happy.
        ctx_shape = mge.tensor(context.shape)
        merged_shape = F.concat([ctx_shape[:-2], self.all_head_size])
        return context.reshape(merged_shape)
class BertSelfOutput(Module):
    """Projects the attention output and applies dropout, a residual
    connection and layer normalization."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Full attention sub-layer: self-attention followed by its output block."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        # The output block adds the residual connection back to input_tensor.
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be an activation name ("gelu"/"relu") or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = | Dropout(config.hidden_dropout_prob) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes ``a`` and ``b`` swapped (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return x * 0.5 * (1.0 + F.tanh(inner))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`
                (int), or the path to a JSON config file whose fields override
                everything else (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field verbatim from the JSON config file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: the two literals previously concatenated without a space,
            # producing "...(int)or the path...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` (not a hardcoded BertConfig) so subclasses round-trip correctly.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""
    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps
    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, followed by
    LayerNorm and dropout."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default all tokens to segment 0 ("sentence A").
            token_type_ids = F.zeros_like(input_ids)
        seq_length = input_ids.shape[1]
        # Absolute positions 0..seq_length-1, broadcast across the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (no output projection;
    see BertSelfOutput for the projection + residual)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Independent linear projections for queries, keys and values.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into heads and move the head axis forward:
        (..., all_head_size) -> (batch, num_heads, seq, head_size)."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Apply multi-head self-attention.

        Args:
            hidden_states: activations with hidden features on the last axis.
            attention_mask: additive mask, added to the raw scores before
                softmax (masked positions carry large negative values —
                precomputed in BertModel.forward).
        Returns:
            Context tensor with the per-head results merged back to
            all_head_size on the last axis.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(head size) to keep score magnitudes stable.
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function).
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities over the last axis.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Move heads back next to the sequence axis: (batch, seq, heads, head_size).
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Projects the attention output and applies dropout, a residual
    connection and layer normalization."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Full attention sub-layer: self-attention followed by its output block."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        # The output block adds the residual connection back to input_tensor.
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be an activation name ("gelu"/"relu") or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Feed-forward contraction back to hidden_size with dropout,
    residual connection and layer normalization."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One transformer encoder block: attention + feed-forward network."""
    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        # FFN with a residual connection back to the attention output.
        return self.output(self.intermediate(attention_output), attention_output)
class BertEncoder(Module):
    """Stack of ``config.num_hidden_layers`` transformer blocks."""
    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )
        # self.layer = ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run all blocks; return either every block's output or only the last,
        always as a list."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            # Caller asked only for the final hidden states.
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
def __init__(self, config):
super().__init__()
self.dense = | Linear(config.hidden_size, config.hidden_size) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes ``a`` and ``b`` swapped (torch.transpose-style)."""
    perm = list(range(inp.ndim))
    perm[a], perm[b] = perm[b], perm[a]
    return inp.transpose(perm)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return x * 0.5 * (1.0 + F.tanh(inner))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`
                (int), or the path to a JSON config file whose fields override
                everything else (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every field verbatim from the JSON config file.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: the two literals previously concatenated without a space,
            # producing "...(int)or the path...".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` (not a hardcoded BertConfig) so subclasses round-trip correctly.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""
    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps
    def forward(self, x):
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
    """Sum of word, position and token-type embeddings, followed by
    LayerNorm and dropout."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            # Default all tokens to segment 0 ("sentence A").
            token_type_ids = F.zeros_like(input_ids)
        seq_length = input_ids.shape[1]
        # Absolute positions 0..seq_length-1, broadcast across the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (no output projection;
    see BertSelfOutput for the projection + residual)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Independent linear projections for queries, keys and values.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into heads and move the head axis forward:
        (..., all_head_size) -> (batch, num_heads, seq, head_size)."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Apply multi-head self-attention.

        Args:
            hidden_states: activations with hidden features on the last axis.
            attention_mask: additive mask, added to the raw scores before
                softmax (masked positions carry large negative values —
                precomputed in BertModel.forward).
        Returns:
            Context tensor with the per-head results merged back to
            all_head_size on the last axis.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(head size) to keep score magnitudes stable.
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function).
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities over the last axis.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Move heads back next to the sequence axis: (batch, seq, heads, head_size).
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Projects the attention output and applies dropout, a residual
    connection and layer normalization."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Full attention sub-layer: self-attention followed by its output block."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        # The output block adds the residual connection back to input_tensor.
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be an activation name ("gelu"/"relu") or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Feed-forward contraction back to hidden_size with dropout,
    residual connection and layer normalization."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One transformer encoder block: attention + feed-forward network."""
    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        # FFN with a residual connection back to the attention output.
        return self.output(self.intermediate(attention_output), attention_output)
class BertEncoder(Module):
    """Stack of ``config.num_hidden_layers`` transformer blocks."""
    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )
        # self.layer = ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run all blocks; return either every block's output or only the last,
        always as a list."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            # Caller asked only for the final hidden states.
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
    """Pools the sequence by transforming the hidden state of its first token."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh
    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class BertModel(Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape
[batch_size, sequence_length] with the token types indices selected in [0, 1].
Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch.
It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of
encoded-hidden-states at the end of each attention block
(i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size
[batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of
hidden-states corresponding to the last attention block of shape
[batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
which is the output of classifier pretrained on top of the hidden state
associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super().__init__()
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
):
if attention_mask is None:
attention_mask = F.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
# print('input_ids', input_ids.sum())
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# print('attention_mask', attention_mask.sum())
extended_attention_mask = | F.expand_dims(attention_mask, (1, 2)) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes ``a`` and ``b`` swapped (negative indices allowed)."""
    axes = list(range(inp.ndim))
    axes[b], axes[a] = axes[a], axes[b]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return x * 0.5 * (1.0 + F.tanh(inner))
# Maps the activation names accepted by BertConfig.hidden_act to callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key of the JSON file as an attribute; the explicit
            # keyword arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # NOTE: space added between the two literals so the message reads
            # "... (int) or the path ..." instead of "(int)or the path".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not the hard-coded class name) so subclasses get
        # instances of their own type from this alternate constructor.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""
    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to identity.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps
    def forward(self, x):
        # Normalize over the last axis only.
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token indices, shape [batch_size, seq_length].
            token_type_ids: optional segment ids of the same shape; all zeros
                (single segment) when omitted.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions are 0..seq_length-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (without output projection)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        # all_head_size == hidden_size given the divisibility check above.
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # Reshape [..., seq, hidden] -> [..., heads, seq, head_size].
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over `hidden_states`; `attention_mask` is an additive mask
        (0.0 to attend, large negative to ignore) broadcastable to the scores."""
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(d_k) as in "Attention Is All You Need".
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Back to [..., seq, heads, head_size] before merging the head axes.
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Projects the attention output, then applies dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection around the attention sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Full attention block: self-attention followed by its output projection."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        # The output module adds the residual from input_tensor itself.
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion (hidden -> intermediate) with the configured activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be given as a string key into ACT2FN or as a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Projects intermediate -> hidden, then applies dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection around the feed-forward sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One Transformer encoder layer: attention block + feed-forward block."""
    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        ffn_out = self.intermediate(attn_out)
        return self.output(ffn_out, attn_out)
class BertEncoder(Module):
    """Stack of ``config.num_hidden_layers`` BertLayer modules."""
    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run all layers; return every layer's output, or just the last one
        (still as a one-element list) when output_all_encoded_layers is False."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh
    def forward(self, hidden_states):
        # "Pool" the model by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertModel(Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary
            (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape
            [batch_size, sequence_length] with the token types indices selected in [0, 1].
            Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1]. It's a mask to be used if the input sequence length
            is smaller than the max input sequence length in the current batch.
            It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
            output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of
                encoded-hidden-states at the end of each attention block
                (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size
                [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of
                hidden-states corresponding to the last attention block of shape
                [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
            which is the output of classifier pretrained on top of the hidden state
            associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super().__init__()
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        output_all_encoded_layers=True,
    ):
        # Missing mask means "attend everywhere"; missing segment ids mean
        # a single segment (all zeros).
        if attention_mask is None:
            attention_mask = F.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = F.expand_dims(attention_mask, (1, 2))
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.astype(
            next(self.parameters()).dtype
        )  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output,
            extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
        )
        # The pooler always consumes the final layer's output.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the one-element list into a bare tensor for convenience.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary.
Items in the batch should begin with the special "CLS" token.
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch. It's the mask
that we typically use for attention when a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels, bert=None):
super().__init__()
if bert is None:
self.bert = BertModel(config)
else:
self.bert = bert
self.num_labels = num_labels
self.dropout = | Dropout(config.hidden_dropout_prob) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes ``a`` and ``b`` swapped (negative indices allowed)."""
    axes = list(range(inp.ndim))
    axes[b], axes[a] = axes[a], axes[b]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return x * 0.5 * (1.0 + F.tanh(inner))
# Maps the activation names accepted by BertConfig.hidden_act to callables.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path of a JSON config file to load (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key of the JSON file as an attribute; the explicit
            # keyword arguments are ignored in this case.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # NOTE: space added between the two literals so the message reads
            # "... (int) or the path ..." instead of "(int)or the path".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not the hard-coded class name) so subclasses get
        # instances of their own type from this alternate constructor.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""
    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable per-channel scale and shift, initialized to identity.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps
    def forward(self, x):
        # Normalize over the last axis only.
        last_axis = len(x.shape) - 1
        mean = F.mean(x, last_axis, True)
        var = F.mean((x - mean) ** 2, last_axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token indices, shape [batch_size, seq_length].
            token_type_ids: optional segment ids of the same shape; all zeros
                (single segment) when omitted.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions are 0..seq_length-1, broadcast over the batch dimension.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention (without output projection)."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        # all_head_size == hidden_size given the divisibility check above.
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # Reshape [..., seq, hidden] -> [..., heads, seq, head_size].
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over `hidden_states`; `attention_mask` is an additive mask
        (0.0 to attend, large negative to ignore) broadcastable to the scores."""
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(d_k) as in "Attention Is All You Need".
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Back to [..., seq, heads, head_size] before merging the head axes.
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Projects the attention output, then applies dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection around the attention sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Full attention block: self-attention followed by its output projection."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        # The output module adds the residual from input_tensor itself.
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion (hidden -> intermediate) with the configured activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be given as a string key into ACT2FN or as a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Projects intermediate -> hidden, then applies dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection around the feed-forward sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One Transformer encoder layer: attention block + feed-forward block."""
    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        ffn_out = self.intermediate(attn_out)
        return self.output(ffn_out, attn_out)
class BertEncoder(Module):
    """Stack of ``config.num_hidden_layers`` BertLayer modules."""
    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run all layers; return every layer's output, or just the last one
        (still as a one-element list) when output_all_encoded_layers is False."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""
    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh
    def forward(self, hidden_states):
        # "Pool" the model by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertModel(Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").

    Params:
        config: a BertConfig instance with the configuration used to build the model.

    Inputs:
        `input_ids`: int tensor of shape [batch_size, sequence_length] holding
            word token indices in the vocabulary.
        `token_type_ids`: optional int tensor of the same shape with segment
            indices in [0, 1] (0 = sentence A, 1 = sentence B).
        `attention_mask`: optional int tensor of the same shape with values in
            [0, 1]; 1 marks real tokens, 0 marks padding.
        `output_all_encoded_layers`: if True (default), return the hidden
            states of every encoder layer; otherwise only the last layer.

    Outputs: tuple of (encoded_layers, pooled_output)
        `encoded_layers`: list of [batch_size, sequence_length, hidden_size]
            tensors (one per encoder layer) when
            `output_all_encoded_layers=True`, else the single last-layer
            tensor of that shape.
        `pooled_output`: [batch_size, hidden_size] tensor produced by
            BertPooler from the first ([CLS]) token, used e.g. for the
            Next-Sentence task during pretraining.

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = mge.tensor([[31, 51, 99], [15, 5, 0]])
    input_mask = mge.tensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = mge.tensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super().__init__()
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        output_all_encoded_layers=True,
    ):
        # Defaults: attend everywhere, single segment.
        if attention_mask is None:
            attention_mask = F.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Turn the [batch, seq] padding mask into a [batch, 1, 1, seq] additive
        # bias that broadcasts over heads and query positions: 0.0 where we may
        # attend, -10000.0 where masked. Added to the raw attention logits
        # before softmax, this effectively removes masked positions.
        ext_mask = F.expand_dims(attention_mask, (1, 2))
        ext_mask = ext_mask.astype(
            next(self.parameters()).dtype
        )  # fp16 compatibility
        ext_mask = (1.0 - ext_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output,
            ext_mask,
            output_all_encoded_layers=output_all_encoded_layers,
        )
        pooled_output = self.pooler(encoded_layers[-1])
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary.
Items in the batch should begin with the special "CLS" token.
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch. It's the mask
that we typically use for attention when a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels, bert=None):
super().__init__()
if bert is None:
self.bert = BertModel(config)
else:
self.bert = bert
self.num_labels = num_labels
self.dropout = Dropout(config.hidden_dropout_prob)
self.classifier = | Linear(config.hidden_size, num_labels) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices work via
    normal Python list indexing)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results).
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))

# Maps the `hidden_act` config string to the corresponding activation callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file whose keys
                are loaded directly onto this instance (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # JSON mode: every key of the file becomes an instance attribute;
            # the keyword arguments above are ignored.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: the original message was missing the space before "or".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not a hard-coded class name) so subclasses construct
        # instances of themselves.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layernorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine transform.
        axis = len(x.shape) - 1
        mean = F.mean(x, axis, True)
        var = F.mean((x - mean) ** 2, axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = | F.zeros_like(input_ids) | megengine.functional.zeros_like |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices work via
    normal Python list indexing)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results).
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))

# Maps the `hidden_act` config string to the corresponding activation callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file whose keys
                are loaded directly onto this instance (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # JSON mode: every key of the file becomes an instance attribute;
            # the keyword arguments above are ignored.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: the original message was missing the space before "or".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not a hard-coded class name) so subclasses construct
        # instances of themselves.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layernorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine transform.
        axis = len(x.shape) - 1
        mean = F.mean(x, axis, True)
        var = F.mean((x - mean) ** 2, axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
position_ids = F.broadcast_to( | F.expand_dims(position_ids, 0) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return *inp* with axes *a* and *b* swapped (negative indices work via
    normal Python list indexing)."""
    axes = list(range(inp.ndim))
    axes[a], axes[b] = axes[b], axes[a]
    return inp.transpose(axes)
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results).
    Also see https://arxiv.org/abs/1606.08415
    """
    inner = F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))
    return 0.5 * x * (1.0 + F.tanh(inner))

# Maps the `hidden_act` config string to the corresponding activation callable.
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file whose keys
                are loaded directly onto this instance (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # JSON mode: every key of the file becomes an instance attribute;
            # the keyword arguments above are ignored.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed: the original message was missing the space before "or".
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use ``cls`` (not a hard-coded class name) so subclasses construct
        # instances of themselves.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Layernorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        # Normalize over the last axis, then apply the learned affine transform.
        axis = len(x.shape) - 1
        mean = F.mean(x, axis, True)
        var = F.mean((x - mean) ** 2, axis, True)
        normalized = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normalized + self.bias
class BertEmbeddings(Module):
    """Sums word, position and token-type embeddings, then applies LayerNorm
    and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased so the attribute name matches
        # TensorFlow checkpoint variable names when loading weights.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Positions 0..seq_length-1, broadcast to every sequence in the batch.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        summed = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention.

    Produces the per-head context vectors only; the output projection and
    residual/LayerNorm live in BertSelfOutput.
    """
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Fused projections for all heads at once: hidden_size -> heads * head_size.
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape [batch, seq, all_head_size] -> [batch, heads, seq, head_size]."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        # `attention_mask` is the additive bias precomputed in
        # BertModel.forward(): 0.0 for attend, -10000.0 for masked.
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(head_size) as in "Attention Is All You Need".
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Add the additive attention mask (precomputed for all layers in
        # BertModel.forward()).
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities (softmax over keys).
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        # Back to [batch, seq, heads, head_size], then merge the head axes.
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Output projection of the self-attention sub-layer: dense -> dropout ->
    residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Self-attention followed by its output projection/residual block."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        context = self.self(input_tensor, attention_mask)
        return self.output(context, input_tensor)
class BertIntermediate(Module):
    """First half of the feed-forward network: expand to ``intermediate_size``
    and apply the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be given as a name ("gelu"/"relu") or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Second half of the feed-forward sub-layer: project back to
    ``hidden_size``, apply dropout, add the residual and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Residual connection around the feed-forward output, then LayerNorm.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One Transformer encoder block: self-attention followed by the
    position-wise feed-forward network (each with residual + LayerNorm)."""

    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        # BertOutput adds the residual from attn_out back in.
        return self.output(self.intermediate(attn_out), attn_out)
class BertEncoder(Module):
    """Stack of ``config.num_hidden_layers`` identical BertLayer blocks."""

    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        # When output_all_encoded_layers is True, collect every layer's output;
        # otherwise return a single-element list holding only the last one.
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.activation = F.tanh
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape
[batch_size, sequence_length] with the token types indices selected in [0, 1].
Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch.
It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of
encoded-hidden-states at the end of each attention block
(i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size
[batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of
hidden-states corresponding to the last attention block of shape
[batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
which is the output of classifier pretrained on top of the hidden state
associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super().__init__()
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
):
if attention_mask is None:
attention_mask = | F.ones_like(input_ids) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
class BertLayerNorm(Module):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
self.variance_epsilon = eps
def forward(self, x):
u = F.mean(x, len(x.shape) - 1, True)
s = F.mean((x - u) ** 2, len(x.shape) - 1, True)
x = (x - u) / ((s + self.variance_epsilon) ** 0.5)
return self.weight * x + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
# using symbolic shapes to make trace happy
x_shape = mge.tensor(x.shape)
new_x_shape = F.concat(
[x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
)
x = x.reshape(new_x_shape)
return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = F.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(0, 2, 1, 3)
# using symbolic shapes to make trace happy
context_shape = mge.tensor(context_layer.shape)
new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer
class BertSelfOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(Module):
def __init__(self, config):
super().__init__()
self.layer = Sequential(
*[BertLayer(config) for _ in range(config.num_hidden_layers)]
)
# self.layer = ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.activation = F.tanh
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape
[batch_size, sequence_length] with the token types indices selected in [0, 1].
Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch.
It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of
encoded-hidden-states at the end of each attention block
(i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size
[batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of
hidden-states corresponding to the last attention block of shape
[batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
which is the output of classifier pretrained on top of the hidden state
associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super().__init__()
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
):
if attention_mask is None:
attention_mask = F.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = | F.zeros_like(input_ids) | megengine.functional.zeros_like |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
class BertLayerNorm(Module):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
self.variance_epsilon = eps
def forward(self, x):
u = F.mean(x, len(x.shape) - 1, True)
s = F.mean((x - u) ** 2, len(x.shape) - 1, True)
x = (x - u) / ((s + self.variance_epsilon) ** 0.5)
return self.weight * x + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = | F.linspace(0, seq_length - 1, seq_length) | megengine.functional.linspace |
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh( | F.sqrt(2 / math.pi) | megengine.functional.sqrt |
#!/usr/bin/env mdl
# This file will seal the nms opr within a better way than lib_nms
import ctypes
import os
import struct
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine._internal.craniotome import CraniotomeBase
from megengine.core.tensor import wrap_io_tensor
_current_path = os.path.dirname(os.path.abspath(__file__))
_so_path = os.path.join(_current_path, "lib_nms.so")
try:
_so_lib = ctypes.CDLL(_so_path)
except Exception:
import subprocess
mge_path = os.path.join(os.path.dirname(mge.__file__), "_internal", "include")
assert os.path.exists(mge_path), "{} file not found".format(mge_path)
src_file = os.path.join(_current_path, "gpu_nms", "nms.cu")
assert os.path.exists(src_file), "{} file not found".format(src_file)
cmd = (
"nvcc -I {} -shared -o {} -Xcompiler '-fno-strict-aliasing -fPIC' {}".format(
mge_path, _so_path, src_file
)
)
subprocess.check_call(cmd, shell=True)
_so_lib = ctypes.CDLL(_so_path)
_TYPE_POINTER = ctypes.c_void_p
_TYPE_POINTER = ctypes.c_void_p
_TYPE_INT = ctypes.c_int32
_TYPE_FLOAT = ctypes.c_float
_so_lib.NMSForwardGpu.argtypes = [
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_FLOAT,
_TYPE_INT,
_TYPE_POINTER,
]
_so_lib.NMSForwardGpu.restype = _TYPE_INT
_so_lib.CreateHostDevice.restype = _TYPE_POINTER
class NMSCran(CraniotomeBase):
__nr_inputs__ = 1
__nr_outputs__ = 3
def setup(self, iou_threshold, max_output):
self._iou_threshold = iou_threshold
self._max_output = max_output
# Load the necessary host device
self._host_device = _so_lib.CreateHostDevice()
def execute(self, inputs, outputs):
box_tensor_ptr = inputs[0].pubapi_dev_tensor_ptr
output_tensor_ptr = outputs[0].pubapi_dev_tensor_ptr
output_num_tensor_ptr = outputs[1].pubapi_dev_tensor_ptr
mask_tensor_ptr = outputs[2].pubapi_dev_tensor_ptr
_so_lib.NMSForwardGpu(
box_tensor_ptr,
mask_tensor_ptr,
output_tensor_ptr,
output_num_tensor_ptr,
self._iou_threshold,
self._max_output,
self._host_device,
)
def grad(self, wrt_idx, inputs, outputs, out_grad):
return 0
def init_output_dtype(self, input_dtypes):
return [np.int32, np.int32, np.int32]
def get_serialize_params(self):
return ("nms", struct.pack("fi", self._iou_threshold, self._max_output))
def infer_shape(self, inp_shapes):
nr_box = inp_shapes[0][0]
threadsPerBlock = 64
output_size = nr_box
# here we compute the number of int32 used in mask_outputs.
# In original version, we compute the bytes only.
mask_size = int(
nr_box
* (nr_box // threadsPerBlock + int((nr_box % threadsPerBlock) > 0))
* 8
/ 4
)
return [[output_size], [1], [mask_size]]
@wrap_io_tensor
def gpu_nms(box, iou_threshold, max_output):
keep, num, _ = NMSCran.make(box, iou_threshold=iou_threshold, max_output=max_output)
return keep[:num]
def batched_nms(boxes, scores, idxs, iou_threshold, num_keep, use_offset=False):
if use_offset:
boxes_offset = (
| mge.tensor([0, 0, 1, 1], device=boxes.device) | megengine.tensor |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
M.init.zeros_(self.bias)
def forward(self, x):
output = x.reshape(x.shape[0], self.num_groups, -1)
mean = | F.mean(output, axis=2, keepdims=True) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
M.init.zeros_(self.bias)
def forward(self, x):
output = x.reshape(x.shape[0], self.num_groups, -1)
mean = F.mean(output, axis=2, keepdims=True)
mean2 = | F.mean(output ** 2, axis=2, keepdims=True) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
| M.init.ones_(self.weight) | megengine.module.init.ones_ |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
| M.init.zeros_(self.bias) | megengine.module.init.zeros_ |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
M.init.zeros_(self.bias)
def forward(self, x):
output = x.reshape(x.shape[0], self.num_groups, -1)
mean = F.mean(output, axis=2, keepdims=True)
mean2 = F.mean(output ** 2, axis=2, keepdims=True)
var = mean2 - mean * mean
output = (output - mean) / | F.sqrt(var + self.eps) | megengine.functional.sqrt |
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
BatchNorm2d, which the weight, bias, running_mean, running_var
are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / | F.sqrt(self.running_var + self.eps) | megengine.functional.sqrt |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = | M.Sequential(*layers) | megengine.module.Sequential |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = M.Sequential(*layers)
self.add = | M.Elemwise("ADD") | megengine.module.Elemwise |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = M.Sequential(*layers)
self.add = M.Elemwise("ADD")
def forward(self, x):
if self.use_res_connect:
return self.add(x, self.conv(x))
else:
return self.conv(x)
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = | M.Sequential(*features) | megengine.module.Sequential |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = M.Sequential(*layers)
self.add = M.Elemwise("ADD")
def forward(self, x):
if self.use_res_connect:
return self.add(x, self.conv(x))
else:
return self.conv(x)
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = | M.QuantStub() | megengine.module.QuantStub |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A skip connection is used only for stride-1 blocks whose input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = inp == oup and self.stride == 1

        body = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            body.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # 3x3 depthwise convolution
        body.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                   stride=stride, groups=expanded, bias=False))
        # 1x1 pointwise projection (linear, i.e. no activation)
        body.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*body)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        if not self.use_res_connect:
            return self.conv(x)
        # residual shortcut
        return self.add(x, self.conv(x))
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = | M.DequantStub() | megengine.module.DequantStub |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A skip connection is used only for stride-1 blocks whose input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = inp == oup and self.stride == 1

        body = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            body.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # 3x3 depthwise convolution
        body.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                   stride=stride, groups=expanded, bias=False))
        # 1x1 pointwise projection (linear, i.e. no activation)
        body.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*body)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        if not self.use_res_connect:
            return self.conv(x)
        # residual shortcut
        return self.add(x, self.conv(x))
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode='fan_out')
if m.bias is not None:
M.init.zeros_(m.bias)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
M.init.zeros_(m.bias)
def forward(self, x):
x = self.quant(x)
x = self.features(x)
x = | F.avg_pool2d(x, 7) | megengine.functional.avg_pool2d |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A skip connection is used only for stride-1 blocks whose input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = inp == oup and self.stride == 1

        body = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            body.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # 3x3 depthwise convolution
        body.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                   stride=stride, groups=expanded, bias=False))
        # 1x1 pointwise projection (linear, i.e. no activation)
        body.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*body)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        if not self.use_res_connect:
            return self.conv(x)
        # residual shortcut
        return self.add(x, self.conv(x))
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode='fan_out')
if m.bias is not None:
M.init.zeros_(m.bias)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
M.init.zeros_(m.bias)
def forward(self, x):
x = self.quant(x)
x = self.features(x)
x = F.avg_pool2d(x, 7)
x = | F.flatten(x, 1) | megengine.functional.flatten |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A skip connection is used only for stride-1 blocks whose input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = inp == oup and self.stride == 1

        body = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            body.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # 3x3 depthwise convolution
        body.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                   stride=stride, groups=expanded, bias=False))
        # 1x1 pointwise projection (linear, i.e. no activation)
        body.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*body)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        if not self.use_res_connect:
            return self.conv(x)
        # residual shortcut
        return self.add(x, self.conv(x))
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ | M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False) | megengine.module.ConvBnRelu2d |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A skip connection is used only for stride-1 blocks whose input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = inp == oup and self.stride == 1

        body = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            body.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # 3x3 depthwise convolution
        body.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                   stride=stride, groups=expanded, bias=False))
        # 1x1 pointwise projection (linear, i.e. no activation)
        body.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*body)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        if not self.use_res_connect:
            return self.conv(x)
        # residual shortcut
        return self.add(x, self.conv(x))
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append( | M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False) | megengine.module.ConvBnRelu2d |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise conv, then a linear pointwise projection; the input is added
    back when the block preserves both spatial size and channel count."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        expanded = int(round(inp * expand_ratio))
        # Residual shortcut only when input and output shapes match.
        self.use_res_connect = self.stride == 1 and inp == oup
        branch = []
        if expand_ratio != 1:
            # pointwise expansion
            branch.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # depthwise conv
        branch.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                     stride=stride, groups=expanded, bias=False))
        # pointwise-linear projection (no activation)
        branch.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*branch)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
            M.Dropout(0.2),
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise conv, then a linear pointwise projection; the input is added
    back when the block preserves both spatial size and channel count."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        expanded = int(round(inp * expand_ratio))
        # Residual shortcut only when input and output shapes match.
        self.use_res_connect = self.stride == 1 and inp == oup
        branch = []
        if expand_ratio != 1:
            # pointwise expansion
            branch.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # depthwise conv
        branch.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                     stride=stride, groups=expanded, bias=False))
        # pointwise-linear projection (no activation)
        branch.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*branch)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
            M.Linear(self.last_channel, num_classes),
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append( | M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False) | megengine.module.ConvBnRelu2d |
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
            M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise conv, then a linear pointwise projection; the input is added
    back when the block preserves both spatial size and channel count."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        expanded = int(round(inp * expand_ratio))
        # Residual shortcut only when input and output shapes match.
        self.use_res_connect = self.stride == 1 and inp == oup
        branch = []
        if expand_ratio != 1:
            # pointwise expansion
            branch.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # depthwise conv
        branch.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                     stride=stride, groups=expanded, bias=False))
        # pointwise-linear projection (no activation)
        branch.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*branch)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode='fan_out')
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise conv, then a linear pointwise projection; the input is added
    back when the block preserves both spatial size and channel count."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        expanded = int(round(inp * expand_ratio))
        # Residual shortcut only when input and output shapes match.
        self.use_res_connect = self.stride == 1 and inp == oup
        branch = []
        if expand_ratio != 1:
            # pointwise expansion
            branch.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # depthwise conv
        branch.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                     stride=stride, groups=expanded, bias=False))
        # pointwise-linear projection (no activation)
        branch.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*branch)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode='fan_out')
if m.bias is not None:
                    M.init.zeros_(m.bias)
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise conv, then a linear pointwise projection; the input is added
    back when the block preserves both spatial size and channel count."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        expanded = int(round(inp * expand_ratio))
        # Residual shortcut only when input and output shapes match.
        self.use_res_connect = self.stride == 1 and inp == oup
        branch = []
        if expand_ratio != 1:
            # pointwise expansion
            branch.append(M.ConvBnRelu2d(inp, expanded, kernel_size=1, bias=False))
        # depthwise conv
        branch.append(M.ConvBnRelu2d(expanded, expanded, kernel_size=3, padding=1,
                                     stride=stride, groups=expanded, bias=False))
        # pointwise-linear projection (no activation)
        branch.append(M.ConvBn2d(expanded, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*branch)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
    """MobileNet V2 network built from :class:`InvertedResidual` blocks."""

    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # Default architecture: each row is interpreted below as
            # t = expansion factor, c = output channels,
            # n = number of repeated blocks, s = stride of the first block.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer; channel counts are scaled by width_mult and
        # rounded to a multiple of round_nearest
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each group downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
        # make it M.Sequential
        self.features = M.Sequential(*features)
        # building classifier
        self.classifier = M.Sequential(
            M.Dropout(0.2),
            M.Linear(self.last_channel, num_classes),
        )
        # quantization entry/exit stubs; presumably applied around the forward
        # pass, which is not visible in this chunk — confirm against full file
        self.quant = M.QuantStub()
        self.dequant = M.DequantStub()
        # weight initialization: MSRA/He normal for convs, identity for BN
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    M.init.zeros_(m.bias)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # A residual connection is only valid when the block keeps both the
        # spatial size (stride 1) and the channel count unchanged.
        self.use_res_connect = self.stride == 1 and inp == oup
        stages = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
        # depthwise 3x3 convolution
        stages.append(M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
                                     stride=stride, groups=hidden_dim, bias=False))
        # pointwise linear projection (no activation)
        stages.append(M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*stages)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        return self.add(x, out) if self.use_res_connect else out
class MobileNetV2(M.Module):
    """MobileNet V2 network built from :class:`InvertedResidual` blocks."""

    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # Default architecture: each row is interpreted below as
            # t = expansion factor, c = output channels,
            # n = number of repeated blocks, s = stride of the first block.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer; channel counts are scaled by width_mult and
        # rounded to a multiple of round_nearest
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each group downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
        # make it M.Sequential
        self.features = M.Sequential(*features)
        # building classifier
        self.classifier = M.Sequential(
            M.Dropout(0.2),
            M.Linear(self.last_channel, num_classes),
        )
        # quantization entry/exit stubs; presumably applied around the forward
        # pass, which is not visible in this chunk — confirm against full file
        self.quant = M.QuantStub()
        self.dequant = M.DequantStub()
        # weight initialization: MSRA/He normal for convs, identity for BN
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    M.init.zeros_(m.bias)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)