prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
| M.Conv2d(channel, mid_channel, 1, 1, 0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
| M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
| M.Conv2d(channel, mid_channel, 1, 1, 0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
| M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
| M.Conv2d(channel, mid_channel, 1, 1, 0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
| M.Conv2d(3, 64, 3, 2, 1) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
| M.Conv2d(64, 64, 3, 1, 1) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
| M.Conv2d(64, 64, 3, 2, 1) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
| M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu") | megengine.module.init.msra_normal_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = | M.Conv2d(mid_channel, keypoint_num, 3, 1, 1) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
outs = self.forward(self.inputs["image"])
loss = 0
for stage_out in outs:
for ind, scale_out in enumerate(stage_out[:-1]):
label = (
self.inputs["heatmap"][:, ind]
* (self.inputs["heat_valid"] > 1.1)[:, :, None, None]
)
tmp = | F.square_loss(scale_out, label) | megengine.functional.square_loss |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
outs = self.forward(self.inputs["image"])
loss = 0
for stage_out in outs:
for ind, scale_out in enumerate(stage_out[:-1]):
label = (
self.inputs["heatmap"][:, ind]
* (self.inputs["heat_valid"] > 1.1)[:, :, None, None]
)
tmp = F.square_loss(scale_out, label)
loss += tmp / 4 / len(outs)
# OHKM loss for the largest heatmap
tmp = ((stage_out[-1] - self.inputs["heatmap"][:, -1]) ** 2).mean(3).mean(
2
) * (self.inputs["heat_valid"] > 0.1)
ohkm_loss = 0
for i in range(tmp.shape[0]):
selected_loss, _ = F.top_k(
tmp[i], self.keypoint_num // 2, descending=True
)
ohkm_loss += selected_loss.mean()
ohkm_loss /= tmp.shape[0]
loss += ohkm_loss
return loss
def predict(self):
outputs = self.forward(self.inputs["image"])
pred = outputs[-1][-1]
return pred
def forward(self, x):
f = self.head(x)
outputs = []
for i in range(self.nr_stg):
multi_scale_features = self.stages["Stage_{}_body".format(i)](f)
multi_scale_heatmaps = []
for j in range(4):
out = self.stages["Stage_{}_tail".format(i)]["tail_{}".format(j)](
multi_scale_features[j]
)
out = | F.interpolate(out, scale_factor=2 ** (3 - j)) | megengine.functional.interpolate |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = | M.init.calculate_fan_in_and_fan_out(m.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
| M.init.uniform_(m.bias, -bound, bound) | megengine.module.init.uniform_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
| M.init.ones_(m.weight) | megengine.module.init.ones_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
| M.init.zeros_(m.bias) | megengine.module.init.zeros_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
| M.Conv2d(mid_channel, 64, 1, 1, 0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = | M.init.calculate_fan_in_and_fan_out(m.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
| M.init.uniform_(m.bias, -bound, bound) | megengine.module.init.uniform_ |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = | mge.tensor(-1.0) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = | F.matmul(x, w) | megengine.functional.matmul |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = | mge.Parameter([1.0]) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = | mge.Parameter(2.0) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = | mge.tensor(3) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = | mge.Parameter(1.0) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = | mge.Parameter(1.0) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = | F.ones((10, 10, 3, 3)) | megengine.functional.ones |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = | M.Conv2d(10, 10, kernel_size=3, padding=1) | megengine.module.Conv2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = | F.matmul(x, w) | megengine.functional.matmul |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = | dist.get_rank() | megengine.distributed.get_rank |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = | dist.get_world_size() | megengine.distributed.get_world_size |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = | M.Linear(rank * 2 + 2, rank * 2 + 4) | megengine.module.Linear |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif( | get_device_count_by_fork("gpu") | megengine.distributed.helper.get_device_count_by_fork |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = | mge.tensor([1.0, 3.0, 5.0]) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = | mge.tensor([2.0, 4.0, 6.0]) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = | mge.Tensor(i, dtype="float32") | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = M.Linear(rank * 2 + 2, rank * 2 + 4)
gm = | GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = M.Linear(rank * 2 + 2, rank * 2 + 4)
gm = GradManager().attach(m.parameters())
opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
def train_func(x):
with gm:
if rank != 0:
x = dist.functional.remote_recv(
rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
)
y = m(x)
if rank != size - 1:
dist.functional.remote_send(y, dest_rank=rank + 1)
gm.backward()
else:
y = y.mean()
gm.backward(y)
opt.step().clear_grad()
train_funcs = [
train_func,
| trace(symbolic=False) | megengine.jit.trace |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = M.Linear(rank * 2 + 2, rank * 2 + 4)
gm = GradManager().attach(m.parameters())
opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
def train_func(x):
with gm:
if rank != 0:
x = dist.functional.remote_recv(
rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
)
y = m(x)
if rank != size - 1:
dist.functional.remote_send(y, dest_rank=rank + 1)
gm.backward()
else:
y = y.mean()
gm.backward(y)
opt.step().clear_grad()
train_funcs = [
train_func,
trace(symbolic=False)(train_func),
| trace(symbolic=True) | megengine.jit.trace |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
def test_no_dependency():
x = mge.tensor(3)
w = mge.Parameter(1.0)
w_no_dep = mge.Parameter(1.0)
gm = GradManager()
gm.attach(w)
gm.attach(w_no_dep)
with gm:
out1 = x * w
out2 = w_no_dep * out1
gm.backward(out1.sum())
assert w.grad is not None
assert w_no_dep.grad is None
def test_regression_1762():
x = F.ones((10, 10, 3, 3))
conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
t_shape = (1, 10, 1, 1)
weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
gm = GradManager()
gm.attach(list(conv.parameters()) + [weight, bias])
with gm:
out1 = conv(x)
out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
# Weird error only occur when this action is placed after BN
# Op type is not relevant
loss = out1 + 1
gm.backward(loss)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = M.Linear(rank * 2 + 2, rank * 2 + 4)
gm = GradManager().attach(m.parameters())
opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
def train_func(x):
with gm:
if rank != 0:
x = dist.functional.remote_recv(
rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
)
y = m(x)
if rank != size - 1:
| dist.functional.remote_send(y, dest_rank=rank + 1) | megengine.distributed.functional.remote_send |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = | F.transpose(right_slid, (0, 2, 1, 3, 4)) | megengine.functional.transpose |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = | F.mean(left_feature * right_slid, axis=1, keepdims=True) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = | F.transpose(coords, (0, 2, 3, 1)) | megengine.functional.transpose |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = | F.split(left_feature, 4, axis=1) | megengine.functional.split |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = | F.split(right_feature, 4, axis=1) | megengine.functional.split |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = | F.concat(corrs, axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = | F.split(left_feature, 4, axis=1) | megengine.functional.split |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = | F.split(right_feature, 4, axis=1) | megengine.functional.split |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = F.expand_dims(coords, 1) + offsets
coords = F.reshape(coords, (N, -1, W, 2)) # [N, search_num*H, W, 2]
right_feature = bilinear_sampler(
right_feature, coords
) # [N, C, search_num*H, W]
right_feature = F.reshape(
right_feature, (N, C, -1, H, W)
) # [N, C, search_num, H, W]
left_feature = F.expand_dims(left_feature, 2)
corr = F.mean(left_feature * right_feature, axis=1)
corrs.append(corr)
final_corr = | F.concat(corrs, axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
| F.reshape(extra_offset, (N, search_num, 2, H, W)) | megengine.functional.reshape |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = | F.expand_dims(offsets, (0, 2, 3)) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = | F.transpose(coords, (0, 2, 3, 1)) | megengine.functional.transpose |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = F.expand_dims(coords, 1) + offsets
coords = | F.reshape(coords, (N, -1, W, 2)) | megengine.functional.reshape |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = F.expand_dims(coords, 1) + offsets
coords = F.reshape(coords, (N, -1, W, 2)) # [N, search_num*H, W, 2]
right_feature = bilinear_sampler(
right_feature, coords
) # [N, C, search_num*H, W]
right_feature = F.reshape(
right_feature, (N, C, -1, H, W)
) # [N, C, search_num, H, W]
left_feature = | F.expand_dims(left_feature, 2) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = F.expand_dims(coords, 1) + offsets
coords = F.reshape(coords, (N, -1, W, 2)) # [N, search_num*H, W, 2]
right_feature = bilinear_sampler(
right_feature, coords
) # [N, C, search_num*H, W]
right_feature = F.reshape(
right_feature, (N, C, -1, H, W)
) # [N, C, search_num, H, W]
left_feature = F.expand_dims(left_feature, 2)
corr = | F.mean(left_feature * right_feature, axis=1) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
| F.transpose(left_feature, (0, 2, 3, 1)) | megengine.functional.transpose |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
| F.transpose(right_feature, (0, 2, 3, 1)) | megengine.functional.transpose |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = | mge.tensor(y_grid, device=self.fmap1.device) | megengine.tensor |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = | F.expand_dims(coords, 1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose( | F.reshape(x, (N, H, W, C)) | megengine.functional.reshape |
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
right_pad = F.pad(right_feature, pad_witdth=(
(0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape( | F.stack((x_grid, y_grid)) | megengine.functional.stack |
import megengine as mge
import megengine.module as M
from megengine import functional as F
import numpy as np
from .transformer import MultiheadAttention
#from .utility import has_nan_or_inf
# mge.core.set_option('async_level', 0)
class DecoderWrapper(M.Module):
def __init__(self, cfg):
super().__init__()
channels = cfg.distiller.HIDDEN_DIM
heads = cfg.distiller.ATT_HEADS
# this is a local module derived from official implementation, we modify the last modules
self.matt = MultiheadAttention(channels, heads)
self.pos_projector = | M.Linear(in_features=channels, out_features=channels) | megengine.module.Linear |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
| set_symbolic_shape(True) | megengine.core._trace_option.set_symbolic_shape |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = | F.ones((1, 14, 8, 8), dtype=np.uint8) | megengine.functional.ones |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = | trace_module(module, data) | megengine.traced_module.trace_module |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = | F.zeros((1,), dtype=np.int32) | megengine.functional.zeros |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = F.zeros((1,), dtype=np.int32)
roi = | F.ones((1, 2, 2), dtype=np.float32) | megengine.functional.ones |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = F.zeros((1,), dtype=np.int32)
roi = F.ones((1, 2, 2), dtype=np.float32)
y = module(data, idx, roi)
traced_module = | trace_module(module, data, idx, roi) | megengine.traced_module.trace_module |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = F.zeros((1,), dtype=np.int32)
roi = F.ones((1, 2, 2), dtype=np.float32)
y = module(data, idx, roi)
traced_module = trace_module(module, data, idx, roi)
np.testing.assert_array_equal(traced_module(data, idx, roi), y)
func = | trace(traced_module, capture_as_const=True) | megengine.jit.trace |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = F.zeros((1,), dtype=np.int32)
roi = F.ones((1, 2, 2), dtype=np.float32)
y = module(data, idx, roi)
traced_module = trace_module(module, data, idx, roi)
np.testing.assert_array_equal(traced_module(data, idx, roi), y)
func = trace(traced_module, capture_as_const=True)
np.testing.assert_array_equal(func(data, idx, roi), y)
model = io.BytesIO()
func.dump(model, arg_names=("data", "idx", "roi"))
model.seek(0)
infer_cg = | cgtools.GraphInference(model) | megengine.utils.comp_graph_tools.GraphInference |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = | F.ones((1,)) | megengine.functional.ones |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = | F.zeros((1,)) | megengine.functional.zeros |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = | F.maximum((xmax - xmin) / W, (ymax - ymin) / H) | megengine.functional.maximum |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = | F.broadcast_to(self.I, (N,)) | megengine.functional.broadcast_to |
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
I = F.broadcast_to(self.I, (N,))
M = | F.broadcast_to(self.M, (N, 3, 3)) | megengine.functional.broadcast_to |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import List
import megengine.distributed as dist
from basecore.config import ConfigDict
from basecore.engine import BaseHook
from basecore.utils import str_timestamp
from basecls.utils import registers
from .hooks import (
CheckpointHook,
EvalHook,
LoggerHook,
LRSchedulerHook,
PreciseBNHook,
ResumeHook,
TensorboardHook,
)
__all__ = ["DefaultHooks"]
@registers.hooks.register()
class DefaultHooks:
"""The default hooks factory.
It combines :py:class:`~basecls.engine.LRSchedulerHook` ->
:py:class:`~basecls.engine.PreciseBNHook` -> :py:class:`~basecls.engine.ResumeHook` ->
:py:class:`~basecls.engine.TensorboardHook` -> :py:class:`~basecls.engine.LoggerHook` ->
:py:class:`~basecls.engine.CheckpointHook` -> :py:class:`~basecls.engine.EvalHook`.
"""
@classmethod
def build(cls, cfg: ConfigDict) -> List[BaseHook]:
"""Build function with a simple strategy.
Args:
cfg: config for setting hooks.
Returns:
A hook list.
"""
output_dir = cfg.output_dir
hook_list = [
LRSchedulerHook(),
PreciseBNHook(cfg.bn.precise_every_n_epoch),
ResumeHook(output_dir, cfg.resume),
]
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = | M.Linear(100, 200, bias=False) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = | M.Linear(200, 200, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = | F.relu(x) | megengine.functional.relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = | M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2) | megengine.module.pooling.MaxPool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = | M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2) | megengine.module.pooling.AvgPool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = | M.BatchNorm1d(32) | megengine.module.BatchNorm1d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = | M.BatchNorm2d(3) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return | F.transpose(x, self.perm) | megengine.functional.transpose |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return | F.concat([a, a], self.concat_idx) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return | F.softmax(a) | megengine.functional.softmax |