prompt (stringlengths 94–42.6k) | completion (stringlengths 6–120) | api (stringlengths 14–68)
---|---|---|
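Each row below is a fill-in-the-blank example: a Python source *prompt* that breaks off at an API call site, the ground-truth *completion*, and the fully qualified *api* name. Inline, a row boundary is rendered as `… = | completion | api |`. A minimal sketch of how a row could be reassembled into runnable source; the field names are assumptions about the row schema, not a documented format:

```python
def reassemble(row: dict) -> str:
    """Splice the ground-truth completion back into the prompt.

    Assumes each row carries "prompt" and "completion" fields, with the
    prompt ending exactly where the masked API call should begin.
    """
    return row["prompt"] + row["completion"]

# Hypothetical row mirroring the first example in this dump:
row = {
    "prompt": "func = ",
    "completion": "trace(symbolic=symbolic)",
    "api": "megengine.jit.trace",
}
print(reassemble(row))  # -> func = trace(symbolic=symbolic)
```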
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = | trace(symbolic=symbolic) | megengine.jit.trace |
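The masked call above is `megengine.jit.trace`, used throughout these tests to compile a function either eagerly (`symbolic=False`) or as a static graph (`symbolic=True`). A minimal standalone sketch, with illustrative shapes and values:

```python
import numpy as np
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True)  # symbolic=False records ops eagerly instead
def double(x):
    return x * 2

out = double(tensor(np.arange(4, dtype="float32")))
print(out.numpy())  # [0. 2. 4. 6.]
```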
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = | scatter(inp, axis=axis) | megengine.distributed.functional.scatter |
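The `scatter` collective masked above sends one slice of rank 0's tensor to each rank, splitting along `axis`. The test's roundabout `expect` computation (split along `axis`, concatenate along 0, halve along 0) reduces to a plain split; a NumPy restatement of that reference, assuming 2 ranks as in the test:

```python
import numpy as np

def scatter_reference(x: np.ndarray, axis: int, n_ranks: int = 2):
    """Per-rank slices that megengine.distributed.functional.scatter
    should produce from rank 0's input, per the test's expect values."""
    return np.split(x, n_ranks, axis=axis)

x = np.arange(16, dtype="float32").reshape(2, 8)
rank0, rank1 = scatter_reference(x, axis=1)
print(rank0.shape, rank1.shape)  # (2, 4) (2, 4)
```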
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = x + 1
data = (x, y)
_x = np.split(x, 2, axis=axis)
_x = np.concatenate(_x, axis=0)
expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
all_to_all_output = all_to_all(
inp, split_axis=split_axis, concat_axis=concat_axis
)
gather_C = | gather(inp, axis=concat_axis) | megengine.distributed.functional.gather |
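`gather` is the inverse direction: the root rank receives every rank's tensor concatenated along `axis`. A NumPy reference under the same 2-rank assumption:

```python
import numpy as np

def gather_reference(per_rank_inputs, axis: int):
    """Root-rank result of megengine.distributed.functional.gather:
    the per-rank tensors concatenated along `axis` (2-rank sketch)."""
    return np.concatenate(per_rank_inputs, axis=axis)

a = np.zeros((2, 3), dtype="float32")
b = np.ones((2, 3), dtype="float32")
print(gather_reference([a, b], axis=0).shape)  # (4, 3)
```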
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = x + 1
data = (x, y)
_x = np.split(x, 2, axis=axis)
_x = np.concatenate(_x, axis=0)
expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
all_to_all_output = all_to_all(
inp, split_axis=split_axis, concat_axis=concat_axis
)
gather_C = gather(inp, axis=concat_axis)
gather_B = | gather(all_to_all_output, axis=split_axis) | megengine.distributed.functional.gather |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = x + 1
data = (x, y)
_x = np.split(x, 2, axis=axis)
_x = np.concatenate(_x, axis=0)
expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
all_to_all_output = all_to_all(
inp, split_axis=split_axis, concat_axis=concat_axis
)
gather_C = gather(inp, axis=concat_axis)
gather_B = gather(all_to_all_output, axis=split_axis)
if rank == 0:
return gather_B, gather_C
return all_to_all_output
func = | trace(symbolic=symbolic) | megengine.jit.trace |
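The `test_all_to_all` row above cross-checks the collective against two `gather` calls. A 2-rank NumPy sketch of `all_to_all` as I read the test (each rank keeps its own `split_axis` slice of every rank's input, concatenated along `concat_axis`); this is an inference from the test, not an official specification:

```python
import numpy as np

def all_to_all_reference(per_rank_inputs, split_axis, concat_axis):
    """NumPy model of all_to_all for n ranks: rank `dst` receives
    piece `dst` of every rank's input, concatenated over source rank."""
    n = len(per_rank_inputs)
    pieces = [np.split(x, n, axis=split_axis) for x in per_rank_inputs]
    return [
        np.concatenate([pieces[src][dst] for src in range(n)], axis=concat_axis)
        for dst in range(n)
    ]

x = np.arange(8, dtype="float32").reshape(2, 4)
y = x + 8
out0, out1 = all_to_all_reference([x, y], split_axis=0, concat_axis=1)
print(out0.shape, out1.shape)  # (1, 8) (1, 8)
```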
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = | F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1) | megengine.functional.mean |
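The metric code above reduces in two stages: first per sample (`axis=1`), then over the batch. A small sketch of that pattern with plain tensors:

```python
import numpy as np
import megengine.functional as F
from megengine import tensor

gt = tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], dtype="float32"))
pred = tensor(np.array([[1.0, 0.0, 0.0], [1.0, 2.0, 1.0]], dtype="float32"))

per_sample_mse = F.mean((gt - pred) ** 2, axis=1)  # shape (2,), one value per sample
batch_mse = F.mean(per_sample_mse)                 # scalar, averaged over the batch
print(per_sample_mse.numpy(), batch_mse.numpy())   # approx [0.333 0.333] 0.333
```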
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = | F.mean((t_gt - t_pred)**2, axis=1) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = | F.mean(r_mse) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = | F.mean(t_mse) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = | F.mean(r_mae) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = | F.mean(t_mae) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn't depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = | F.norm(concatenated[:, :, 3], axis=-1) | megengine.functional.norm |
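`F.norm(..., axis=-1)` above reduces the residual translation column of each batched transform to its Euclidean magnitude. A tiny check of that reading:

```python
import numpy as np
import megengine.functional as F
from megengine import tensor

t = tensor(np.array([[3.0, 4.0, 0.0]], dtype="float32"))  # batch of one translation
print(F.norm(t, axis=-1).numpy())  # [5.] -- Euclidean length per row
```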
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn't depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = | F.mean(residual_rotdeg) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn't depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = F.mean(residual_rotdeg)
err_t = | F.mean(residual_transmag) | megengine.functional.mean |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean( | F.abs(r_gt_euler_deg - r_pred_euler_deg) | megengine.functional.abs |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean( | F.abs(t_gt - t_pred) | megengine.functional.abs |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum( | F.concat(total_losses) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = | F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) | megengine.functional.nn.l1_loss |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = | F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn't depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos( | F.clip(0.5 * (rot_trace - 1), -1.0, 1.0) | megengine.functional.clip |
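Clamping the trace term into [-1, 1] before `F.acos` guards against NaNs from floating-point drift; the quantity being evaluated is the rotation angle theta = arccos((tr(R) - 1) / 2). A sketch of why the clip matters:

```python
import numpy as np
import megengine.functional as F
from megengine import tensor

rot_trace = tensor(np.array([3.0000002], dtype="float32"))  # drifted just past 3
cos_theta = F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)        # clamp into acos domain
theta_deg = F.acos(cos_theta) * 180.0 / np.pi
print(theta_deg.numpy())  # [0.] instead of NaN
```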
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight= | mge.tensor([0.7, 0.3]) | megengine.tensor |
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight= | mge.tensor([0.7, 0.3]) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = | quantize_qat(net) | megengine.quantization.quantize.quantize_qat |
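`quantize_qat` rewrites a float module into its quantization-aware-training counterpart, swapping modules such as `M.Linear` for their QAT equivalents with fake-quant observers attached. A minimal sketch using the default qconfig:

```python
import megengine.module as M
from megengine.quantization.quantize import quantize_qat

class Net(M.Module):
    def __init__(self):
        super().__init__()
        self.fc = M.Linear(100, 10)

    def forward(self, x):
        return self.fc(x)

# Converts float submodules in place to QAT versions so that training
# can simulate int8 rounding while still running in float.
qat_net = quantize_qat(Net())
```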
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = | dtype.qint8(16.0 / 128.0) | megengine.core.tensor.dtype.qint8 |
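The scale `16.0 / 128.0` maps the int8 range [-128, 127] onto roughly [-16, 16), which matches the `* 16` inputs generated above. A short sketch of the round trip through the qint8 helpers used in this file (exact rounded values are illustrative):

```python
import numpy as np
import megengine as mge
from megengine.core.tensor import dtype

inp_dtype = dtype.qint8(16.0 / 128.0)      # scale = 0.125
q = mge.tensor(np.array([0.5, 15.9], dtype="float32")).astype(inp_dtype)
f = dtype.convert_from_qint8(q.numpy())    # back to float: value = int8 * scale
print(dtype.get_scale(inp_dtype), f)
```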
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = | dtype.qint8(16.0 / 128.0) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
max_err,
require_quantize=False,
split_conv_relu=True,
)
def test_det_model():
net = | mge.load("models_fire_det.fix_batch.fuse_scale_cpu.pkl") | megengine.load |
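# A minimal sketch of the mge.save / mge.load round-trip behind the
# completion above; "tmp_linear.pkl" is a placeholder path, not a file
# shipped with the dataset.
import megengine as mge
import megengine.module as M
net = M.Linear(4, 2)
mge.save(net, "tmp_linear.pkl")         # pickle the Module to disk
restored = mge.load("tmp_linear.pkl")   # rebuilds the same Module object
assert isinstance(restored, M.Linear)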
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
max_err,
require_quantize=False,
split_conv_relu=True,
)
def test_det_model():
net = mge.load("models_fire_det.fix_batch.fuse_scale_cpu.pkl")
inp_dtype = | dtype.qint8(16.0 / 128.0) | megengine.core.tensor.dtype.qint8 |
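# Quick arithmetic check (plain numpy, illustration only): qint8 with
# scale s represents [-128 * s, 127 * s]; for s = 16 / 128 that is
# [-16.0, 15.875], which is why the prompts draw inputs as random() * 16.
import numpy as np
s = 16.0 / 128.0
codes = np.array([-128, 127], dtype=np.int8).astype(np.float32)
print(codes * s)                        # -> [-16.     15.875]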
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
max_err,
require_quantize=False,
split_conv_relu=True,
)
def test_det_model():
net = mge.load("models_fire_det.fix_batch.fuse_scale_cpu.pkl")
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 512, 512))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_snpe_model_8f():
model = "8w16f_backbone.tm"
net = | mge.load(model) | megengine.load |
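# Sketch of where a ".tm" file like the one loaded above comes from:
# trace_module records a Module into a TracedModule, and .flatten().graph
# prints every traced op at a single level. The toy Linear here is an
# illustration, not the 8w16f backbone from the row above.
import numpy as np
import megengine as mge
import megengine.module as M
from megengine.traced_module import trace_module
toy = M.Linear(4, 2)
tm = trace_module(toy, mge.tensor(np.zeros((1, 4), dtype="float32")))
print(tm.flatten().graph)               # same flat-graph form printed above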
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
max_err,
require_quantize=False,
split_conv_relu=True,
)
def test_det_model():
net = mge.load("models_fire_det.fix_batch.fuse_scale_cpu.pkl")
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 512, 512))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_snpe_model_8f():
model = "8w16f_backbone.tm"
net = mge.load(model)
print(net.flatten().graph)
inp_dtype = | dtype.quint8(16.0 / 128.0, 128) | megengine.core.tensor.dtype.quint8 |
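# Unlike qint8, the quint8 dtype completed above is asymmetric:
# code = clip(round(x / scale) + zero_point, 0, 255) and
# x_hat = (code - zero_point) * scale, so negative inputs stay representable.
# Minimal round-trip sketch using the same helpers as get_qat_inputs_quint8:
import numpy as np
import megengine as mge
from megengine.core.tensor import dtype
qdt = dtype.quint8(16.0 / 128.0, 128)           # scale = 0.125, zero_point = 128
x = mge.tensor(np.random.uniform(-1, 1, (4,)).astype("float32"))
q = x.astype(qdt)                               # float32 -> uint8 codes
x_hat = dtype.convert_from_quint8(q.numpy())    # (codes - 128) * 0.125
assert np.max(np.abs(x_hat - x.numpy())) <= 0.125 / 2 + 1e-6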
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor( | dtype.get_scale(inp_dtype) | megengine.core.tensor.dtype.get_scale |
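# get_scale here (and get_zero_point a few rows below) simply read back the
# parameters that were recorded on the quantized numpy dtype when it was
# built; a quick self-contained check:
from megengine.core.tensor import dtype
qdt = dtype.quint8(16.0 / 128.0, 128)
assert dtype.get_scale(qdt) == 16.0 / 128.0
assert dtype.get_zero_point(qdt) == 128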
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor( | dtype.get_scale(inp_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor( | dtype.get_zero_point(inp_dtype) | megengine.core.tensor.dtype.get_zero_point |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = | M.Elemwise("add") | megengine.module.Elemwise |
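# M.Elemwise("add"), completed above, wraps the elementwise op in a Module
# so quantize_qat can later swap it for a QAT version that fake-quantizes
# its output; in float mode it behaves exactly like "+":
import numpy as np
import megengine as mge
import megengine.module as M
add = M.Elemwise("add")
a = mge.tensor(np.ones((2, 3), dtype="float32"))
b = mge.tensor(np.full((2, 3), 2.0, dtype="float32"))
np.testing.assert_allclose(add(a, b).numpy(), a.numpy() + b.numpy())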
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = | M.Elemwise("add") | megengine.module.Elemwise |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = | M.Elemwise("add") | megengine.module.Elemwise |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = | QuantStub() | megengine.module.quant_dequant.QuantStub |
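# QuantStub, completed above, is an identity in float mode; it only marks
# where float data enters the quantized region so quantize_qat can replace
# it with a QAT stub driven by the act_fake_quant configured just below.
import numpy as np
import megengine as mge
from megengine.module.quant_dequant import QuantStub
stub = QuantStub()
x = mge.tensor(np.random.random((2, 2)).astype("float32"))
assert np.allclose(stub(x).numpy(), x.numpy())  # plain stub is a no-op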
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = | QuantStub() | megengine.module.quant_dequant.QuantStub |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1( | mge.tensor(self.data1) | megengine.tensor |
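# The QParams object built by create_qparams in the rows above bundles the
# dtype metadata with scale and zero_point for set_qparams; the attribute
# names printed here mirror create_qparams' keyword arguments (an
# assumption for illustration, not something the dataset itself asserts).
import megengine as mge
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.quantization.utils import create_qparams
qp = create_qparams(
    dtype_meta=_builtin_quant_dtypes["qint8"],
    scale=mge.tensor(16.0 / 128.0),
    zero_point=None,
)
print(qp.dtype_meta.name, qp.scale.numpy())     # -> qint8 0.125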