| prompt | completion | api |
| --- | --- | --- |
| stringlengths 94–42.6k | stringlengths 6–120 | stringlengths 14–68 |
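Each row below is a (prompt, completion, api) triple: the prompt is a Python source prefix truncated at a call site, the completion is the ground-truth expression that fills the gap, and the api column gives the fully qualified MegEngine API being completed. As a minimal sketch, one row could be viewed in memory as follows (the values are taken from the first row below; the dict layout itself is illustrative, not the dataset's storage format):

```python
# Hypothetical in-memory view of one dataset row; the values come from the
# first row below, while the dict representation is illustrative only.
row = {
    "prompt": "... get = F.sum(",  # code prefix, cut off at the call site
    "completion": "tensor(data)",  # expression that completes the call
    "api": "megengine.tensor",     # fully qualified API the row exercises
}
```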
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum( | tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum( | tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([ | tensor(data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), | tensor(data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([ | tensor(data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), | tensor(data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([ | tensor(data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([tensor(data1), | tensor(data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = | jit.trace(symbolic=is_symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_( | tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
@pytest.mark.parametrize("descending", [True, False])
@pytest.mark.parametrize("sorted", [True, False])
@pytest.mark.parametrize("inp1d", [True, False])
@pytest.mark.parametrize("kth_only", [True, False])
def test_topk(descending, sorted, inp1d, kth_only):
k = 3
if inp1d:
data = np.random.permutation(7)
else:
data = np.random.permutation(5 * 7).reshape(5, 7)
data = data.astype(np.int32)
def np_sort(x):
if descending:
return np.sort(x)[..., ::-1]
return np.sort(x)
res = F.topk(
tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
)
values, indices = res
values = values.numpy()
indices = indices.numpy()
if kth_only:
np.testing.assert_equal(
values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
)
np.testing.assert_equal(values, np_sort(data)[..., k - 1])
else:
np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
if not sorted:
values = np_sort(values)
np.testing.assert_equal(values, np_sort(data)[..., :k])
@pytest.mark.parametrize("is_trace", [True, False])
def test_reduce_on_empty_tensor(is_trace):
dtypes = [np.float32, np.int32, bool]  # np.bool is a deprecated alias of the builtin bool
inputs = [
(np.random.random((0,)), None),
(np.random.random((3, 0, 2)), 1),
(np.random.random((10, 10, 0, 10)), 0),
]
def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
if is_trace:
fn = | jit.trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axes in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axes in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
@pytest.mark.parametrize("descending", [True, False])
@pytest.mark.parametrize("sorted", [True, False])
@pytest.mark.parametrize("inp1d", [True, False])
@pytest.mark.parametrize("kth_only", [True, False])
def test_topk(descending, sorted, inp1d, kth_only):
k = 3
if inp1d:
data = np.random.permutation(7)
else:
data = np.random.permutation(5 * 7).reshape(5, 7)
data = data.astype(np.int32)
def np_sort(x):
if descending:
return np.sort(x)[..., ::-1]
return np.sort(x)
res = F.topk(
tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
)
values, indices = res
values = values.numpy()
indices = indices.numpy()
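    # kth_only asks topk for just the k-th ranked element (plus its index)
    # rather than the full top-k slice; the assertions below cover both forms.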
if kth_only:
np.testing.assert_equal(
values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
)
np.testing.assert_equal(values, np_sort(data)[..., k - 1])
else:
np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
if not sorted:
values = np_sort(values)
np.testing.assert_equal(values, np_sort(data)[..., :k])
@pytest.mark.parametrize("is_trace", [True, False])
def test_reduce_on_empty_tensor(is_trace):
    dtypes = [np.float32, np.int32, np.bool_]  # np.bool_: the bare np.bool alias was removed in NumPy 1.24
inputs = [
(np.random.random((0,)), None),
(np.random.random((3, 0, 2)), 1),
(np.random.random((10, 10, 0, 10)), 0),
]
def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
if is_trace:
fn = jit.trace(symbolic=symbolic)(fn)
for i in range(3):
            out = fn(tensor(input, dtype=dtype), axis=axis)
            # the dump truncates at the call above; the comparison below
            # reconstructs the evident intent (check against the NumPy
            # reference) and is not verbatim original code
            ref = ref_fn(input.astype(dtype), axis=axis)
            np.testing.assert_equal(out.numpy(), ref)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test an int8-quantized model on ImageNet.

Note:
    * QAT simulates int8 with fp32; GPU only.
    * Quantized mode uses real int8; CPU only, and a bit slow.
    * Results may differ slightly between QAT and quantized modes.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode use cpu only")
if world_size > 1:
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
    # Build the validation dataset
    logger.info("preparing dataset...")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(mean=128),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
        if step % args.report_freq == 0 and dist.get_rank() == 0:
            # the dump truncates at dist.get_rank(); the logging call and the
            # return below reconstruct the evident tail of infer() — the
            # caller above unpacks three values from it
            logger.info("Step %d, %s %s %s %s", step, objs, top1, top5, total_time)
    return objs.avg, top1.avg, top5.avg
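

# The functions above use AverageMeter, which this dump never defines; below is
# a minimal reconstruction (hypothetical, but matching the usage seen above:
# update(val, n), .avg, and str() for logging).
class AverageMeter:
    """Tracks the latest value and running average of a scalar metric."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(name=self.name, val=self.val, avg=self.avg)


if __name__ == "__main__":
    main()  # conventional entry point; the original script is expected to end this way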
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode uses cpu only")
if world_size > 1:
# start distributed testing, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
| T.Resize(256) | megengine.data.transform.Resize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode uses cpu only")
if world_size > 1:
# start distributed testing, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
| T.CenterCrop(224) | megengine.data.transform.CenterCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode uses cpu only")
if world_size > 1:
# start distributed testing, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
| T.Normalize(mean=128) | megengine.data.transform.Normalize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode uses cpu only")
if world_size > 1:
# start distributed testing, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(mean=128),
| T.ToMode("CHW") | megengine.data.transform.ToMode |
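The last four rows each complete one stage of the same evaluation transform pipeline. A self-contained sketch of that pipeline applied to one raw image, assuming the single-sample apply() interface of MegEngine vision transforms (the input shape is illustrative):

import numpy as np
import megengine.data.transform as T

# resize, center-crop to 224, subtract the mean, then convert
# HWC uint8 to CHW for the network
eval_transform = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.Normalize(mean=128),
    T.ToMode("CHW"),
])

image = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
out = eval_transform.apply(image)
print(out.shape)  # expected: (3, 224, 224)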
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="testing directory")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
args.dir = os.path.abspath(args.dir)
setup_logger(args.dir, "test_all_log.txt", to_loguru=True)
logger.info(f"args: {args}")
result = dict()
for f in pathlib.Path(args.dir).glob("**/*.py"):
sys.path.append(os.path.dirname(f))
module_name = os.path.splitext(os.path.basename(f))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
weight_path = f"{os.path.splitext(f)[0]}.pkl"
if os.path.isfile(weight_path):
cfg.weights = weight_path
else:
sys.path.pop(-1)
continue
cfg.set_mode("freeze")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
tester = build(cfg)
acc1, acc5 = tester.test()
result[module_name] = dict(acc1=acc1, acc5=acc5)
sys.path.pop(-1)
logger.info(json.dumps(result, indent=4))
with open("result.json", "w") as f:
json.dump(result, f)
def build(cfg: ConfigDict):
"""Build function for testing script.
Args:
cfg: config for testing.
Returns:
A tester.
"""
model = build_model(cfg)
load_model(model, cfg.weights)
model.eval()
default_logging(cfg, model)
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
return ClsTester(cfg, model, dataloader)
def main():
"""Main function for testing script."""
parser = make_parser()
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if not os.path.exists(args.dir):
raise ValueError("Directory does not exist")
device_count = | mge.device.get_device_count("gpu") | megengine.device.get_device_count |
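The completion above queries how many GPUs are visible so the script can fall back to CPU. A tiny sketch of the same dispatch decision:

import megengine as mge

def pick_world_size(requested=None):
    # use all visible GPUs unless the caller asks for a specific count
    n_gpus = mge.device.get_device_count("gpu")
    return n_gpus if requested is None else requested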
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="testing directory")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
args.dir = os.path.abspath(args.dir)
setup_logger(args.dir, "test_all_log.txt", to_loguru=True)
logger.info(f"args: {args}")
result = dict()
for f in pathlib.Path(args.dir).glob("**/*.py"):
sys.path.append(os.path.dirname(f))
module_name = os.path.splitext(os.path.basename(f))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
weight_path = f"{os.path.splitext(f)[0]}.pkl"
if os.path.isfile(weight_path):
cfg.weights = weight_path
else:
sys.path.pop(-1)
continue
cfg.set_mode("freeze")
if cfg.fastrun:
logger.info("Using fastrun mode...")
| mge.functional.debug_param.set_execution_strategy("PROFILE") | megengine.functional.debug_param.set_execution_strategy |
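set_execution_strategy("PROFILE") enables fastrun: candidate kernel implementations are benchmarked once and the fastest is cached for later calls. A hedged sketch of gating it behind a config flag (`cfg` is a hypothetical object with a boolean `fastrun` field):

import megengine as mge

def maybe_enable_fastrun(cfg):
    # "PROFILE" trades a slower first iteration for faster steady-state
    # kernels; otherwise leave the default heuristic strategy in place
    if getattr(cfg, "fastrun", False):
        mge.functional.debug_param.set_execution_strategy("PROFILE")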
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="testing directory")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
args.dir = os.path.abspath(args.dir)
setup_logger(args.dir, "test_all_log.txt", to_loguru=True)
logger.info(f"args: {args}")
result = dict()
for f in pathlib.Path(args.dir).glob("**/*.py"):
sys.path.append(os.path.dirname(f))
module_name = os.path.splitext(os.path.basename(f))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
weight_path = f"{os.path.splitext(f)[0]}.pkl"
if os.path.isfile(weight_path):
cfg.weights = weight_path
else:
sys.path.pop(-1)
continue
cfg.set_mode("freeze")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
tester = build(cfg)
acc1, acc5 = tester.test()
result[module_name] = dict(acc1=acc1, acc5=acc5)
sys.path.pop(-1)
logger.info(json.dumps(result, indent=4))
with open("result.json", "w") as f:
json.dump(result, f)
def build(cfg: ConfigDict):
"""Build function for testing script.
Args:
cfg: config for testing.
Returns:
A tester.
"""
model = build_model(cfg)
load_model(model, cfg.weights)
model.eval()
default_logging(cfg, model)
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
return ClsTester(cfg, model, dataloader)
def main():
"""Main function for testing script."""
parser = make_parser()
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if not os.path.exists(args.dir):
raise ValueError("Directory does not exist")
device_count = mge.device.get_device_count("gpu")
if device_count == 0:
logger.warning("No GPU was found, testing on CPU")
worker(args)
elif device_count > 1:
mp_worker = | dist.launcher(worker) | megengine.distributed.launcher |
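dist.launcher wraps a worker function so that calling the wrapper forks one process per visible GPU with the process group already initialized. A minimal sketch, assuming the worker takes the parsed args as in the script above:

import megengine as mge
import megengine.distributed as dist

def worker(args):
    # rank and world size are already set up inside the launched processes
    print("rank {} / {}".format(dist.get_rank(), dist.get_world_size()))

def run(args):
    device_count = mge.device.get_device_count("gpu")
    if device_count > 1:
        dist.launcher(worker)(args)  # one process per GPU
    else:
        worker(args)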
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="testing directory")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{ | dist.get_rank() | megengine.distributed.get_rank |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add default train data loader
train_sampler = | RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True) | megengine.data.sampler.RandomSampler |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add defalt train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = | DataLoader(train_ds, train_sampler, num_workers=params.num_workers) | megengine.data.DataLoader |
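The two rows above complete the training sampler/loader pair. A self-contained sketch with a toy in-memory dataset (ArrayDataset and the sizes are illustrative, not part of the original script):

import numpy as np
from megengine.data import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler

# toy dataset: 100 samples of 8-dim features with integer labels
features = np.random.random((100, 8)).astype("float32")
labels = np.random.randint(0, 10, (100,)).astype("int32")
train_ds = ArrayDataset(features, labels)

# shuffle every epoch and drop the last incomplete batch, as above
train_sampler = RandomSampler(train_ds, batch_size=16, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler)

for batch_features, batch_labels in train_dl:
    print(batch_features.shape, batch_labels.shape)  # (16, 8) (16,)
    break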
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = | dist.get_rank() | megengine.distributed.get_rank |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
| dist.get_rank() | megengine.distributed.get_rank |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add default train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)
dataloaders["train"] = train_dl
# choose val or test data loader for evaluation
for split in ["val", "test"]:
if split in params.eval_type:
if split == "val":
val_sampler = | SequentialSampler(val_ds, batch_size=params.eval_batch_size) | megengine.data.sampler.SequentialSampler |
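Evaluation switches to a SequentialSampler: no shuffling, no dropped batches, one deterministic pass over the split. A short sketch mirroring the row above (the toy dataset is illustrative):

import numpy as np
from megengine.data import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import SequentialSampler

val_ds = ArrayDataset(
    np.random.random((50, 8)).astype("float32"),
    np.zeros((50,), dtype="int32"),
)
val_sampler = SequentialSampler(val_ds, batch_size=32)  # in-order batches
val_dl = DataLoader(val_ds, val_sampler)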
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add default train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)
dataloaders["train"] = train_dl
# choose val or test data loader for evaluation
for split in ["val", "test"]:
if split in params.eval_type:
if split == "val":
val_sampler = SequentialSampler(val_ds, batch_size=params.eval_batch_size)
dl = | DataLoader(val_ds, val_sampler, num_workers=params.num_workers) | megengine.data.DataLoader |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add defalt train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)
dataloaders["train"] = train_dl
# chosse val or test data loader for evaluate
for split in ["val", "test"]:
if split in params.eval_type:
if split == "val":
val_sampler = SequentialSampler(val_ds, batch_size=params.eval_batch_size)
dl = DataLoader(val_ds, val_sampler, num_workers=params.num_workers)
elif split == "test":
test_sampler = | SequentialSampler(test_ds, batch_size=params.eval_batch_size) | megengine.data.sampler.SequentialSampler |
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(dataset_path):
    raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
if categories is None:
    # no subset given: assume consecutive integer category ids and use all
    categories = range(len(data))
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
# add default train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)
dataloaders["train"] = train_dl
# choose val or test data loader for evaluation
for split in ["val", "test"]:
if split in params.eval_type:
if split == "val":
val_sampler = SequentialSampler(val_ds, batch_size=params.eval_batch_size)
dl = DataLoader(val_ds, val_sampler, num_workers=params.num_workers)
elif split == "test":
test_sampler = SequentialSampler(test_ds, batch_size=params.eval_batch_size)
dl = | DataLoader(test_ds, test_sampler, num_workers=params.num_workers) | megengine.data.DataLoader |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
    for p in self.resnet50.conv1.parameters():
        # note: rebinding the loop variable does not freeze the stored
        # parameters; keep them out of the optimizer to actually freeze them
        p = p.detach()
if config.backbone_freeze_at >= 2:
    for p in self.resnet50.layer1.parameters():
        p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = | M.Linear(256*7*7, 1024) | megengine.module.Linear |
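As a quick, hedged check of the layer just created: M.Linear(256*7*7, 1024) maps a flattened 7x7, 256-channel RoI feature to a 1024-d vector. The batch size below is an illustrative assumption:

import numpy as np
import megengine as mge
import megengine.module as M

fc1 = M.Linear(256 * 7 * 7, 1024)
x = mge.tensor(np.random.random((8, 256 * 7 * 7)).astype(np.float32))
print(fc1(x).shape)  # (8, 1024)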
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = | M.Linear(1024, 1024) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = | M.ReLU() | megengine.module.ReLU |
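A small sketch of how the fc1/fc2 layers and the shared M.ReLU compose in the head above; ToyHead and the batch size are illustrative stand-ins, not the original module:

import numpy as np
import megengine as mge
import megengine.module as M

class ToyHead(M.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = M.Linear(256 * 7 * 7, 1024)
        self.fc2 = M.Linear(1024, 1024)
        self.relu = M.ReLU()  # stateless, safe to reuse between layers
    def forward(self, x):
        return self.relu(self.fc2(self.relu(self.fc1(x))))

head = ToyHead()
x = mge.tensor(np.random.random((4, 256 * 7 * 7)).astype(np.float32))
print(head(x).shape)  # (4, 1024)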
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = | M.Linear(1024, 5 * self.n) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = | M.Linear(1024, 5 * self.n) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = | F.concat([fc3, pred_boxes], axis=1) | megengine.functional.concat |
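The concat above joins each duplicated fc2 feature with the flattened box predictions along axis 1, which is where fc3's input width 1024 + 6*5 = 1054 comes from. A shape walk-through with small illustrative sizes:

import numpy as np
import megengine as mge
import megengine.functional as F

n, c = 3, 4                      # stand-ins for num_rois and 1024
fc2 = mge.tensor(np.random.random((n, c)).astype(np.float32))
pred = mge.tensor(np.random.random((2 * n, 5)).astype(np.float32))
# duplicate each fc2 row twice: (n, c) -> (n, 2, c) -> (2n, c)
dup = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
out = F.concat([dup, pred], axis=1)
print(out.shape)  # (6, 9)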
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
poo5, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
poo5 = | F.flatten(poo5, start_axis=1) | megengine.functional.flatten |
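F.flatten with start_axis=1 collapses the pooled (N, 256, 7, 7) RoI features into (N, 256*7*7), matching fc1's input width. A standalone sketch (shapes assumed from the head above):

import numpy as np
import megengine as mge
import megengine.functional as F

pooled = mge.tensor(np.random.random((8, 256, 7, 7)).astype(np.float32))
flat = F.flatten(pooled, start_axis=1)
print(flat.shape)  # (8, 12544) == (8, 256 * 7 * 7)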
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
poo5, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
poo5 = F.flatten(poo5, start_axis=1)
fc1 = F.relu(self.fc1(poo5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
c = a.shape[1]
        prob = F.stack([a, b], axis=1).reshape(-1, c)
pred_bbox, cls_scores = prob[:,:-self.n], prob[:,-self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = | F.stack([loss0, loss1], axis=1) | megengine.functional.stack |
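The stack above pairs the losses of the two possible prediction-to-target assignments so that min(axis=1) can keep the cheaper one per RoI, which is the core of the EMD/gemini loss. A hedged sketch with made-up per-assignment losses:

import numpy as np
import megengine as mge
import megengine.functional as F

loss0 = mge.tensor(np.array([1.0, 3.0], dtype=np.float32))  # assignment (a, b)
loss1 = mge.tensor(np.array([2.0, 0.5], dtype=np.float32))  # assignment (b, a)
loss = F.stack([loss0, loss1], axis=1)  # (num_rois, 2)
print(loss.min(axis=1).numpy())         # [1.0, 0.5]: cheaper assignment wins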
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
poo5, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
poo5 = F.flatten(poo5, start_axis=1)
fc1 = F.relu(self.fc1(poo5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
c = a.shape[1]
        prob = F.stack([a, b], axis=1).reshape(-1, c)
pred_bbox, cls_scores = prob[:,:-self.n], prob[:,-self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = F.stack([loss0, loss1], axis=1)
vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
return emd_loss
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [256, 512, 1024, 2048]
fpn_dim = 256
        use_bias = True
# lateral_convs = list()
# output_convs = list()
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
M.init.fill_(output_conv.bias, 0)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.bottom_up = bottom_up
def forward(self, x):
bottom_up_features = self.bottom_up(x)
bottom_up_features = bottom_up_features[::-1]
results = []
prev_features = self.lateral_convs[0](bottom_up_features[0])
results.append(self.output_convs[0](prev_features))
for features, lateral_conv, output_conv in zip(
bottom_up_features[1:], self.lateral_convs[1:], self.output_convs[1:]
):
fh, fw = features.shape[2:]
top_down_features = F.nn.interpolate(
                prev_features, size=(fh, fw), mode="BILINEAR")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
results.append(output_conv(prev_features))
# p6
last_p6 = | F.max_pool2d(results[0], kernel_size=1, stride=2, padding=0) | megengine.functional.max_pool2d |
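P6 here is just P5 subsampled by a stride-2, kernel-1 max pool, i.e. every other pixel is kept. Illustrative shapes:

import numpy as np
import megengine as mge
import megengine.functional as F

p5 = mge.tensor(np.random.random((1, 256, 16, 16)).astype(np.float32))
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
print(p6.shape)  # (1, 256, 8, 8)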
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = | M.Linear(1054, 1024) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = | M.Linear(1024, 5 * self.n) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = | M.Linear(1024, 5 * self.n) | megengine.module.Linear |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
| M.init.normal_(l.weight, std=0.01) | megengine.module.init.normal_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
    def _init_weights(self):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
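A standalone sketch of the two initializers used in _init_weights: N(0, 0.01) weights and zero biases (the 8x4 layer is an arbitrary example):

import megengine.module as M

fc = M.Linear(8, 4)
M.init.normal_(fc.weight, std=0.01)
M.init.fill_(fc.bias, 0)
print(fc.bias.numpy())  # all zeros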
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
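    # Refinement module (as in CrowdDet): the first-stage predictions are
    # decoded into per-instance (box offsets + score) vectors, concatenated
    # with the shared fc2 features, and passed through fc3/q/r to produce a
    # second, refined set of EMD predictions.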
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims( | F.softmax(scores, axis=1) | megengine.functional.softmax |
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = | F.concat([offsets, cls_scores], axis=2) | megengine.functional.concat |
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
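        # reshape to (n, 2, 1024) and transpose so fc3[0]/fc3[1] hold the
        # features of the first/second instance branch respectively.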
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
        pool5, rcnn_rois, labels, bbox_targets = roi_pool(
                fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
                labels, bbox_targets)
        pool5 = F.flatten(pool5, start_axis=1)
        fc1 = F.relu(self.fc1(pool5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = | F.softmax(cls_scores, axis=1) | megengine.functional.softmax |
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
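            # each ROI is duplicated (n -> 2n) because the EMD head emits two
            # instance predictions per proposal.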
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
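        # Interleave the two prediction branches so rows 2i / 2i+1 line up
        # with the two targets of ROI i, then score each pair with a softmax
        # classification loss plus a smooth-L1 box loss, summed per ROI.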
c = a.shape[1]
        prob = F.stack([a, b], axis=1).reshape(-1, c)
        pred_bbox, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = F.stack([loss0, loss1], axis=1)
vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
return emd_loss
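# Illustrative sketch (not part of the original file): the core of
# compute_gemini_loss is evaluating both assignment orders of the prediction
# pair (a, b) and keeping, per ROI, whichever order is cheaper.
def _emd_min_demo():
    import numpy as np
    import megengine.functional as F
    from megengine import tensor
    loss_ab = tensor(np.array([0.7, 1.2, 0.3], dtype=np.float32))  # order (a, b)
    loss_ba = tensor(np.array([0.9, 0.8, 0.5], dtype=np.float32))  # order (b, a)
    loss = F.stack([loss_ab, loss_ba], axis=1)  # (3 ROIs, 2 orders)
    return loss.min(axis=1).mean()              # best order per ROI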
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
        in_channels_list = [256, 512, 1024, 2048]
        fpn_dim = 256
        use_bias = True
        lateral_convs, output_convs = [], []
        for idx, in_channels in enumerate(in_channels_list):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
| M.init.msra_normal_(lateral_conv.weight, mode="fan_in") | megengine.module.init.msra_normal_ |
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
| M.init.msra_normal_(output_conv.weight, mode="fan_in") | megengine.module.init.msra_normal_ |
mean = | mge.tensor(mean) | megengine.tensor |
std = | mge.tensor(std) | megengine.tensor |
| M.init.normal_(l.weight, std=0.01) | megengine.module.init.normal_ |
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
prob = | F.stack([a, b], axis=1) | megengine.functional.stack |
prob = | F.stack([a, b], axis=1) | megengine.functional.stack |
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
| M.init.fill_(lateral_conv.bias, 0) | megengine.module.init.fill_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
        pool5, rcnn_rois, labels, bbox_targets = roi_pool(
            fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
            labels, bbox_targets)
        pool5 = F.flatten(pool5, start_axis=1)
        fc1 = F.relu(self.fc1(pool5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
c = a.shape[1]
        prob = F.stack([a, b], axis=1).reshape(-1, c)
        pred_bbox, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = F.stack([loss0, loss1], axis=1)
vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
return emd_loss
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [256, 512, 1024, 2048]
fpn_dim = 256
        use_bias = True
        lateral_convs, output_convs = [], []
        for idx, in_ch in enumerate(in_channels):
            lateral_conv = M.Conv2d(
                in_ch, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
| M.init.fill_(output_conv.bias, 0) | megengine.module.init.fill_ |
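Only the FPN constructor appears in this excerpt; the top-down pathway that uses these lateral and output convolutions is not shown. As a hedged sketch of the conventional merge step (upsample the coarser level, add the 1x1 lateral projection, then smooth with the 3x3 output conv), here is a NumPy stand-in with a nearest-neighbour upsample; the function names are illustrative, not part of this codebase:

import numpy as np

def upsample2x_nearest(x):
    # (N, C, H, W) -> (N, C, 2H, 2W)
    return x.repeat(2, axis=2).repeat(2, axis=3)

def fpn_merge(top, lateral):
    # top: coarser pyramid level, already at fpn_dim channels
    # lateral: 1x1-projected backbone feature of the current level
    # the 3x3 output conv would then smooth the returned sum
    return upsample2x_nearest(top) + lateral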
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # NOTE: rebinding the loop variable is a no-op; the parameter
                # itself is not frozen. To actually freeze these weights,
                # leave them out of the optimizer's parameter list.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # same caveat as above: this does not freeze the parameter
                p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to( | F.expand_dims(pred_boxes, axis=1) | megengine.functional.expand_dims |
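The expand_dims + broadcast_to + reshape chain used throughout refinement_module is a per-row duplication. A small NumPy equivalent that makes the shapes explicit:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(3, 2)   # (n, c) = (3, 2)
tiled = np.broadcast_to(x[:, None, :], (3, 2, 2))  # (n, 2, c): each row twice
flat = tiled.reshape(-1, 2)                        # (2n, c), pairs interleaved
# flat rows: x[0], x[0], x[1], x[1], x[2], x[2]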
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # NOTE: rebinding the loop variable is a no-op; the parameter
                # itself is not frozen. To actually freeze these weights,
                # leave them out of the optimizer's parameter list.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # same caveat as above: this does not freeze the parameter
                p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
n, c = fc2.shape
fc3 = F.broadcast_to( | F.expand_dims(fc2, axis=1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # NOTE: rebinding the loop variable is a no-op; the parameter
                # itself is not frozen. To actually freeze these weights,
                # leave them out of the optimizer's parameter list.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # same caveat as above: this does not freeze the parameter
                p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
        pool5, rcnn_rois, labels, bbox_targets = roi_pool(
            fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
            labels, bbox_targets)
        pool5 = F.flatten(pool5, start_axis=1)
        fc1 = F.relu(self.fc1(pool5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, | F.expand_dims(cls_prob, axis=2) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # NOTE: rebinding the loop variable is a no-op; the parameter
                # itself is not frozen. To actually freeze these weights,
                # leave them out of the optimizer's parameter list.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # same caveat as above: this does not freeze the parameter
                p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
        pool5, rcnn_rois, labels, bbox_targets = roi_pool(
            fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
            labels, bbox_targets)
        pool5 = F.flatten(pool5, start_axis=1)
        fc1 = F.relu(self.fc1(pool5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to( | F.expand_dims(rcnn_rois[:, 1:5], axis=1) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
from megengine import Parameter, tensor
from megengine.module import AvgPool2d, MaxPool2d
def test_avg_pool2d():
def test_func(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
kernel_size,
stride,
padding,
):
pool = | AvgPool2d(kernel_size, stride=stride, padding=padding, mode="average") | megengine.module.AvgPool2d |
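test_func checks the pooled shape against the standard output-size formula out = (in + 2 * padding - kernel) // stride + 1, as computed a few lines below. A quick worked instance of the arithmetic:

in_size, kernel, stride, padding = 10, 3, 2, 1
out = (in_size + 2 * padding - kernel) // stride + 1
assert out == 5  # (10 + 2 - 3) // 2 + 1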
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
from megengine import Parameter, tensor
from megengine.module import AvgPool2d, MaxPool2d
def test_avg_pool2d():
def test_func(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
kernel_size,
stride,
padding,
):
pool = AvgPool2d(kernel_size, stride=stride, padding=padding, mode="average")
inp = np.random.normal(
size=(batch_size, in_channels, in_height, in_width)
).astype(np.float32)
out_height = (in_height + padding * 2 - kernel_size) // stride + 1
out_width = (in_width + padding * 2 - kernel_size) // stride + 1
out = pool( | tensor(inp) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from tempfile import TemporaryFile
import numpy as np
from megengine.core import Buffer, Parameter, tensor
from megengine.test import assertTensorClose
def test_tensor_serialization():
def tensor_eq(a, b):
assert a.dtype == b.dtype
assert a.device == b.device
assert a.requires_grad == b.requires_grad
| assertTensorClose(a, b) | megengine.test.assertTensorClose |
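tensor_eq compares dtype, device, requires_grad and values; the test body then round-trips tensors through pickle. The same mechanics with a plain NumPy array, just to show the pattern (the actual test pickles megengine tensors):

import pickle
from tempfile import TemporaryFile

import numpy as np

data = np.random.randint(low=0, high=7, size=[233]).astype(np.int32)
with TemporaryFile() as f:
    pickle.dump(data, f)
    f.seek(0)
    restored = pickle.load(f)
np.testing.assert_array_equal(data, restored)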
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from tempfile import TemporaryFile
import numpy as np
from megengine.core import Buffer, Parameter, tensor
from megengine.test import assertTensorClose
def test_tensor_serialization():
def tensor_eq(a, b):
assert a.dtype == b.dtype
assert a.device == b.device
assert a.requires_grad == b.requires_grad
assertTensorClose(a, b)
with TemporaryFile() as f:
data = np.random.randint(low=0, high=7, size=[233])
a = | tensor(data, device="xpux", dtype=np.int32) | megengine.core.tensor |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = | M.Linear(feature_dim, num_class, bias=False) | megengine.module.Linear |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = | F.normalize(self.weight, axis=1) | megengine.functional.normalize |
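Because the embedding and every weight row are both L2-normalized, the matmul in LogitsFullyConnected yields cosine similarities, so every logit lies in [-1, 1]. A NumPy check of that property:

import numpy as np

w = np.random.randn(10, 512).astype(np.float32)
x = np.random.randn(4, 512).astype(np.float32)
w /= np.linalg.norm(w, axis=1, keepdims=True)
x /= np.linalg.norm(x, axis=1, keepdims=True)
logits = x @ w.T  # (4, 10) cosine similarities
assert np.all(np.abs(logits) <= 1.0 + 1e-5)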
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = | F.one_hot(target, self.num_class) | megengine.functional.one_hot |
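AdditiveMarginSoftmax subtracts a fixed margin from the target-class cosine logit before scaling, as in CosFace. A worked NumPy example for one sample:

import numpy as np

logits = np.array([0.6, 0.2, -0.1], dtype=np.float32)  # cosine logits, 3 classes
target, margin, scale = 0, 0.35, 64.0
one_hot = np.eye(3, dtype=np.float32)[target]
adjusted = (logits - one_hot * margin) * scale
# the target logit becomes (0.6 - 0.35) * 64 = 16.0; the rest are only scaled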
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = | F.loss.cross_entropy(logits, target) | megengine.functional.loss.cross_entropy |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = | F.topk_accuracy(origin_logits, target, topk=1) | megengine.functional.topk_accuracy |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
small_margined_logit = origin_logits
margined_logit = | F.where(origin_logits >= 0, large_margined_logit, small_margined_logit) | megengine.functional.where |
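AdditiveAngularMarginSoftmax adds its margin in angle space, cos(theta + m), which shrinks the target logit whenever theta + m stays below pi. The F.where on the sign of the logit resembles the 'easy margin' trick seen in common ArcFace implementations: the angular penalty is applied only where cos(theta) >= 0. A numeric check of the margined value:

import numpy as np

cos_theta, m = 0.8, 0.5
theta = np.arccos(cos_theta)  # ~0.6435 rad
margined = np.cos(theta + m)  # ~0.414, down from 0.8
assert margined < cos_theta   # the target class pays an angular penalty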
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
small_margined_logit = origin_logits
margined_logit = F.where(origin_logits >= 0, large_margined_logit, small_margined_logit)
logits = | F.where(one_hot_target, margined_logit, origin_logits) | megengine.functional.where |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
small_margined_logit = origin_logits
margined_logit = F.where(origin_logits >= 0, large_margined_logit, small_margined_logit)
logits = F.where(one_hot_target, margined_logit, origin_logits)
logits = logits * self.scale
loss = | F.loss.cross_entropy(logits, target) | megengine.functional.loss.cross_entropy |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
small_margined_logit = origin_logits
margined_logit = F.where(origin_logits >= 0, large_margined_logit, small_margined_logit)
logits = F.where(one_hot_target, margined_logit, origin_logits)
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = | F.topk_accuracy(origin_logits, target, topk=1) | megengine.functional.topk_accuracy |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = | F.one_hot(target, self.num_class) | megengine.functional.one_hot |
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos( | F.acos(origin_logits) | megengine.functional.acos |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = | mge.tensor(x, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = | mge.tensor(s, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = | mge.tensor(g_y, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = | tqt_forward(-127, 127, x, s) | megengine.quantization.utils.tqt_forward |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
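# Round above acts as a straight-through estimator (STE): forward rounds the
# input, while backward returns the incoming gradient unchanged, so the
# non-differentiable rounding step behaves as identity during backpropagation.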
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
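# Worked numeric trace of fake_quant_tensor_gt (illustrative values only):
# with scale = 4, zero_point = 1, qmin = -126, qmax = 129, an input of 9.0
# maps to round(9 / 4) + 1 = 3, stays inside [qmin, qmax], and dequantizes
# back to (3 - 1) * 4 = 8.0.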
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
zero_point = | tensor([1.0], dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
zero_point = tensor([1.0], dtype=np.float32)
scale = | tensor([4.0], dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = | tensor(inp_data, dtype=np.float32) | megengine.tensor |