mirror of https://github.com/Jittor/Jittor
add some ops
This commit is contained in:
parent c1e0861446
commit 113d917dc4
@@ -681,7 +681,7 @@ def jittor_exit():
atexit.register(jittor_exit)

Var.__str__ = lambda x: str(x.data)
Var.__repr__ = lambda x: f"jt.Var:{x.dtype}{x.uncertain_shape}"
Var.__repr__ = lambda x: str(x.data)
Var.peek = lambda x: f"{x.dtype}{x.shape}"

from . import nn
@@ -58,6 +58,7 @@ def argmax_pool(x, size, stride, padding=0):
def concat(arr, dim):
    # TODO: low performance when concat lots of vars
    total_dim = 0
    if dim < 0: dim += len(arr[0].shape)
    for a in arr:
        total_dim += a.shape[dim]
    cdim = 0
@@ -75,9 +75,21 @@ def linear(x, n):
    return jt.matmul(x, w) + b

def relu(x): return jt.maximum(x, 0)
def leaky_relu(x, scale): return jt.ternary(x>0, x, x*scale)
def leaky_relu(x, scale=0.01): return jt.ternary(x>0, x, x*scale)
def relu6(x): return jt.minimum(jt.maximum(x, 0), 6)

class PReLU(Module):
    def __init__(self, num_parameters=1, init_=0.25):
        self.num_parameters = num_parameters
        self.a = init.constant((num_parameters,), "float32", init_)

    def execute(self, x):
        if self.num_parameters != 1:
            assert self.num_parameters == x.size(1), f"num_parameters does not match input channels in PReLU"
            return jt.maximum(0, x) + self.a.broadcast(x, [0,2,3]) * jt.minimum(0, x)
        else:
            return jt.maximum(0, x) + self.a * jt.minimum(0, x)

#TODO dims is 4 will cause slowly execution
def cross_entropy_loss(output, target, ignore_index=None):
    if len(output.shape) == 4:
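A minimal usage sketch for the activation ops added above (same call patterns as the new test_relu test further down; the snippet is illustrative only and not part of the patch):

import numpy as np
import jittor as jt
import jittor.nn as nn

x = jt.array(np.random.randn(16, 10, 224, 224))
y = nn.PReLU(10, 0.25)(x)      # one learnable slope per channel; per-channel mode assumes NCHW input (broadcast over [0,2,3])
y = nn.PReLU()(x)              # single shared slope
y = nn.relu6(x)                # min(max(x, 0), 6)
y = nn.leaky_relu(x, 0.02)     # scale applied to the negative part; the default scale is now 0.01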
@@ -273,9 +285,76 @@ class BatchNorm(Module):
        b = self.bias.broadcast(x, [0,2,3])
        return norm_x * w + b

class InstanceNorm2d(Module):
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=None, is_train=True, sync=True):
        assert affine == None
        self.sync = sync
        self.num_features = num_features
        self.is_train = is_train
        self.eps = eps
        self.momentum = momentum
        self.weight = init.constant((num_features,), "float32", 1.0)
        self.bias = init.constant((num_features,), "float32", 0.0)
        self.running_mean = init.constant((num_features,), "float32", 0.0).stop_grad()
        self.running_var = init.constant((num_features,), "float32", 1.0).stop_grad()

    def execute(self, x):
        if self.is_train:
            xmean = jt.mean(x, dims=[2,3], keepdims=1)
            x2mean = jt.mean(x*x, dims=[2,3], keepdims=1)
            if self.sync and jt.mpi:
                xmean = xmean.mpi_all_reduce("mean")
                x2mean = x2mean.mpi_all_reduce("mean")

            xvar = x2mean-xmean*xmean
            norm_x = (x-xmean)/jt.sqrt(xvar+self.eps)
            self.running_mean += (xmean.sum([0,2,3])-self.running_mean)*self.momentum
            self.running_var += (xvar.sum([0,2,3])-self.running_var)*self.momentum
        else:
            running_mean = self.running_mean.broadcast(x, [0,2,3])
            running_var = self.running_var.broadcast(x, [0,2,3])
            norm_x = (x-running_mean)/jt.sqrt(running_var+self.eps)
        w = self.weight.broadcast(x, [0,2,3])
        b = self.bias.broadcast(x, [0,2,3])
        return norm_x * w + b

class BatchNorm1d(Module):
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=None, is_train=True, sync=True):
        assert affine == None
        self.sync = sync
        self.num_features = num_features
        self.is_train = is_train
        self.eps = eps
        self.momentum = momentum
        self.weight = init.constant((num_features,), "float32", 1.0)
        self.bias = init.constant((num_features,), "float32", 0.0)
        self.running_mean = init.constant((num_features,), "float32", 0.0).stop_grad()
        self.running_var = init.constant((num_features,), "float32", 1.0).stop_grad()

    def execute(self, x):
        if self.is_train:
            xmean = jt.mean(x, dims=[0], keepdims=1)
            x2mean = jt.mean(x*x, dims=[0], keepdims=1)

            if self.sync and jt.mpi:
                xmean = xmean.mpi_all_reduce("mean")
                x2mean = x2mean.mpi_all_reduce("mean")

            xvar = x2mean-xmean*xmean
            norm_x = (x-xmean)/jt.sqrt(xvar+self.eps)
            self.running_mean += (xmean.sum([0])-self.running_mean)*self.momentum
            self.running_var += (xvar.sum([0])-self.running_var)*self.momentum
        else:
            running_mean = self.running_mean.broadcast(x, [0])
            running_var = self.running_var.broadcast(x, [0])
            norm_x = (x-running_mean)/jt.sqrt(running_var+self.eps)
        w = self.weight.broadcast(x, [0])
        b = self.bias.broadcast(x, [0])
        return norm_x * w + b

Relu = jt.make_module(relu)
ReLU = Relu
Leaky_relu = jt.make_module(leaky_relu, 0.01)
Leaky_relu = jt.make_module(leaky_relu, 2)
LeakyReLU = Leaky_relu
ReLU6 = jt.make_module(relu6)
Softmax = jt.make_module(softmax, 2)
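A quick usage sketch for the two new normalization modules (mirroring the call pattern of the new test_batchnorm test below; illustrative only):

import numpy as np
import jittor as jt
import jittor.nn as nn

x4d = jt.array(np.random.randn(16, 10, 224, 224))
y = nn.InstanceNorm2d(10)(x4d)    # normalizes each (sample, channel) slice over H and W

x2d = jt.array(np.random.randn(16, 1000))
y = nn.BatchNorm1d(1000)(x2d)     # normalizes each feature over the batch dimension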
@@ -389,6 +468,151 @@ class ConvTranspose(Module):
        return y


class ReflectionPad2d(Module):
    def __init__(self, padding):
        self.padding = padding
        if isinstance(self.padding, int):
            self.pl = self.padding
            self.pr = self.padding
            self.pt = self.padding
            self.pb = self.padding
        elif isinstance(self.padding, tuple):
            self.pl, self.pr, self.pt, self.pb = self.padding
        else:
            raise TypeError(f"ReflectionPad2d padding just support int or tuple, but found {type(padding)}")

    def execute(self, x):
        n,c,h,w = x.shape
        assert (self.pl < w and self.pr < w), f"padding_left and padding_right should be smaller than input width"
        assert (self.pt < h and self.pb < h), f"padding_top and padding_bottom should be smaller than input height"
        oh=h+self.pt+self.pb
        ow=w+self.pl+self.pr
        l = self.pl
        r = self.pl + w - 1
        t = self.pt
        b = self.pt + h - 1
        x_idx = np.zeros((oh,ow))
        y_idx = np.zeros((oh,ow))
        for j in range(oh):
            for i in range(ow):
                if i >= l and i <= r and j >= t and j <= b:
                    x_idx[j,i] = i
                    y_idx[j,i] = j
                elif i < l and j < t:
                    x_idx[j,i] = 2 * l - i
                    y_idx[j,i] = 2 * t - j
                elif i < l and j > b:
                    x_idx[j,i] = 2 * l - i
                    y_idx[j,i] = 2 * b - j
                elif i > r and j < t:
                    x_idx[j,i] = 2 * r - i
                    y_idx[j,i] = 2 * t - j
                elif i > r and j > b:
                    x_idx[j,i] = 2 * r - i
                    y_idx[j,i] = 2 * b - j
                elif i < l:
                    x_idx[j,i] = 2 * l - i
                    y_idx[j,i] = j
                elif i > r:
                    x_idx[j,i] = 2 * r - i
                    y_idx[j,i] = j
                elif j < t:
                    x_idx[j,i] = i
                    y_idx[j,i] = 2 * t - j
                elif j > b:
                    x_idx[j,i] = i
                    y_idx[j,i] = 2 * b - j
        return x.reindex([n,c,oh,ow], ["i0","i1","@e1(i2,i3)","@e0(i2,i3)"], extras=[jt.array(x_idx - self.pl), jt.array(y_idx - self.pt)])

class ZeroPad2d(Module):
    def __init__(self, padding):
        self.padding = padding
        if isinstance(self.padding, int):
            self.pl = self.padding
            self.pr = self.padding
            self.pt = self.padding
            self.pb = self.padding
        elif isinstance(self.padding, tuple):
            self.pl, self.pr, self.pt, self.pb = self.padding
        else:
            raise TypeError(f"ZeroPad2d padding just support int or tuple, but found {type(padding)}")

    def execute(self, x):
        n,c,h,w = x.shape
        return x.reindex([n,c,h+self.pt+self.pb,w+self.pl+self.pr], ["i0","i1",f"i2-{self.pt}",f"i3-{self.pl}"])

class ConstantPad2d(Module):
    def __init__(self, padding, value):
        self.padding = padding
        if isinstance(self.padding, int):
            self.pl = self.padding
            self.pr = self.padding
            self.pt = self.padding
            self.pb = self.padding
        elif isinstance(self.padding, tuple):
            self.pl, self.pr, self.pt, self.pb = self.padding
        else:
            raise TypeError(f"ConstantPad2d padding just support int or tuple, but found {type(padding)}")
        self.value = value

    def execute(self, x):
        n,c,h,w = x.shape
        return x.reindex([n,c,h+self.pt+self.pb,w+self.pl+self.pr], ["i0","i1",f"i2-{self.pt}",f"i3-{self.pl}"], overflow_value=self.value)

class ReplicationPad2d(Module):
    def __init__(self, padding):
        self.padding = padding
        if isinstance(self.padding, int):
            self.pl = self.padding
            self.pr = self.padding
            self.pt = self.padding
            self.pb = self.padding
        elif isinstance(self.padding, tuple):
            self.pl, self.pr, self.pt, self.pb = self.padding
        else:
            raise TypeError(f"ReplicationPad2d padding just support int or tuple, but found {type(padding)}")

    def execute(self, x):
        n,c,h,w = x.shape
        oh=h+self.pt+self.pb
        ow=w+self.pl+self.pr
        l = self.pl
        r = self.pl + w - 1
        t = self.pt
        b = self.pt + h - 1
        x_idx = np.zeros((oh,ow))
        y_idx = np.zeros((oh,ow))
        for j in range(oh):
            for i in range(ow):
                if i >= l and i <= r and j >= t and j <= b:
                    x_idx[j,i] = i
                    y_idx[j,i] = j
                elif i < l and j < t:
                    x_idx[j,i] = l
                    y_idx[j,i] = t
                elif i < l and j > b:
                    x_idx[j,i] = l
                    y_idx[j,i] = b
                elif i > r and j < t:
                    x_idx[j,i] = r
                    y_idx[j,i] = t
                elif i > r and j > b:
                    x_idx[j,i] = r
                    y_idx[j,i] = b
                elif i < l:
                    x_idx[j,i] = l
                    y_idx[j,i] = j
                elif i > r:
                    x_idx[j,i] = r
                    y_idx[j,i] = j
                elif j < t:
                    x_idx[j,i] = i
                    y_idx[j,i] = t
                elif j > b:
                    x_idx[j,i] = i
                    y_idx[j,i] = b
        return x.reindex([n,c,oh,ow], ["i0","i1","@e1(i2,i3)","@e0(i2,i3)"], extras=[jt.array(x_idx - self.pl), jt.array(y_idx - self.pt)])

class Tanh(Module):
    def __init__(self):
        super().__init__()
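A usage sketch for the four new padding modules (padding is an int or a (left, right, top, bottom) tuple, as the __init__ code above reads it; the output shapes in the comments follow from oh = h + pt + pb and ow = w + pl + pr; illustrative only):

import numpy as np
import jittor as jt
import jittor.nn as nn

x = jt.array(np.random.randn(2, 3, 8, 8))
y = nn.ReflectionPad2d(2)(x)              # mirrored borders, shape [2, 3, 12, 12]
y = nn.ReplicationPad2d((1, 2, 3, 4))(x)  # edge values repeated, shape [2, 3, 15, 11]
y = nn.ZeroPad2d(1)(x)                    # zeros outside, shape [2, 3, 10, 10]
y = nn.ConstantPad2d(1, -1.0)(x)          # constant fill of -1.0, shape [2, 3, 10, 10]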
@@ -407,8 +631,8 @@ def resize(x, size, mode="nearest"):
    H,W = size
    new_size = [n,c,H,W]
    nid, cid, hid, wid = jt.index(new_size)
    x = hid * ((h-1)/(H-1))
    y = wid * ((w-1)/(W-1))
    x = hid * h / H
    y = wid * w / W
    if mode=="nearest":
        return img.reindex([nid, cid, x.floor(), y.floor()])
    if mode=="bilinear":
@@ -426,7 +650,13 @@ def resize(x, size, mode="nearest"):
        return o
    raise ValueError(f"Not support {mode}")


class Upsample(Module):
    def __init__(self, scale_factor=None, mode='nearest'):
        self.scale_factor = scale_factor if isinstance(scale_factor, tuple) else (scale_factor, scale_factor)
        self.mode = mode

    def execute(self, x):
        return resize(x, size=(int(x.shape[2]*self.scale_factor[0]), int(x.shape[3]*self.scale_factor[1])), mode=self.mode)

class Sequential(Module):
    def __init__(self, *args):
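A usage sketch for the new Upsample module (it forwards to the resize function above, so only the modes resize handles apply; illustrative only):

import numpy as np
import jittor as jt
import jittor.nn as nn

x = jt.array(np.random.randn(1, 3, 32, 32))
y = nn.Upsample(scale_factor=2)(x)                    # nearest by default, output [1, 3, 64, 64]
y = nn.Upsample(scale_factor=0.5, mode='nearest')(x)  # fractional factors downscale, output [1, 3, 16, 16]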
@@ -0,0 +1,58 @@
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Wenyang Zhou <576825820@qq.com>
# Dun Liang <randonlang@gmail.com>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
import jittor.nn as jnn

try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch
    import torch.nn as tnn
except:
    torch = None
    tnn = None

def check_equal(a, b):
    eps = 1e-1 # icc error almost reaches 1e-1
    relative_error = (abs(a - b) / abs(b + 1)).mean()
    print(f"relative_error: {relative_error}")
    return relative_error < eps

class TestBatchNorm(unittest.TestCase):
    def test_batchnorm(self):
        # ***************************************************************
        # Define jittor & pytorch array
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        jittor_arr = jt.array(arr)
        pytorch_arr = torch.Tensor(arr)
        # ***************************************************************
        # Test InstanceNorm2d Layer
        # ***************************************************************
        pytorch_result = tnn.InstanceNorm2d(10)(pytorch_arr)
        jittor_result = jnn.InstanceNorm2d(10)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

        # ***************************************************************
        # Define jittor & pytorch array
        # ***************************************************************
        arr = np.random.randn(16,1000)
        jittor_arr = jt.array(arr)
        pytorch_arr = torch.Tensor(arr)
        # ***************************************************************
        # Test BatchNorm1d Layer
        # ***************************************************************
        pytorch_result = tnn.BatchNorm1d(1000)(pytorch_arr)
        jittor_result = jnn.BatchNorm1d(1000)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,75 @@
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Wenyang Zhou <576825820@qq.com>
# Dun Liang <randonlang@gmail.com>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
import jittor.nn as jnn

try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch
    import torch.nn as tnn
except:
    torch = None
    tnn = None

def check_equal(a, b):
    eps = 1e-1 # icc error almost reaches 1e-1
    relative_error = (abs(a - b) / abs(b + 1)).mean()
    print(f"relative_error: {relative_error}")
    return relative_error < eps

class TestPad(unittest.TestCase):
    def test_pad(self):
        # ***************************************************************
        # Define jittor & pytorch array
        # ***************************************************************
        arr = np.random.randn(16,3,224,224)
        jittor_arr = jt.array(arr)
        pytorch_arr = torch.Tensor(arr)
        # ***************************************************************
        # Test ReplicationPad2d Layer
        # ***************************************************************
        pytorch_result = tnn.ReplicationPad2d(10)(pytorch_arr)
        jittor_result = jnn.ReplicationPad2d(10)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.ReplicationPad2d((1,23,4,5))(pytorch_arr)
        jittor_result = jnn.ReplicationPad2d((1,23,4,5))(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        # ***************************************************************
        # Test ConstantPad2d Layer
        # ***************************************************************
        pytorch_result = tnn.ConstantPad2d(10,-2)(pytorch_arr)
        jittor_result = jnn.ConstantPad2d(10,-2)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.ConstantPad2d((2,3,34,1),10.2)(pytorch_arr)
        jittor_result = jnn.ConstantPad2d((2,3,34,1),10.2)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        # ***************************************************************
        # Test ZeroPad2d Layer
        # ***************************************************************
        pytorch_result = tnn.ZeroPad2d(1)(pytorch_arr)
        jittor_result = jnn.ZeroPad2d(1)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.ZeroPad2d((2,3,34,1))(pytorch_arr)
        jittor_result = jnn.ZeroPad2d((2,3,34,1))(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        # ***************************************************************
        # Test ReflectionPad2d Layer
        # ***************************************************************
        pytorch_result = tnn.ReflectionPad2d(20)(pytorch_arr)
        jittor_result = jnn.ReflectionPad2d(20)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.ReflectionPad2d((2,3,34,1))(pytorch_arr)
        jittor_result = jnn.ReflectionPad2d((2,3,34,1))(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,71 @@
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Wenyang Zhou <576825820@qq.com>
# Dun Liang <randonlang@gmail.com>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
import jittor.nn as jnn

try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch
    import torch.nn as tnn
except:
    torch = None
    tnn = None

def check_equal(a, b):
    eps = 1e-1 # icc error almost reaches 1e-1
    relative_error = (abs(a - b) / abs(b + 1)).mean()
    print(f"relative_error: {relative_error}")
    return relative_error < eps

class TestRelu(unittest.TestCase):
    def test_relu(self):
        # ***************************************************************
        # Define jittor & pytorch array
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        jittor_arr = jt.array(arr)
        pytorch_arr = torch.Tensor(arr)
        # ***************************************************************
        # Test PReLU Layer
        # ***************************************************************
        pytorch_result = tnn.PReLU(10, 2)(pytorch_arr)
        jittor_result = jnn.PReLU(10, 2)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.PReLU(10, -0.2)(pytorch_arr)
        jittor_result = jnn.PReLU(10, -0.2)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.PReLU(10, 99.9)(pytorch_arr)
        jittor_result = jnn.PReLU(10, 99.9)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

        # ***************************************************************
        # Test ReLU6 Layer
        # ***************************************************************
        pytorch_result = tnn.ReLU6()(pytorch_arr)
        jittor_result = jnn.ReLU6()(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

        # ***************************************************************
        # Test LeakyReLU Layer
        # ***************************************************************
        pytorch_result = tnn.LeakyReLU(2)(pytorch_arr)
        jittor_result = jnn.LeakyReLU(2)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.LeakyReLU()(pytorch_arr)
        jittor_result = jnn.LeakyReLU()(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"
        pytorch_result = tnn.LeakyReLU(99.9)(pytorch_arr)
        jittor_result = jnn.LeakyReLU(99.9)(jittor_arr)
        assert check_equal(pytorch_result.detach().numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

if __name__ == "__main__":
    unittest.main()
@@ -11,6 +11,16 @@ import jittor as jt
import random
import os

import numpy as np
import jittor.nn as jnn
try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch
    import torch.nn as tnn
except:
    torch = None
    tnn = None

mid = 0
if os.uname()[1] == "jittor-ce":
    mid = 1
@@ -74,6 +84,12 @@ def test_case(box_num, out_size, time_limit):
    assert fused_op_num == 1, fused_op_num
    assert t <= time_limit, t

def check_equal(a, b):
    eps = 1e-1 # icc error almost reaches 1e-1
    relative_error = (abs(a - b) / abs(b + 1)).mean()
    print(f"relative_error: {relative_error}")
    return relative_error < eps

class TestResizeAndCrop(unittest.TestCase):
    def test(self):
        test_case(100, [224, 224], 0.45)
@@ -81,5 +97,23 @@ class TestResizeAndCrop(unittest.TestCase):
        test_case(20, [1024, 1024], [1.2, 1.8][mid])
        test_case(20, [1024, 666], [0.8,1.0][mid])

    def test_upsample(self):
        # ***************************************************************
        # Define jittor & pytorch array
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        jittor_arr = jt.array(arr)
        pytorch_arr = torch.Tensor(arr)
        # ***************************************************************
        # Test Upsample Layer
        # ***************************************************************
        pytorch_result = tnn.Upsample(scale_factor=2)(pytorch_arr)
        jittor_result = jnn.Upsample(scale_factor=2)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

        pytorch_result = tnn.Upsample(scale_factor=0.2)(pytorch_arr)
        jittor_result = jnn.Upsample(scale_factor=0.2)(jittor_arr)
        assert check_equal(pytorch_result.numpy(), jittor_result.numpy()), f"{pytorch_result.mean()} || {jittor_result.mean()}"

if __name__ == "__main__":
    unittest.main()
@@ -114,16 +114,31 @@ class Compose:
        return data

class Resize:
    def __init__(self, size):
    def __init__(self, size, mode=Image.BILINEAR):
        if isinstance(size, int):
            size = (size, size)
        assert isinstance(size, tuple)
        self.size = size
        self.mode = mode
    def __call__(self, img:Image.Image):
        return img.resize(self.size, Image.BILINEAR)
        return img.resize(self.size, self.mode)

class Gray:
    def __call__(self, img:Image.Image):
        img = np.array(img.convert('L'))
        img = img[np.newaxis, :]
        return np.array((img / 255.0), dtype = np.float32)

class RandomCrop:
    def __init__(self, size):
        if isinstance(size, int):
            size = (size, size)
        assert isinstance(size, tuple)
        self.size = size
    def __call__(self, img:Image.Image):
        width, height = img.size
        assert self.size[0] <= height and self.size[1] <= width, f"crop size exceeds the input image in RandomCrop"
        top = np.random.randint(0,height-self.size[0])
        left = np.random.randint(0,width-self.size[1])
        return crop(img, top, left, self.size[0], self.size[1])
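A usage sketch for the updated transform classes (Resize now accepts a PIL resampling mode, and RandomCrop needs the crop to fit inside the image; the jittor.transform module path is assumed here, it is not stated in the diff):

import numpy as np
from PIL import Image
from jittor import transform

img = Image.fromarray(np.uint8(np.random.rand(256, 320, 3) * 255))
img = transform.Resize(224)(img)                        # bilinear by default
img = transform.Resize((224, 224), Image.NEAREST)(img)  # explicit resampling mode
img = transform.RandomCrop(128)(img)                    # random 128x128 window
arr = transform.Gray()(img)                             # float32 array, shape (1, 128, 128), values in [0, 1]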
@@ -76,6 +76,7 @@ pjmap = {
        },
        'links': {},
        'extras': {},
        'delete': ['inplace'],
    },
    'ReLU6': {
        'pytorch': {
@@ -88,6 +89,19 @@ pjmap = {
        },
        'links': {},
        'extras': {},
        'delete': ['inplace'],
    },
    'PReLU': {
        'pytorch': {
            'args': 'num_parameters=1, init=0.25',
        },
        'jittor': {
            'module': 'nn',
            'name': 'PReLU',
            'args': 'num_parameters=1, init_=0.25'
        },
        'links': {'init': 'init_'},
        'extras': {},
    },
    'LeakyReLU': {
        'pytorch': {
@@ -96,10 +110,11 @@ pjmap = {
        'jittor': {
            'module': 'nn',
            'name': 'LeakyReLU',
            'args': 'scale'
            'args': 'scale=0.01'
        },
        'links': {'negative_slope': 'scale'},
        'extras': {},
        'delete': ['inplace'],
    },
    'BatchNorm2d': {
        'pytorch': {
@@ -113,6 +128,31 @@ pjmap = {
        'links': {},
        'extras': {},
    },
    'BatchNorm1d': {
        'pytorch': {
            'args': "num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True"
        },
        'jittor': {
            'module': 'nn',
            'name': 'BatchNorm1d',
            'args': 'num_features, eps=1e-5, momentum=0.1, affine=None, is_train=True, sync=True',
        },
        'links': {},
        'extras': {'affine': 'None'},
        'delete': ['track_running_stats'],
    },
    'InstanceNorm2d': {
        'pytorch': {
            'args': "num_features, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False"
        },
        'jittor': {
            'module': 'nn',
            'name': 'InstanceNorm2d',
            'args': 'num_features, eps=1e-05, momentum=0.1, affine=None, is_train=True, sync=True'
        },
        'links': {},
        'extras': {'affine': 'None'},
    },
    'Dropout2d': {
        'pytorch': {
            'args': 'p=0.5, inplace=False',
@@ -124,6 +164,19 @@ pjmap = {
        },
        'links': {},
        'extras': {},
        'delete': ['inplace'],
    },
    'Upsample': {
        'pytorch': {
            'args': "size=None, scale_factor=None, mode='nearest', align_corners=None",
        },
        'jittor': {
            'module': 'nn',
            'name': 'Upsample',
            'args': "scale_factor=None, mode='nearest'"
        },
        'links': {},
        'extras': {},
    },
    'kaiming_normal_': {
        'pytorch': {
@@ -250,9 +303,26 @@ unsupport_ops = [
    # ***************************************************************
    'Parameter', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict',
    'Conv1d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose3d', 'Unfold', 'Fold',
    'MaxPool1d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'AvgPool1d', 'AvgPool3d', 'FractionalMaxPool2d', 'LPPool1d', 'LPPool2d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveAvgPool3d',
    'ReflectionPad1d', 'ReflectionPad2d', 'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'ELU', 'Hardshrink', 'Hardtanh', 'LogSigmoid', 'MultiheadAttention',
    'PReLU', 'RReLU', 'SELU', 'CELU', 'GELU', 'Softplus', 'Softshrink', 'Softsign', 'Tanhshrink', 'Threshold', 'Softmin', 'Softmax2d', 'LogSoftmax', 'AdaptiveLogSoftmaxWithLoss', 'BatchNorm1d', 'BatchNorm3d', 'GroupNorm', 'SyncBatchNorm', 'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'LocalResponseNorm', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCell', 'LSTMCell', 'GRUCell', 'Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Identity', 'Bilinear', 'Dropout3d', 'AlphaDropout', 'Embedding', 'EmbeddingBag', 'CosineSimilarity', 'PairwiseDistance', 'L1Loss', 'MSELoss', 'CTCLoss', 'NLLLoss', 'PoissonNLLLoss', 'KLDivLoss', 'BCELoss', 'BCEWithLogitsLoss', 'MarginRankingLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss', 'SmoothL1Loss', 'SoftMarginLoss', 'MultiLabelSoftMarginLoss', 'CosineEmbeddingLoss', 'MultiMarginLoss', 'TripletMarginLoss', 'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d', 'DataParallel', 'DistributedDataParallel', 'clip_grad_norm_', 'clip_grad_value_', 'parameters_to_vector', 'vector_to_parameters', 'BasePruningMethod', 'PruningContainer', 'Identity', 'RandomUnstructured', 'L1Unstructured', 'RandomStructured', 'LnStructured', 'CustomFromMask', 'identity', 'random_unstructured', 'l1_unstructured', 'random_structured', 'ln_structured', 'global_unstructured', 'custom_from_mask', 'remove', 'is_pruned', 'weight_norm', 'remove_weight_norm', 'spectral_norm', 'remove_spectral_norm', 'PackedSequence', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence', 'pack_sequence'
    'MaxPool1d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'AvgPool1d',
    'AvgPool3d', 'FractionalMaxPool2d', 'LPPool1d', 'LPPool2d', 'AdaptiveMaxPool1d',
    'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d', 'AdaptiveAvgPool3d',
    'ReflectionPad1d', 'ReplicationPad1d', 'ReplicationPad3d', 'ConstantPad1d', 'ConstantPad3d',
    'ELU', 'Hardshrink', 'Hardtanh', 'LogSigmoid', 'MultiheadAttention',
    'RReLU', 'SELU', 'CELU', 'GELU', 'Softplus', 'Softshrink', 'Softsign', 'Tanhshrink',
    'Threshold', 'Softmin', 'Softmax2d', 'LogSoftmax', 'AdaptiveLogSoftmaxWithLoss',
    'BatchNorm3d', 'GroupNorm', 'SyncBatchNorm', 'InstanceNorm1d', 'InstanceNorm3d', 'LocalResponseNorm',
    'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCell', 'LSTMCell', 'GRUCell', 'Transformer', 'TransformerEncoder',
    'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Identity', 'Bilinear',
    'Dropout3d', 'AlphaDropout', 'EmbeddingBag', 'CosineSimilarity', 'PairwiseDistance', 'L1Loss',
    'MSELoss', 'CTCLoss', 'NLLLoss', 'PoissonNLLLoss', 'KLDivLoss', 'BCELoss', 'BCEWithLogitsLoss',
    'MarginRankingLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss', 'SmoothL1Loss', 'SoftMarginLoss',
    'MultiLabelSoftMarginLoss', 'CosineEmbeddingLoss', 'MultiMarginLoss', 'TripletMarginLoss', 'UpsamplingNearest2d',
    'UpsamplingBilinear2d', 'DataParallel', 'DistributedDataParallel', 'clip_grad_norm_', 'clip_grad_value_',
    'parameters_to_vector', 'vector_to_parameters', 'BasePruningMethod', 'PruningContainer', 'Identity',
    'RandomUnstructured', 'L1Unstructured', 'RandomStructured', 'LnStructured', 'CustomFromMask', 'identity',
    'random_unstructured', 'l1_unstructured', 'random_structured', 'ln_structured', 'global_unstructured',
    'custom_from_mask', 'remove', 'is_pruned', 'weight_norm', 'remove_weight_norm', 'spectral_norm',
    'remove_spectral_norm', 'PackedSequence', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence', 'pack_sequence'
]

support_ops = {}
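For reference, the rewrites the new pjmap entries above describe, shown as before/after pairs (a sketch of the intended mapping only, not the converter's actual output format):

# PyTorch call                                        ->  converted Jittor call
# nn.PReLU(num_parameters=10, init=0.25)              ->  nn.PReLU(10, init_=0.25)          # 'init' linked to 'init_'
# nn.LeakyReLU(negative_slope=0.02, inplace=True)     ->  nn.LeakyReLU(scale=0.02)          # 'inplace' is in 'delete'
# nn.BatchNorm1d(100, affine=True)                    ->  nn.BatchNorm1d(100, affine=None)  # 'extras' forces affine=None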
@@ -298,6 +368,10 @@ def convert_(prefix, func_name, ags, kws):
    else:
        p_ags = info['pytorch']['args']
        j_ags = info['jittor']['args']
    if 'delete' in info.keys():
        delete = info['delete']
    else:
        delete = None
    j_prefix = info['jittor']['prefix'] if 'prefix' in info['jittor'].keys() else None
    j_module = info['jittor']['module']
    j_name = info['jittor']['name']
@@ -333,6 +407,12 @@ def convert_(prefix, func_name, ags, kws):
            pp_ags.append(p_ag)
    if len(jj_ags) == 0 and len(pp_ags) != 0:
        raise AttributeError(f"{func_name} in Jittor has no Attribute {pp_ags[0]}")
    if delete is not None:
        for d in delete:
            if d in pp_ags:
                jj_ags.append(d)
            if d in pp_kws.keys():
                jj_kws[d] = None
    if len(pp_ags) > len(ags) + len(kws):
        raise RuntimeError(f'There are needed {len(pp_ags) + len(list(pp_kws.keys()))} args in Pytorch {func_name} function, but you only provide {len(ags) + len(kws)}')
    ags_ = []
@@ -395,6 +475,12 @@ def convert_(prefix, func_name, ags, kws):
                j_kws_values[k] = extras[k]
            else:
                raise AttributeError(f"there is not attribute named {k} in Jittor {func_name}, you should delete it in {func_name} extras.")
    if delete is not None:
        for d in delete:
            if d in j_ags_values:
                j_ags_values.remove(d)
            if d in j_kws_values.keys():
                j_kws_values.pop(d)
    j_ags_ = [j_ags_values[str(i)] for i in range(len(list(j_ags_values.keys())))]
    j_kws_ = [key + "=" + j_kws_values[key] for key in j_kws_values.keys()]
    j_func = f"{j_module}.{j_name}({', '.join(j_ags_+j_kws_)})"
@@ -412,10 +498,10 @@ def dfs(a):
        if 'torch' in astunparse.unparse(a) and 'init' in astunparse.unparse(a):
            import_flag.append('init')
            return ast.parse('from jittor import init').body[0]
        if 'torch' in astunparse.unparse(a) and 'nn' in astunparse.unparse(a):
        if 'torch' in astunparse.unparse(a) and a.names[0].asname == 'nn':
            import_flag.append('nn')
            return ast.parse('from jittor import nn').body[0]
        if a.names[0].name == 'torch':
        if 'torch' in a.names[0].name:
            return 'delete'
    elif isinstance(a, ast.ImportFrom):
        if 'torch' in a.module:
@@ -460,7 +546,6 @@ def dfs(a):
                ret = dfs(a_)
                if ret is 'delete':
                    delete_flag.append(True)
                    del a.__dict__[k][i]
                    continue
                if ret is not None:
                    a.__dict__[k][i] = ret
@@ -470,4 +555,4 @@ def dfs(a):
        else:
            ret = dfs(a.__dict__[k])
            if ret is not None:
                a.__dict__[k] = ret
                a.__dict__[k] = ret