add UpsamplingBilinear2d, AdaptiveMaxPool2d

This commit is contained in:
Dun Liang 2021-05-11 16:29:59 +08:00
parent c47db5d189
commit 2efd7be357
4 changed files with 45 additions and 17 deletions

View File

@@ -9,7 +9,7 @@
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
__version__ = '1.2.2.70'
__version__ = '1.2.2.71'
from . import lock
with lock.lock_scope():
ori_int = int

View File

@@ -348,6 +348,8 @@ class Identity(Module):
def execute(self, input):
    # Identity forward pass: return the input unchanged.
    return input
def identity(input):
    """Functional identity: return *input* unchanged."""
    return input
class Dropout(Module):
def __init__(self, p=0.5, is_train=False):
assert p >= 0 and p <= 1, "dropout probability has to be between 0 and 1, but got {}".format(p)
@@ -1322,6 +1324,14 @@ class Upsample(Module):
int(x.shape[3]*self.scale_factor[1])),
mode=self.mode)
class UpsamplingBilinear2d(Upsample):
    """Upsample a 4-D input using bilinear interpolation.

    Convenience wrapper around ``Upsample`` with the mode fixed to
    ``'bilinear'``.
    """
    def __init__(self, scale_factor=None):
        # Delegate to Upsample; only the interpolation mode is pinned.
        super().__init__(scale_factor, 'bilinear')
class UpsamplingNearest2d(Upsample):
    """Upsample a 4-D input using nearest-neighbor interpolation.

    Convenience wrapper around ``Upsample`` with the mode fixed to
    ``'nearest'``.
    """
    def __init__(self, scale_factor=None):
        # Delegate to Upsample; only the interpolation mode is pinned.
        super().__init__(scale_factor, 'nearest')
class Sequential(Module):
def __init__(self, *args):
self.layers = collections.OrderedDict()

View File

@@ -215,6 +215,36 @@ class AdaptiveAvgPool2d(Module):
])
return xx.reduce("mean", [4,5])
class AdaptiveMaxPool2d(Module):
    """Applies 2-D adaptive max pooling over an NCHW input.

    The spatial output size is fixed to ``output_size``; kernel size and
    stride are derived from the actual input size at execution time, so
    inputs of different spatial sizes map to the same output shape.

    Args:
        output_size: int for a square output, or a tuple/list ``(oh, ow)``.
            A ``None`` entry keeps the corresponding input dimension.
    """
    def __init__(self, output_size):
        self.output_size = output_size

    def execute(self, x):
        if isinstance(self.output_size, int):
            oh = ow = self.output_size
        elif isinstance(self.output_size, (tuple, list)):
            # None means "keep this input dimension".
            oh = x.shape[2] if self.output_size[0] is None else self.output_size[0]
            ow = x.shape[3] if self.output_size[1] is None else self.output_size[1]
        else:
            raise TypeError(f"AdaptiveMaxPool2d only support int, tuple or list input. Not support {type(self.output_size)} yet.")
        if oh == 1 and ow == 1:
            # Fast path: global max pooling over the spatial dimensions.
            return x.reduce("maximum", [2,3], keepdims=True)
        N,C,H,W = x.shape
        if oh > H or ow > W:
            # The stride below would be 0 and divide by zero; fail loudly instead.
            raise ValueError(
                f"AdaptiveMaxPool2d output size ({oh},{ow}) cannot exceed input size ({H},{W}).")
        # Integer floor division is exact here (math.floor(H/oh) on floats is not).
        self.sh = H // oh
        self.sw = W // ow
        # Kernel sizes chosen so the last window ends exactly at the input edge.
        self.ksh = H - (oh - 1) * self.sh
        self.ksw = W - (ow - 1) * self.sw
        h = (H-self.ksh)//self.sh+1  # equals oh by construction
        w = (W-self.ksw)//self.sw+1  # equals ow by construction
        # Gather every pooling window, then reduce over the window axes.
        xx = x.reindex([N,C,h,w,self.ksh,self.ksw], [
            "i0", # Nid
            "i1", # Cid
            f"i2*{self.sh}+i4", # Hid
            f"i3*{self.sw}+i5", # Wid
        ])
        return xx.reduce("maximum", [4,5])
def pool(x, kernel_size, op, padding=0, stride=None):
    """Functional pooling: build a Pool module and apply it to ``x``."""
    layer = Pool(kernel_size, stride, padding, op=op)
    return layer(x)

View File

@@ -217,18 +217,6 @@ pjmap = {
'links': {},
'extras': {},
},
'kaiming_normal_': {
'pytorch': {
'args': "tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'",
},
'jittor': {
'module': 'init',
'name': 'relu_invariant_gauss_',
'args': 'var, mode="fan_in"'
},
'links': {'tensor': 'var'},
'extras': {},
},
'constant_': {
'pytorch': {
'args': "tensor, val",
@@ -382,10 +370,10 @@ unsupport_ops = [
'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Identity', 'Bilinear',
'Dropout3d', 'AlphaDropout', 'EmbeddingBag', 'CosineSimilarity', 'PairwiseDistance', 'CTCLoss', 'NLLLoss', 'PoissonNLLLoss', 'KLDivLoss', 'BCEWithLogitsLoss',
'MarginRankingLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss', 'SmoothL1Loss', 'SoftMarginLoss',
'MultiLabelSoftMarginLoss', 'CosineEmbeddingLoss', 'MultiMarginLoss', 'TripletMarginLoss', 'UpsamplingNearest2d',
'UpsamplingBilinear2d', 'DataParallel', 'DistributedDataParallel', 'clip_grad_norm_', 'clip_grad_value_',
'parameters_to_vector', 'vector_to_parameters', 'BasePruningMethod', 'PruningContainer', 'Identity',
'RandomUnstructured', 'L1Unstructured', 'RandomStructured', 'LnStructured', 'CustomFromMask', 'identity',
'MultiLabelSoftMarginLoss', 'CosineEmbeddingLoss', 'MultiMarginLoss', 'TripletMarginLoss', # 'DataParallel', 'DistributedDataParallel',
'clip_grad_norm_', 'clip_grad_value_',
'parameters_to_vector', 'vector_to_parameters', 'BasePruningMethod', 'PruningContainer',
'RandomUnstructured', 'L1Unstructured', 'RandomStructured', 'LnStructured', 'CustomFromMask',
'random_unstructured', 'l1_unstructured', 'random_structured', 'ln_structured', 'global_unstructured',
'custom_from_mask', 'remove', 'is_pruned', 'weight_norm', 'remove_weight_norm', 'spectral_norm',
'remove_spectral_norm', 'PackedSequence', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence', 'pack_sequence'