# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers:
# Guowei Yang <471184555@qq.com>
# Wenyang Zhou <576825820@qq.com>
# Meng-Hao Guo <guomenghao1997@gmail.com>
# Dun Liang <randonlang@gmail.com>.
#
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
from jittor import init, Module
import numpy as np
import math
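# When True, pooling is lowered to the hand-written jt.code kernels defined
# below (for 'maximum'/'minimum', plus 'mean' in ceil mode); when False, it
# always falls back to the generic reindex + reduce path.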
pool_use_code_op = True
class Pool(Module):
    '''
    Pooling layer. Depending on ``op``, performs max, min, or mean pooling.

    Args:
        - kernel_size (int, tuple): size of the pooling window.
        - stride (int, tuple): stride of the pooling window.
        - padding (int, tuple): amount of zero padding added around the input.
        - dilation (None): spacing between elements of the pooling window; must be None.
        - return_indices (bool): if True, the forward pass returns an extra output holding the index of the maximum value in each window.
        - ceil_mode (bool): if True, the output size is rounded up instead of down.
        - count_include_pad (bool): for mean pooling, whether the zero-padded region is included in the average.
        - op (str): type of pooling operation.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})`.

    Attributes:
        - kernel_size (int, tuple): size of the pooling window.
        - stride (int, tuple): stride of the pooling window.
        - padding (int, tuple): amount of zero padding added around the input.
        - dilation (None): spacing between elements of the pooling window; must be None.
        - return_indices (bool): if True, the forward pass returns an extra output holding the index of the maximum value in each window.
        - ceil_mode (bool): if True, the output size is rounded up instead of down.
        - count_include_pad (bool): for mean pooling, whether the zero-padded region is included in the average.
        - op (str): type of pooling operation.

    Example:
        >>> input = jt.random([50,3,32,32])  # a random input tensor
        >>> pool = nn.Pool(2, 2)             # 2x2 max pooling
        >>> output = pool(input)             # apply pooling to input
        >>> print(output.shape)
        [50,3,16,16,]
    '''
    def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False, count_include_pad=True, op='maximum'):
        assert dilation is None
        assert return_indices is None or op == "maximum"
self.return_indices = return_indices
self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
self.op = op
stride = stride if stride else kernel_size
self.stride = stride if isinstance(stride, tuple) else (stride, stride)
self.padding = padding if isinstance(padding, tuple) else (padding, padding)
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad and padding != 0
def execute(self, x):
N,C,H,W = x.shape
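        # Output spatial size: floor((H + 2*padding - kernel) / stride) + 1 by
        # default, or the ceil variant below when ceil_mode is set
        # (e.g. H=32, kernel=2, stride=2, padding=0 gives h=16).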
if self.ceil_mode == False:
h = (H+self.padding[0]*2-self.kernel_size[0])//self.stride[0]+1
w = (W+self.padding[1]*2-self.kernel_size[1])//self.stride[1]+1
use_code_op = self.op in ['maximum', 'minimum']
            # 'mean' is excluded here because second-order gradients of avg_pool
            # are sometimes required and the code op below does not provide them
else:
h = (H+self.padding[0]*2-self.kernel_size[0] + self.stride[0] - 1)//self.stride[0]+1
w = (W+self.padding[1]*2-self.kernel_size[1] + self.stride[1] - 1)//self.stride[1]+1
use_code_op = self.op in ['maximum', 'minimum', 'mean']
if use_code_op and pool_use_code_op:
if self.op == 'mean':
if self.count_include_pad:
count = f"int count = {self.kernel_size[0]*self.kernel_size[1]};"
else:
count = "int count = (k2_ - k2) * (k3_ - k3);"
count += "float32 rcount = 1.0f / count;"
else:
count = ""
forward_body = f'''
int k3 = i3*{self.stride[1]}-{self.padding[1]};
int k2 = i2*{self.stride[0]}-{self.padding[0]};
int k3_ = min(k3 + {self.kernel_size[1]}, in0_shape3);
int k2_ = min(k2 + {self.kernel_size[0]}, in0_shape2);
k3 = max(0, k3);
k2 = max(0, k2);
{count}
'''
if not self.return_indices:
forward_body += f'''
@out(i0, i1, i2, i3) = @expand_op(init_{self.op}, @out_type);
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
@out(i0, i1, i2, i3) = @expand_op({self.op}, @out_type, @out(i0, i1, i2, i3), @out_type, @in0(i0, i1, p, q), @in0_type);
'''
else:
forward_body += f'''
auto out_value = @expand_op(init_{self.op}, @out_type);
int out_index = -1;
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
if (out_value < @in0(i0, i1, p, q)) {{
out_value = @in0(i0, i1, p, q);
out_index = p * in0_shape3 + q;
}}
@out(i0, i1, i2, i3) = out_value;
@out1(i0, i1, i2, i3) = out_index;
'''
backward_body = f'''
int k3 = i3*{self.stride[1]}-{self.padding[1]};
int k2 = i2*{self.stride[0]}-{self.padding[0]};
int k3_ = min(k3 + {self.kernel_size[1]}, in0_shape3);
int k2_ = min(k2 + {self.kernel_size[0]}, in0_shape2);
k3 = max(0, k3);
k2 = max(0, k2);
{count}
int bo=1;
for (int p = k2; p < k2_ && bo; ++p)
for (int q = k3; q < k3_ && bo; ++q) {{
{"atomicAdd(&@out(i0,i1,p,q), @dout(i0,i1,i2,i3)/count);"
if self.op == "mean" else
f"""if (@pout(i0,i1,i2,i3) == @in0(i0,i1,p,q)) {{
                    atomicAdd(&@out(i0,i1,p,q), @dout(i0,i1,i2,i3));
bo=0;
}}"""}
}}
'''
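            # Backward: for 'mean' the incoming gradient is spread evenly over
            # the window (divided by count); for max/min it is routed to the
            # first input element that equals the pooled output, and the bo
            # flag stops the scan after that match.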
if self.return_indices:
return_shapes = [[N,C,h,w]] * 2
return_dtypes = [x.dtype, 'int32']
else:
return_shapes = [N,C,h,w]
return_dtypes = x.dtype
out = jt.code(return_shapes, return_dtypes, [x],
cuda_header="""
#include <misc/cuda_limits.h>
""",
cuda_src=f'''
__global__ static void kernel1(@ARGS_DEF) {{
@PRECALC
int p3 = threadIdx.x;
int s3 = blockDim.x;
int p2 = threadIdx.y + blockIdx.x * blockDim.y;
int s2 = blockDim.y * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i3 = p3; i3 < out_shape3; i3 += s3)
for (int i2 = p2; i2 < out_shape2; i2 += s2)
{{ {forward_body} }}
}}
int tx = std::min(1024, out_shape3);
int ty = std::min(1024 / tx, out_shape2);
int bx = (out_shape2 - 1) / ty + 1;
int by = out_shape1;
int bz = out_shape0;
dim3 s1(bx, by, bz);
dim3 s2(tx, ty);
kernel1<<<s1, s2>>>(@ARGS);
''',
cuda_grad_src=[f'''
__global__ static void kernel3(@ARGS_DEF) {{
@PRECALC
int p3 = threadIdx.x;
int s3 = blockDim.x;
int p2 = threadIdx.y + blockIdx.x * blockDim.y;
int s2 = blockDim.y * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i3 = p3; i3 < pout_shape3; i3 += s3)
for (int i2 = p2; i2 < pout_shape2; i2 += s2)
{{ {backward_body} }}
}}
cudaMemsetAsync(out_p, 0, out->size);
int tx = std::min(1024, pout_shape3);
int ty = std::min(1024 / tx, pout_shape2);
int bx = (pout_shape2 - 1) / ty + 1;
int by = pout_shape1;
int bz = pout_shape0;
dim3 s1_(bx, by, bz);
dim3 s2_(tx, ty);
kernel3<<<s1_, s2_>>>(@ARGS);
'''],
cpu_header='',
cpu_src=f'''
using namespace std;
for (int i0=0; i0<out_shape0; i0++)
for (int i1=0; i1<out_shape1; i1++)
for (int i2=0; i2<out_shape2; i2++)
for (int i3=0; i3<out_shape3; i3++)
{{ {forward_body} }}
''',
cpu_grad_src = [f'''
using namespace std;
std::memset(out_p, 0, out->size);
#define atomicAdd(a,b) (*a) += b
for (int i0=0; i0<pout_shape0; i0++)
for (int i1=0; i1<pout_shape1; i1++)
for (int i2=0; i2<pout_shape2; i2++)
for (int i3=0; i3<pout_shape3; i3++)
{{ {backward_body} }}
'''])
return out
else:
# TODO: backward
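            # Fallback: reindex unfolds each pooling window into two trailing
            # axes (i4, i5), and reducing over them with the requested op
            # yields the pooled output.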
xx = x.reindex([N,C,h,w,self.kernel_size[0],self.kernel_size[1]], [
"i0", # Nid
"i1", # Cid
f"i2*{self.stride[0]}-{self.padding[0]}+i4", # Hid
f"i3*{self.stride[1]}-{self.padding[1]}+i5", # Wid
])
return xx.reduce(self.op, [4,5])
def _triple(x):
    # Broadcast an int to a 3-tuple; tuples must already have length 3.
    if isinstance(x, tuple):
        assert len(x) == 3
        return x
    else:
        return (x, x, x)
class Pool3d(Module):
    '''
    3D pooling layer. Pools over the depth, height, and width of a 3D input.

    Args:
        - kernel_size (int, tuple): size of the pooling kernel.
        - stride (int, tuple): stride of the pooling operation.
        - padding (int, tuple): amount of padding added to the input.
        - dilation (int, tuple): spacing between kernel elements; must be None.
        - return_indices (bool): if True, also return the indices of the maxima.
        - ceil_mode (bool): if True, the output size is rounded up instead of down.
        - count_include_pad (bool): if True, padded positions are counted when computing the average.
        - op (str): type of pooling operation.

    Attributes:
        - kernel_size (int, tuple): size of the pooling kernel.
        - stride (int, tuple): stride of the pooling operation.
        - padding (int, tuple): amount of padding added to the input.
        - dilation (int, tuple): spacing between kernel elements; must be None.
        - return_indices (bool): if True, also return the indices of the maxima.
        - ceil_mode (bool): if True, the output size is rounded up instead of down.
        - count_include_pad (bool): if True, padded positions are counted when computing the average.
        - op (str): type of pooling operation.

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`.

    Example:
        >>> pool = nn.Pool3d(kernel_size=2, stride=2)
        >>> input = jt.randn(20, 16, 50, 32, 32)
        >>> output = pool(input)
        >>> print(output.shape)
        [20,16,25,16,16,]
    '''
    def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False, count_include_pad=True, op='maximum'):
        assert dilation is None
        assert return_indices is None or op == "maximum"
self.return_indices = return_indices
self.kernel_size = _triple(kernel_size)
self.op = op
stride = stride if stride else kernel_size
self.stride = _triple(stride)
self.padding = _triple(padding)
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad and padding != 0
def execute(self, x):
N,C,D,H,W = x.shape
if self.ceil_mode == False:
d = (D+self.padding[0]*2-self.kernel_size[0])//self.stride[0]+1
h = (H+self.padding[1]*2-self.kernel_size[1])//self.stride[1]+1
w = (W+self.padding[2]*2-self.kernel_size[2])//self.stride[2]+1
use_code_op = self.op in ['maximum', 'minimum']
            # 'mean' is excluded here because second-order gradients of avg_pool
            # are sometimes required and the code op below does not provide them
else:
d = (D+self.padding[0]*2-self.kernel_size[0] + self.stride[0] - 1)//self.stride[0]+1
h = (H+self.padding[1]*2-self.kernel_size[1] + self.stride[1] - 1)//self.stride[1]+1
w = (W+self.padding[2]*2-self.kernel_size[2] + self.stride[2] - 1)//self.stride[2]+1
use_code_op = self.op in ['maximum', 'minimum', 'mean']
if use_code_op and pool_use_code_op:
if self.op == 'mean':
if self.count_include_pad:
count = f"int count = {self.kernel_size[0]*self.kernel_size[1]*self.kernel_size[2]};"
else:
count = "int count = (k2_ - k2) * (k3_ - k3) * (k4_ - k4);"
count += "float32 rcount = 1.0f / count;"
else:
count = ""
forward_body = f'''
int k4 = i4*{self.stride[2]}-{self.padding[2]};
int k3 = i3*{self.stride[1]}-{self.padding[1]};
int k2 = i2*{self.stride[0]}-{self.padding[0]};
int k4_ = min(k4 + {self.kernel_size[2]}, in0_shape4);
int k3_ = min(k3 + {self.kernel_size[1]}, in0_shape3);
int k2_ = min(k2 + {self.kernel_size[0]}, in0_shape2);
k4 = max(0, k4);
k3 = max(0, k3);
k2 = max(0, k2);
{count}
'''
if not self.return_indices:
forward_body += f'''
@out(i0, i1, i2, i3, i4) = @expand_op(init_{self.op}, @out_type);
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
for (int r = k4; r < k4_; ++r)
@out(i0, i1, i2, i3, i4) = @expand_op({self.op}, @out_type, @out(i0, i1, i2, i3, i4), @out_type, @in0(i0, i1, p, q, r), @in0_type);
'''
else:
forward_body += f'''
auto out_value = @expand_op(init_{self.op}, @out_type);
int out_index = -1;
for (int p = k2; p < k2_; ++p)
for (int q = k3; q < k3_; ++q)
                for (int r = k4; r < k4_; ++r)
if (out_value < @in0(i0, i1, p, q, r)) {{
out_value = @in0(i0, i1, p, q, r);
out_index = p * in0_shape3 * in0_shape4 + q * in0_shape4 + r;
}}
@out(i0, i1, i2, i3, i4) = out_value;
@out1(i0, i1, i2, i3, i4) = out_index;
'''
backward_body = f'''
int k4 = i4*{self.stride[2]}-{self.padding[2]};
int k3 = i3*{self.stride[1]}-{self.padding[1]};
int k2 = i2*{self.stride[0]}-{self.padding[0]};
int k4_ = min(k4 + {self.kernel_size[2]}, in0_shape4);
int k3_ = min(k3 + {self.kernel_size[1]}, in0_shape3);
int k2_ = min(k2 + {self.kernel_size[0]}, in0_shape2);
k4 = max(0, k4);
k3 = max(0, k3);
k2 = max(0, k2);
{count}
int bo=1;
for (int p = k2; p < k2_ && bo; ++p)
for (int q = k3; q < k3_ && bo; ++q)
for (int r = k4; r < k4_ && bo; ++r) {{
{"atomicAdd(&@out(i0,i1,p,q,r), @dout(i0,i1,i2,i3,i4)/count);"
if self.op == "mean" else
f"""if (@pout(i0,i1,i2,i3,i4) == @in0(i0,i1,p,q,r)) {{
                    atomicAdd(&@out(i0,i1,p,q,r), @dout(i0,i1,i2,i3,i4));
bo=0;
}}"""}
}}
'''
if self.return_indices:
return_shapes = [[N,C,d,h,w]] * 2
return_dtypes = [x.dtype, 'int32']
else:
return_shapes = [N,C,d,h,w]
return_dtypes = x.dtype
out = jt.code(return_shapes, return_dtypes, [x],
cuda_header="""
#include <misc/cuda_limits.h>
""",
cuda_src=f'''
__global__ static void kernel1(@ARGS_DEF) {{
@PRECALC
int p4 = threadIdx.x;
int s4 = blockDim.x;
int p3 = threadIdx.y;
int s3 = blockDim.y;
int p2 = threadIdx.z + blockIdx.x * blockDim.z;
int s2 = blockDim.z * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i4 = p4; i4 < out_shape4; i4 += s4)
for (int i3 = p3; i3 < out_shape3; i3 += s3)
for (int i2 = p2; i2 < out_shape2; i2 += s2)
{{ {forward_body} }}
}}
int tx = std::min(1024, out_shape4);
int ty = std::min(1024 / tx, out_shape3);
int tz = std::min(1024 / tx / ty, out_shape2);
int bx = (out_shape2 - 1) / tz + 1;
int by = out_shape1;
int bz = out_shape0;
dim3 s1(bx, by, bz);
dim3 s2(tx, ty, tz);
kernel1<<<s1, s2>>>(@ARGS);
''',
cuda_grad_src=[f'''
__global__ static void kernel3(@ARGS_DEF) {{
@PRECALC
int p4 = threadIdx.x;
int s4 = blockDim.x;
int p3 = threadIdx.y;
int s3 = blockDim.y;
int p2 = threadIdx.z + blockIdx.x * blockDim.z;
int s2 = blockDim.z * gridDim.x;
int i1 = blockIdx.y;
int i0 = blockIdx.z;
for (int i4 = p4; i4 < out_shape4; i4 += s4)
for (int i3 = p3; i3 < out_shape3; i3 += s3)
for (int i2 = p2; i2 < out_shape2; i2 += s2)
{{ {backward_body} }}
}}
cudaMemsetAsync(out_p, 0, out->size);
int tx = std::min(1024, pout_shape4);
int ty = std::min(1024 / tx, pout_shape3);
int tz = std::min(1024 / tx / ty, pout_shape2);
int bx = (pout_shape2 - 1) / tz + 1;
int by = pout_shape1;
int bz = pout_shape0;
dim3 s1(bx, by, bz);
dim3 s2(tx, ty, tz);
kernel3<<<s1, s2>>>(@ARGS);
'''],
cpu_header='',
cpu_src=f'''
using namespace std;
for (int i0=0; i0<out_shape0; i0++)
for (int i1=0; i1<out_shape1; i1++)
for (int i2=0; i2<out_shape2; i2++)
for (int i3=0; i3<out_shape3; i3++)
for (int i4=0; i4<out_shape4; i4++)
{{ {forward_body} }}
''',
cpu_grad_src = [f'''
using namespace std;
std::memset(out_p, 0, out->size);
#define atomicAdd(a,b) (*a) += b
for (int i0=0; i0<pout_shape0; i0++)
for (int i1=0; i1<pout_shape1; i1++)
for (int i2=0; i2<pout_shape2; i2++)
for (int i3=0; i3<pout_shape3; i3++)
for (int i4=0; i4<pout_shape4; i4++)
{{ {backward_body} }}
'''])
return out
else:
# TODO: backward
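            # Same fallback as Pool.execute, extended with a depth axis.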
xx = x.reindex([N,C,d,h,w,self.kernel_size[0],self.kernel_size[1],self.kernel_size[2]], [
"i0", # Nid
"i1", # Cid
f"i2*{self.stride[0]}-{self.padding[0]}+i5", # Did
f"i3*{self.stride[1]}-{self.padding[1]}+i6", # Hid
f"i4*{self.stride[2]}-{self.padding[2]}+i7", # Hid
])
return xx.reduce(self.op, [5,6,7])
class AdaptiveAvgPool2d(Module):
    '''
    Applies 2D adaptive average pooling over an input.

    Args:
        - output_size (int, tuple, list): the target output size.

    Shape:
        - Input: :math:`[N, C, H, W]`.
        - Output: :math:`[N, C, S_0, S_1]`, where :math:`(S_0, S_1)` = ``output_size``.

    Attributes:
        - output_size (int, tuple, list): the target output size.

    Example:
>>> m = nn.AdaptiveAvgPool2d((5, 7)) # target output size of 5x7
>>> input = jt.randn(1, 64, 8, 9)
>>> output = m(input)
>>> m = nn.AdaptiveAvgPool2d(7) # target output size of 7x7 (square)
>>> input = jt.randn(1, 64, 10, 9)
>>> output = m(input)
>>> m = nn.AdaptiveAvgPool2d((None, 7)) # target output size of 10x7
>>> input = jt.randn(1, 64, 10, 9)
>>> output = m(input)
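        >>> print(output.shape)
        [1,64,10,7,]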
'''
def __init__(self, output_size):
self.output_size = output_size
def execute(self, x):
if isinstance(self.output_size, int):
oh = self.output_size
ow = self.output_size
elif isinstance(self.output_size, tuple) or isinstance(self.output_size, list):
oh = x.shape[2] if self.output_size[0] is None else self.output_size[0]
ow = x.shape[3] if self.output_size[1] is None else self.output_size[1]
else:
raise TypeError(f"AdaptiveAvgPool2d only support int, tuple or list input. Not support {type(self.output_size)} yet.")
if oh == 1 and ow == 1:
return x.reduce("mean", [2,3], keepdims=True)
N,C,H,W = x.shape
self.sh = math.floor(H / oh)
self.sw = math.floor(W / ow)
self.ksh = H - (oh - 1) * self.sh
self.ksw = W - (ow - 1) * self.sw
h = (H-self.ksh)//self.sh+1
w = (W-self.ksw)//self.sw+1
xx = x.reindex([N,C,h,w,self.ksh,self.ksw], [
"i0", # Nid
"i1", # Cid
f"i2*{self.sh}+i4", # Hid
f"i3*{self.sw}+i5", # Wid
])
return xx.reduce("mean", [4,5])
class AdaptiveMaxPool2d(Module):
    '''
    Applies 2D adaptive max pooling over an input.

    Args:
        - output_size (int, tuple, list): the target output size.
        - return_indices (bool, optional): whether to also return the indices of the maxima. Default: False.

    Shape:
        - Input: :math:`[N, C, H, W]`.
        - Output: :math:`[N, C, S_0, S_1]`, where :math:`(S_0, S_1)` = ``output_size``.

    Attributes:
        - output_size (int, tuple, list): the target output size.
        - return_indices (bool): whether to also return the indices of the maxima.

    Example:
>>> # target output size of 5x7
>>> m = nn.AdaptiveMaxPool2d((5, 7))
>>> input = jt.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveMaxPool2d(7)
>>> input = jt.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveMaxPool2d((None, 7))
>>> input = jt.randn(1, 64, 10, 9)
>>> output = m(input)
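        >>> print(output.shape)
        [1,64,10,7,]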
'''
def __init__(self, output_size, return_indices=False):
self.output_size = output_size
self.return_indices = return_indices
def execute(self, x):
if isinstance(self.output_size, int):
oh = self.output_size
ow = self.output_size
elif isinstance(self.output_size, tuple) or isinstance(self.output_size, list):
oh = x.shape[2] if self.output_size[0] is None else self.output_size[0]
ow = x.shape[3] if self.output_size[1] is None else self.output_size[1]
else:
raise TypeError(f"AdaptiveMaxPool2d only support int, tuple or list input. Not support {type(self.output_size)} yet.")
if oh == 1 and ow == 1:
return x.reduce("maximum", [2,3], keepdims=True)
N,C,H,W = x.shape
self.sh = math.floor(H / oh)
self.sw = math.floor(W / ow)
self.ksh = H - (oh - 1) * self.sh
self.ksw = W - (ow - 1) * self.sw
if self.return_indices:
return MaxPool2d(
kernel_size=(self.ksh, self.ksw),
stride=(self.sh, self.sw), return_indices=True)(x)
h = (H-self.ksh)//self.sh+1
w = (W-self.ksw)//self.sw+1
xx = x.reindex([N,C,h,w,self.ksh,self.ksw], [
"i0", # Nid
"i1", # Cid
f"i2*{self.sh}+i4", # Hid
f"i3*{self.sw}+i5", # Wid
])
return xx.reduce("maximum", [4,5])
class AdaptiveAvgPool3d(Module):
    '''
    Applies 3D adaptive average pooling over an input.

    Args:
        - output_size (int, tuple, list): the target output size.

    Shape:
        - Input: :math:`[N, C, D, H, W]`.
        - Output: :math:`[N, C, S_0, S_1, S_2]`, where :math:`(S_0, S_1, S_2)` = ``output_size``.

    Attributes:
        - output_size (int, tuple, list): the target output size.

    Example:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
>>> input = jt.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveAvgPool3d(7)
>>> input = jt.randn(1, 64, 10, 9, 8)
>>> output = m(input)
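        >>> print(output.shape)
        [1,64,7,7,7,]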
'''
def __init__(self, output_size):
self.output_size = _triple(output_size)
def execute(self, x):
od, oh, ow = self.output_size
if od == 1 and oh == 1 and ow == 1:
return x.reduce("mean", [2,3,4], keepdims=True)
N,C,D,H,W = x.shape
self.sd = math.floor(D / od)
self.sh = math.floor(H / oh)
self.sw = math.floor(W / ow)
self.ksd = D - (od - 1) * self.sd
self.ksh = H - (oh - 1) * self.sh
self.ksw = W - (ow - 1) * self.sw
d = (D-self.ksd)//self.sd+1
h = (H-self.ksh)//self.sh+1
w = (W-self.ksw)//self.sw+1
xx = x.reindex([N,C,d,h,w,self.ksd,self.ksh,self.ksw], [
"i0", # Nid
"i1", # Cid
f"i2*{self.sd}+i5", # Did
f"i3*{self.sh}+i6", # Hid
f"i4*{self.sw}+i7", # Wid
])
return xx.reduce("mean", [5,6,7])
class AdaptiveMaxPool3d(Module):
    '''
    Applies 3D adaptive max pooling over an input.

    Args:
        - output_size (int, tuple, list): the target output size.
        - return_indices (bool): if True, also return the indices of the maxima along with the output.

    Shape:
        - Input: :math:`[N, C, D, H, W]`.
        - Output: :math:`[N, C, S_0, S_1, S_2]`, where :math:`(S_0, S_1, S_2)` = ``output_size``.

    Attributes:
        - output_size (int, tuple, list): the target output size.
        - return_indices (bool): if True, also return the indices of the maxima along with the output.

    Example:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
>>> input = jt.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveMaxPool3d(7)
>>> input = jt.randn(1, 64, 10, 9, 8)
>>> output = m(input)
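        >>> print(output.shape)
        [1,64,7,7,7,]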
'''
def __init__(self, output_size, return_indices=False):
self.output_size = _triple(output_size)
self.return_indices = return_indices
def execute(self, x):
od, oh, ow = self.output_size
if od == 1 and oh == 1 and ow == 1 and not self.return_indices:
return x.reduce("maximum", [2,3,4], keepdims=True)
N,C,D,H,W = x.shape
self.sd = math.floor(D / od)
self.sh = math.floor(H / oh)
self.sw = math.floor(W / ow)
self.ksd = D - (od - 1) * self.sd
self.ksh = H - (oh - 1) * self.sh
self.ksw = W - (ow - 1) * self.sw
if self.return_indices:
return MaxPool3d(
kernel_size=(self.ksd, self.ksh, self.ksw),
stride=(self.sd, self.sh, self.sw), return_indices=True)(x)
d = (D-self.ksd)//self.sd+1
h = (H-self.ksh)//self.sh+1
w = (W-self.ksw)//self.sw+1
xx = x.reindex([N,C,d,h,w,self.ksd,self.ksh,self.ksw], [
"i0", # Nid
"i1", # Cid
f"i2*{self.sd}+i5", # Did
f"i3*{self.sh}+i6", # Hid
f"i4*{self.sw}+i7", # Wid
])
return xx.reduce("maximun", [5,6,7])
def pool(x, kernel_size, op, padding=0, stride=None):
    '''
    Applies a pooling operation to the input tensor; the kind of pooling is selected by ``op``.

    Args:
        - x (Var): input tensor.
        - kernel_size (int, tuple of int): size of the pooling window.
        - op (str): pooling type, e.g. ``'max'`` for max pooling or ``'avg'`` for average pooling.
        - padding (int, tuple of int, optional): amount of zero padding added on each side of the input. Default: 0.
        - stride (int, tuple of int, optional): stride of the pooling window. Default: None (use ``kernel_size``).

    Returns:
        the pooled tensor.

    Example:
>>> import jittor as jt
>>> x = jt.ones((1,3,4,4))
        >>> y = jt.nn.pool(x, (2, 2), 'max')
        >>> y.shape
        [1,3,2,2,]
'''
return Pool(kernel_size, stride, padding, op=op)(x)
pool2d = pool
def pool3d(x, kernel_size, op, padding=0, stride=None):
    '''
    Applies a 3D pooling operation to the input tensor; the kind of pooling is selected by ``op``.

    Args:
        - x (Var): input tensor of shape :math:`(N, C, D, H, W)`.
        - kernel_size (int or tuple of int): size of the pooling window.
        - op (str): pooling type, e.g. ``'max'`` for max pooling.
        - padding (int or tuple of int, optional): number of zero-padding layers added on each side of every dimension. Default: 0.
        - stride (int or tuple of int, optional): stride of the pooling window. Default: ``kernel_size``.

    Returns:
        the pooled output.

    Example:
>>> import jittor as jt
>>> x = jt.random([2,3,10,10,10])
>>> y = jt.nn.pool3d(x, 2, 'max')
>>> y.shape
        [2,3,5,5,5,]
'''
return Pool3d(kernel_size, stride, padding, op=op)(x)
class AvgPool2d(Module):
    '''
    2D average pooling layer. Averages over the height and width of a 2D input.

    Args:
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling operation.
        - padding (int or tuple, optional): amount of zero padding added at the edges of the input's height and width.
        - ceil_mode (bool, optional): whether to use ``ceil`` to compute the output height and width. Default: False.
        - count_include_pad (bool, optional): whether zero-padded cells are included when computing the average. Default: True.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { kernel_size }[0]}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { kernel_size }[1]}{\\text { stride }[1]}+1\\right\\rfloor
    Attributes:
        - layer (jt.Module): the module that performs the pooling.

    Example:
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
>>> input = jt.randn(20, 16, 50, 32)
>>> output = m(input)
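        >>> print(output.shape)
        [20,16,24,31,]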
'''
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
self.layer = Pool(kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode, count_include_pad=count_include_pad, op="mean")
def execute(self, x):
return self.layer(x)
class AvgPool3d(Module):
    '''
    3D average pooling layer. Averages over the depth, height, and width of a 3D input.

    Args:
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling operation.
        - padding (int or tuple, optional): amount of zero padding added at the edges of the input.
        - ceil_mode (bool, optional): whether to use ``ceil`` to compute the output size. Default: False.
        - count_include_pad (bool, optional): whether zero-padded cells are included when computing the average. Default: True.

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
& \\qquad D_{\\text {out }}=\\left\\lfloor\\frac{D_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { kernel_size }[0]}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { kernel_size }[1]}{\\text { stride }[1]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[2]-\\text { kernel_size }[2]}{\\text { stride }[2]}+1\\right\\rfloor
    Attributes:
        - layer (jt.Module): the module that performs the pooling.

    Example:
>>> import jittor as jt
>>> from jittor import nn
>>> m = nn.AvgPool3d(2, stride=1, padding=1, count_include_pad=False)
>>> input = jt.random([1, 6, 2, 2, 2])
>>> output = m(input)
>>> print(output.shape)
[1,6,3,3,3,]
'''
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
self.layer = Pool3d(kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode, count_include_pad=count_include_pad, op="mean")
def execute(self, x):
return self.layer(x)
def avg_pool2d(x, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
    '''
    Applies 2D average pooling to the input tensor.

    Args:
        - x (Var): input tensor of shape :math:`(N, C, H_{in}, W_{in})`.
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling window. Default: ``kernel_size``.
        - padding (int, optional): implicit zero padding on all sides of the input. Default: 0.
        - ceil_mode (bool, optional): if True, use ``ceil`` instead of ``floor`` to compute the output shape. Default: False.
        - count_include_pad (bool, optional): if True, zero padding is included when computing the average. Default: True.

    Returns:
        the tensor after 2D average pooling.

    Example:
>>> import jittor as jt
>>> x = jt.random((1, 1, 4, 4))
        >>> y = jt.nn.avg_pool2d(x, kernel_size=2, stride=2)
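        >>> print(y.shape)
        [1,1,2,2,]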
'''
return AvgPool2d(kernel_size, stride, padding, ceil_mode, count_include_pad)(x)
class MaxPool2d(Module):
    '''
    2D max pooling layer. Takes the maximum over the height and width of a 2D input.

    Args:
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling operation.
        - padding (int or tuple, optional): amount of zero padding added at the edges of the input's height and width.
        - dilation (int or tuple, optional): spacing between kernel elements; must be None.
        - return_indices (bool, optional): if True, also return the indices of the maxima.
        - ceil_mode (bool, optional): whether to use ``ceil`` to compute the output height and width. Default: False.

    Shape:
        - Input: :math:`(N,C,H_{in},W_{in})`.
        - Output: :math:`(N,C,H_{out},W_{out})`, where
.. math::
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { dilation }[0] \\times(\\text { kernel_size }[0]-1)-1}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { dilation }[1] \\times(\\text { kernel_size }[1]-1)-1}{\\text { stride }[1]}+1\\right\\rfloor
    Attributes:
        - _layer (jt.Module): the module that performs the pooling.

    Example:
>>> import jittor as jt
>>> from jittor import nn
>>> m = nn.MaxPool2d(2, stride=1, padding=1)
>>> input = jt.random([1, 3, 5, 5])
>>> output = m(input)
>>> print(output.shape)
[1,3,6,6,]
'''
def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
self._layer = Pool(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, return_indices=return_indices, ceil_mode=ceil_mode, op="maximum")
def execute(self, x):
return self._layer(x)
class MaxPool3d(Module):
    '''
    3D max pooling layer. Takes the maximum over the depth, height, and width of a 3D input.

    Args:
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling operation.
        - padding (int or tuple, optional): amount of zero padding added at the edges of the input.
        - dilation (int or tuple, optional): spacing between kernel elements; must be None.
        - return_indices (bool, optional): if True, also return the indices of the maxima.
        - ceil_mode (bool, optional): whether to use ``ceil`` to compute the output size. Default: False.

    Shape:
        - Input: :math:`(N,C,D_{in},H_{in},W_{in})`.
        - Output: :math:`(N,C,D_{out},H_{out},W_{out})`, where

        .. math::
& \\qquad D_{\\text {out }}=\\left\\lfloor\\frac{D_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { dilation }[0] \\times(\\text { kernel_size }[0]-1)-1}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { dilation }[1] \\times(\\text { kernel_size }[1]-1)-1}{\\text { stride }[1]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[2]-\\text { dilation }[2] \\times(\\text { kernel_size }[2]-1)-1}{\\text { stride }[2]}+1\\right\\rfloor
    Attributes:
        - _layer (jt.Module): the module that performs the pooling.

    Example:
>>> import jittor as jt
>>> from jittor import nn
>>> m = nn.MaxPool3d(2, stride=1, padding=1)
>>> input = jt.random([1, 3, 5, 5, 5])
>>> output = m(input)
>>> print(output.shape)
[1,3,6,6,6,]
'''
def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
self._layer = Pool3d(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, return_indices=return_indices, ceil_mode=ceil_mode, op="maximum")
def execute(self, x):
return self._layer(x)
def max_pool2d(x, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
    '''
    Applies 2D max pooling to the input tensor.

    Args:
        - x (Var): input tensor of shape :math:`(N, C, H_{in}, W_{in})`.
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling window. Default: ``kernel_size``.
        - padding (int, optional): implicit zero padding on all sides of the input. Default: 0.
        - dilation (int or tuple, optional): spacing between kernel elements; must be None.
        - return_indices (bool, optional): if True, also return the indices of the maxima.
        - ceil_mode (bool, optional): if True, use ``ceil`` instead of ``floor`` to compute the output shape. Default: False.

    Returns:
        the tensor after 2D max pooling.

    Example:
>>> import jittor as jt
>>> x = jt.random((1, 1, 4, 4))
        >>> y = jt.nn.max_pool2d(x, kernel_size=2, stride=2)
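        >>> print(y.shape)
        [1,1,2,2,]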
'''
return MaxPool2d(kernel_size, stride, padding, dilation, return_indices, ceil_mode)(x)
def max_pool3d(x, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
    '''
    Applies 3D max pooling to the input tensor.

    Args:
        - x (Var): input tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
        - kernel_size (int or tuple): size of the pooling kernel.
        - stride (int or tuple, optional): stride of the pooling window. Default: ``kernel_size``.
        - padding (int, optional): implicit zero padding on all sides of the input. Default: 0.
        - dilation (int or tuple, optional): spacing between kernel elements; must be None.
        - return_indices (bool, optional): if True, also return the indices of the maxima.
        - ceil_mode (bool, optional): if True, use ``ceil`` instead of ``floor`` to compute the output shape. Default: False.

    Returns:
        the output tensor (Var) after 3D max pooling.

    Example:
        >>> import jittor as jt
        >>> x = jt.randn(1, 1, 4, 4, 4)  # input of shape [batch_size, channels, depth, height, width]
        >>> y = jt.nn.max_pool3d(x, kernel_size=3, stride=2, padding=1)
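        >>> print(y.shape)
        [1,1,2,2,2,]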
'''
return MaxPool3d(kernel_size, stride, padding, dilation, return_indices, ceil_mode)(x)
class MaxUnpool2d(Module):
    '''
    The inverse operation of ``MaxPool2d``.

    Args:
        - kernel_size (int, Tuple[int, int]): window size.
        - stride (int, Tuple[int, int], optional, default None): stride.

    Shape:
        - Input: :math:`(N,C,H_{in},W_{in})`.
        - Output: :math:`(N,C,H_{out},W_{out})`, where the last two dimensions are given by output_size. If output_size is ``None``, then

        .. math::
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { kernel_size }[0]}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { kernel_size }[1]}{\\text { stride }[1]}+1\\right\\rfloor
    Attributes:
        - kernel_size (int, tuple): window size.
        - stride (int, tuple, optional): stride.

    Example:
>>> import jittor as jt
>>> from jittor import nn
>>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool2d(2, stride=2)
>>> input = jt.array([[[[1., 2, 3, 4, 0],
... [5, 6, 7, 8, 0],
... [9, 10, 11, 12, 0],
... [13, 14, 15, 16, 0],
... [0, 0, 0, 0, 0]]]])
>>> output1, indices = pool(input)
        >>> output2 = unpool(output1, indices, output_size=input.shape)
>>> print(output2)
jt.Var([[[[ 0. 0. 0. 0. 0.]
[ 0. 6. 0. 8. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 14. 0. 16. 0.]
[ 0. 0. 0. 0. 0.]]]], dtype=float32)
'''
def __init__(self, kernel_size, stride=None):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if stride is None: stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
def execute(self, x, id, output_size=None):
b, c, ph, pw = x.shape
kh, kw = self.kernel_size
sh, sw = self.stride
if output_size:
h, w = output_size[-2:]
else:
h, w = ph * sh, pw * sw
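        # Non-overlapping windows (stride == kernel_size): every output pixel
        # looks up the stored index of its window and keeps the value only if
        # it matches; otherwise, scatter-add the pooled values to the positions
        # recorded in `id`.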
if self.stride == self.kernel_size:
x = x.reindex(shape=[b, c, h, w],
indexes=['i0', 'i1', f'i2/{kh}', f'i3/{kw}'],
extras=[id],
overflow_conditions=[
f'(i2*yshape3+i3) != @e0(i0,i1,i2/{kh},i3/{kw})'],
overflow_value=0)
else:
x = x.reindex_reduce(
op="add",
shape=[b, c, h, w],
indexes=['i0', 'i1',
f'@e0(i0,i1,i2,i3)/xshape3',
f'@e0(i0,i1,i2,i3)%xshape3'],
extras=[id],
)
return x
class MaxUnpool3d(Module):
    '''
    The inverse operation of ``MaxPool3d``.

    Args:
        - kernel_size (int, Tuple[int, int, int]): window size. An int ``k`` is expanded to ``(k, k, k)``.
        - stride (int, Tuple[int, int, int], optional, default None): stride.

    Shape:
        - Input: :math:`(N,C,D_{in},H_{in},W_{in})`.
        - Output: :math:`(N,C,D_{out},H_{out},W_{out})`, where the last three dimensions are given by output_size. If output_size is ``None``, then

        .. math::
& \\qquad D_{\\text {out }}=\\left\\lfloor\\frac{D_{\\text {in }}+2 \\times \\text { padding }[0]-\\text { kernel_size }[0]}{\\text { stride }[0]}+1\\right\\rfloor \\\\
& \\qquad H_{\\text {out }}=\\left\\lfloor\\frac{H_{\\text {in }}+2 \\times \\text { padding }[1]-\\text { kernel_size }[1]}{\\text { stride }[1]}+1\\right\\rfloor \\\\
& \\qquad W_{\\text {out }}=\\left\\lfloor\\frac{W_{\\text {in }}+2 \\times \\text { padding }[2]-\\text { kernel_size }[2]}{\\text { stride }[2]}+1\\right\\rfloor
    Attributes:
        - kernel_size (int, tuple): window size.
        - stride (int, tuple, optional): stride.

    Example:
>>> import jittor as jt
>>> from jittor import nn
>>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool3d(3, stride=2)
>>> output, indices = pool(jt.randn(20, 16, 51, 33, 15))
>>> unpooled_output = unpool(output, indices)
        >>> print(unpooled_output.shape)
[20,16,50,32,14,]
'''
def __init__(self, kernel_size, stride=None):
if stride is None: stride = kernel_size
kernel_size = _triple(kernel_size)
stride = _triple(stride)
self.kernel_size = kernel_size
self.stride = stride
def execute(self, x, id, output_size=None):
b, c, pd, ph, pw = x.shape
kd, kh, kw = self.kernel_size
sd, sh, sw = self.stride
if output_size:
d, h, w = output_size[-3:]
else:
d, h, w = pd * sd, ph * sh, pw * sw
if self.stride == self.kernel_size:
x = x.reindex(shape=[b, c, d, h, w],
indexes=['i0', 'i1', f'i2/{kd}', f'i3/{kh}', f'i4/{kw}'],
extras=[id],
overflow_conditions=[
f'(i2*yshape3*yshape4+i3*yshape4+i4) != @e0(i0,i1,i2/{kd},i3/{kh},i4/{kw})'],
overflow_value=0)
else:
x = x.reindex_reduce(
op="add",
shape=[b, c, d, h, w],
indexes=['i0', 'i1',
f'@e0(i0,i1,i2,i3,i4)/(xshape4*xshape3)',
f'@e0(i0,i1,i2,i3,i4)/xshape4%xshape3',
f'@e0(i0,i1,i2,i3,i4)%xshape4'],
extras=[id],
)
return x