[{"data":1,"prerenderedAt":267},["ShallowReactive",2],{"content-query-HhXuWqbdzZ":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":261,"_id":262,"_source":263,"_file":264,"_stem":265,"_extension":266},"/technology-blogs/zh/552","zh",false,"","开发者分享｜手写算子没那么难，教你用MindSpore实现自适应平均池化算子！","了解基于MindSpore实现AdaptiveAvgPool2d替代方案！","2021-05-14","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/68dcd768cb534592a7bce1d7e78cfa39.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":258},"root",[17,25,34,39,53,58,65,70,75,80,85,95,100,105,110,117,122,135,143,148,156,161,169,173,180,185,192,197,202,207,212,220,225,236,241,246,251],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"开发者分享手写算子没那么难教你用mindspore实现自适应平均池化算子",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":33},"img",{"alt":7,"src":32},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/28c0c60456bb461d983da3d1323b46b7.gif",[],{"type":18,"tag":26,"props":35,"children":36},{},[37],{"type":24,"value":38},"本文来源于：[MindSpore 论坛](MindSpore 论坛)",{"type":18,"tag":26,"props":40,"children":41},{},[42,44],{"type":24,"value":43},"作者：",{"type":18,"tag":45,"props":46,"children":50},"a",{"href":47,"rel":48},"https://bbs.huaweicloud.com/forum/home.php?mod=space&uid=302496&do=thread",[49],"nofollow",[51],{"type":24,"value":52},"luxuff",{"type":18,"tag":26,"props":54,"children":55},{},[56],{"type":24,"value":57},"最近用 MindSpore 复现 Fast SCNN 网络的时候，里面用到了一个自适应平均池化算子 “nn.AdaptiveAvgPool2d”，但是 MindSpore 目前的版本还没有提供对应的算子供开发者使用，所以笔者查阅了一部分资料，了解了其计算原理。实验结果表明，下述方法可以替代 AdaptiveAvgPool2d 算子，可以此为基础迁移至其他 AI 框架中。",{"type":18,"tag":26,"props":59,"children":60},{},[61],{"type":18,"tag":30,"props":62,"children":64},{"alt":7,"src":63},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/e1d3ce469a9e45e399486a9fdd453b4a.png",[],{"type":18,"tag":26,"props":66,"children":67},{},[68],{"type":24,"value":69},"AdaptiveAvgPool2d",{"type":18,"tag":26,"props":71,"children":72},{},[73],{"type":24,"value":74},"AdaptiveAvgPool2d 的功能简单来讲就是，开发者只需要传入“待处理数据” 和 “目标大小”，该算子会自动计算池化操作时的kernel_size和stride等数据，使得输出结果的shape为“目标大小”。",{"type":18,"tag":26,"props":76,"children":77},{},[78],{"type":24,"value":79},"但其实上述理解是不完全正确的，甚至是错误的。基于此理解可以稍微理解其功能，但是如果想对其进行复现，则会完全陷入误区。",{"type":18,"tag":26,"props":81,"children":82},{},[83],{"type":24,"value":84},"目前普遍存在的一种复现方式是，既然我们知道普通池化操作的计算过程是，已知池化层的kernel_size、padding、stride 以及输入张量的大小input_size，则输出张量大小 output_size 为：",{"type":18,"tag":86,"props":87,"children":89},"pre",{"code":88},"output_size =（input_size+2*padding-kernel_size）/stride +1\n",[90],{"type":18,"tag":91,"props":92,"children":93},"code",{"__ignoreMap":7},[94],{"type":24,"value":88},{"type":18,"tag":26,"props":96,"children":97},{},[98],{"type":24,"value":99},"（此处简化了计算，如果输入张量的 column 值和 row 值不等，则分别计算）",{"type":18,"tag":26,"props":101,"children":102},{},[103],{"type":24,"value":104},"那么我们就想办法通过 input_size 和 output_size 反推出 kernel_size、stride 等数值就好了，反向回去计算一定可以得到我们需要的数据。",{"type":18,"tag":26,"props":106,"children":107},{},[108],{"type":24,"value":109},"但其实这种方法仅合其形，不对其意。我们只能使得输出张量是我们需要的“目标大小”，其内部数值却和 “nn.AdaptiveAvgPool2d” 
![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/d4a07e13db8749b6bfac4275ff57a839.png)

## How AdaptiveAvgPool2d Is Computed

After combing through quite a few references, I finally found what I needed in [this PyTorch forum thread](https://discuss.pytorch.org/t/what-is-adaptiveavgpool2d/26897), where Thomas gives a remarkably accurate explanation of AdaptiveAvgPool2d. I adapted the code he shared to the NCHW layout:

```python
import torch

def torch_pool(inputs, target_size):
    # inputs is NCHW; target_size is (H, W)
    H, W = target_size
    # Window bounds along the last axis, for each output column i:
    # start = floor(i * in / out), end = ceil((i + 1) * in / out)
    s_p1 = (torch.arange(W, dtype=torch.float32) * (inputs.size(-1) / W)).long()
    e_p1 = ((torch.arange(W, dtype=torch.float32) + 1) * (inputs.size(-1) / W)).ceil().long()
    # Same along the second-to-last axis (output rows)
    s_p2 = (torch.arange(H, dtype=torch.float32) * (inputs.size(-2) / H)).long()
    e_p2 = ((torch.arange(H, dtype=torch.float32) + 1) * (inputs.size(-2) / H)).ceil().long()
    pooled2 = []
    for i_H in range(H):
        pooled = []
        for i_W in range(W):
            # Average one (possibly overlapping) window
            res = torch.mean(inputs[:, :, s_p2[i_H]:e_p2[i_H], s_p1[i_W]:e_p1[i_W]],
                             dim=(-2, -1), keepdim=True)
            pooled.append(res)
        pooled2.append(torch.cat(pooled, -1))
    return torch.cat(pooled2, -2)

if __name__ == '__main__':
    row = [2, 3, 4, 5, 6, 9, 7, 8]
    data = [[[row] * 6] * 2]              # N=1, C=2, H=6, W=8
    inputs = torch.tensor(data, dtype=torch.float32)
    print(inputs)
    print(inputs.size())
    print("*********************************")
    print(torch_pool(inputs, (1, 3)))
    print("*********************************")
    print(torch_pool(inputs, (2, 3)))
    print("*********************************")
    print(torch_pool(inputs, (3, 3)))
    print("*********************************")
    print(torch_pool(inputs, (6, 5)))
```

The results:

```
tensor([[[[2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.]],

         [[2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.],
          [2., 3., 4., 5., 6., 9., 7., 8.]]]])
torch.Size([1, 2, 6, 8])
*********************************
tensor([[[[3., 6., 8.]],

         [[3., 6., 8.]]]])
*********************************
tensor([[[[3., 6., 8.],
          [3., 6., 8.]],

         [[3., 6., 8.],
          [3., 6., 8.]]]])
*********************************
tensor([[[[3., 6., 8.],
          [3., 6., 8.],
          [3., 6., 8.]],

         [[3., 6., 8.],
          [3., 6., 8.],
          [3., 6., 8.]]]])
*********************************
tensor([[[[2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000]],

         [[2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000],
          [2.5000, 4.0000, 5.5000, 7.3333, 7.5000]]]])
```
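The key detail is that each window's start and end are computed independently: along a dimension of input length L pooled to output length T, window i spans [floor(i * L / T), ceil((i + 1) * L / T)), so neighboring windows can overlap and differ in size. A quick check for the (6, 5) case above (L = 8, T = 5) shows where the 7.3333 comes from:

```python
import math

L, T = 8, 5                           # input width, target width
row = [2, 3, 4, 5, 6, 9, 7, 8]        # one row of the test tensor
for i in range(T):
    s = (i * L) // T                  # window start, floor(i * L / T)
    e = math.ceil((i + 1) * L / T)    # window end, ceil((i + 1) * L / T)
    window = row[s:e]
    print(i, (s, e), window, sum(window) / len(window))
# Window means: 2.5, 4.0, 5.5, 7.333..., 7.5, matching torch_pool's last result;
# window 3 is (4, 7) -> [6, 9, 7], which averages to 7.3333
```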
To verify, compare against PyTorch's own nn.AdaptiveAvgPool2d operator:

```python
import torch
import torch.nn as nn

if __name__ == '__main__':
    row = [2, 3, 4, 5, 6, 9, 7, 8]
    data = [[[row] * 6] * 2]              # N=1, C=2, H=6, W=8
    x = torch.tensor(data, dtype=torch.float32)
    print(x)
    print(x.size())
    print("*********************************")
    print(nn.AdaptiveAvgPool2d((1, 3))(x))
    print("*********************************")
    print(nn.AdaptiveAvgPool2d((2, 3))(x))
    print("*********************************")
    print(nn.AdaptiveAvgPool2d((3, 3))(x))
    print("*********************************")
    print(nn.AdaptiveAvgPool2d((6, 5))(x))
```

The output is identical, line for line, to the torch_pool results above: both the output shape and the values inside the tensor agree. And no matter which part of NCHW you alter (expanding any one of the N, C, H, or W dimensions), the two computations stay consistent.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/8cf33452c5dc47b99f8a1f21d653f00c.png)

## A MindSpore Version of AdaptiveAvgPool2d

Looking at the code above, a MindSpore rewrite only needs to replace the three main operators torch.arange, torch.mean, and torch.cat, plus add the floor/ceil rounding. The MindSpore counterparts are operators such as ops.ReduceMean(keep_dims=True) and ops.Concat(axis=-1); it is a matter of one-for-one substitution.

However, when I rewrote the line `res = torch.mean(inputs[:, :, s_p2[i_H]:e_p2[i_H], s_p1[i_W]:e_p1[i_W]], dim=(-2,-1), keepdim=True)`, MindSpore threw an exception when slicing with variable (tensor-valued) indices; I am not sure whether this was a problem with my usage.

As a stopgap I wrote a fixed-size version instead. For example, to pool N×C×32×64 data down to N×C×6×6, the slice indices can be computed ahead of time, giving a usable version like this:

```python
def _AvgPool2d6x6(self, x):
    # Slice bounds precomputed for fixed 32x64 -> 6x6 pooling:
    # start = floor(i * in / 6), end = ceil((i + 1) * in / 6)
    s_p1 = [0, 10, 21, 32, 42, 53]     # window starts along W (64 -> 6)
    e_p1 = [11, 22, 32, 43, 54, 64]    # window ends along W
    s_p2 = [0, 5, 10, 16, 21, 26]      # window starts along H (32 -> 6)
    e_p2 = [6, 11, 16, 22, 27, 32]     # window ends along H
    pooled2 = []
    for i_H in range(6):
        pooled = []
        for i_W in range(6):
            # self.reduceMean is ops.ReduceMean(keep_dims=True)
            res = self.reduceMean(x[:, :, s_p2[i_H]:e_p2[i_H], s_p1[i_W]:e_p1[i_W]], (-2, -1))
            pooled.append(res)
        # self.concat1 is ops.Concat(axis=-1): stitch the row back together
        pooled2.append(self.concat1((pooled[0], pooled[1], pooled[2], pooled[3], pooled[4], pooled[5])))
    # self.concat2 is ops.Concat(axis=-2): stack the rows
    return self.concat2((pooled2[0], pooled2[1], pooled2[2], pooled2[3], pooled2[4], pooled2[5]))
```
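The same precomputation generalizes: the target size is fixed when the network is built, and with static input shapes the slice bounds are plain Python integers, so the slices never involve tensor-valued indices. Below is a minimal sketch of that generalization, assuming the MindSpore 1.x ops API and PyNative-style execution (graph mode may need extra care); the class name is just for illustration:

```python
import math
import mindspore.nn as nn
import mindspore.ops as ops

class AdaptiveAvgPool2dSketch(nn.Cell):
    """Adaptive average pooling for a target size fixed at construction time."""

    def __init__(self, target_size):
        super().__init__()
        self.H, self.W = target_size
        self.reduce_mean = ops.ReduceMean(keep_dims=True)
        self.concat_w = ops.Concat(axis=-1)
        self.concat_h = ops.Concat(axis=-2)

    def construct(self, x):
        h_in, w_in = x.shape[-2], x.shape[-1]
        rows = []
        for i in range(self.H):
            # Window bounds are plain Python ints: floor/ceil of i * in / out
            s_h, e_h = (i * h_in) // self.H, math.ceil((i + 1) * h_in / self.H)
            cols = []
            for j in range(self.W):
                s_w, e_w = (j * w_in) // self.W, math.ceil((j + 1) * w_in / self.W)
                cols.append(self.reduce_mean(x[:, :, s_h:e_h, s_w:e_w], (-2, -1)))
            rows.append(self.concat_w(tuple(cols)))
        return self.concat_h(tuple(rows))
```

With pool = AdaptiveAvgPool2dSketch((6, 6)), calling pool(x) on an N×C×32×64 input reproduces exactly the hard-coded indices in _AvgPool2d6x6 above.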
## Official MindSpore Resources

GitHub: [https://github.com/mindspore-ai/mindspore](https://github.com/mindspore-ai/mindspore)

Gitee: [https://gitee.com/mindspore/mindspore](https://gitee.com/mindspore/mindspore)

Official QQ group: 871543426

Long-press the QR code below to join the MindSpore project ↓

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/05/14/563febe311004d9d9e0ec6f27a9a81b7.jpg)