[{"data":1,"prerenderedAt":499},["ShallowReactive",2],{"content-query-yYblfGw44S":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":493,"_id":494,"_source":495,"_file":496,"_stem":497,"_extension":498},"/technology-blogs/zh/711","zh",false,"","PatchmatchNet：对learning-based稠密重建算法新的思考","PatchmatchNet为固化的learning-based类MVS方法注入了新鲜血液","2021-09-08","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/09/09/94ba204099744db294c2d7943da7f81f.png","technology-blogs","大V博文",{"type":15,"children":16,"toc":490},"root",[17,25,31,44,55,67,75,83,96,107,115,123,131,139,147,158,166,174,186,194,205,213,221,232,240,248,256,267,275,286,294,305,316,324,332,343,351,359,370,381,389,400,408,419,427,431,442,450,458,466,474,482],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"patchmatchnet对learning-based稠密重建算法新的思考",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"作者：于璠",{"type":18,"tag":26,"props":32,"children":33},{},[34,36],{"type":24,"value":35},"作者主页：",{"type":18,"tag":37,"props":38,"children":42},"a",{"href":39,"rel":40},"https://www.zhihu.com/people/yu-fan-42-9",[41],"nofollow",[43],{"type":24,"value":39},{"type":18,"tag":26,"props":45,"children":46},{},[47,49],{"type":24,"value":48},"文章来源：",{"type":18,"tag":37,"props":50,"children":53},{"href":51,"rel":52},"https://zhuanlan.zhihu.com/p/406484746",[41],[54],{"type":24,"value":51},{"type":18,"tag":26,"props":56,"children":57},{},[58],{"type":18,"tag":59,"props":60,"children":61},"strong",{},[62],{"type":18,"tag":59,"props":63,"children":64},{},[65],{"type":24,"value":66},"前言",{"type":18,"tag":26,"props":68,"children":69},{},[70],{"type":18,"tag":59,"props":71,"children":72},{},[73],{"type":24,"value":74},"自MVSNet[1]提出以来，稠密重建(Multi-view Stereo, MVS)领域的learning-based方法基于正平扫(front-to-parallel)+可微单应性形变(differentiable 
homography)构建多视图对的cost volume，之后利用3D CNN进行正则的技术路线似乎已经根深蒂固。后续的R-MVSNet[2]对于cost volume在深度维度的正则方式进行创新，采用Conv-GRU逐层处理大幅减少了显存占用；CasMVSNet[3]首次提出coarse-to-fine的结构范式优化了显存占用和计算效率；Vis-MVSNet[4]和CVP-MVSNet[5]分别从多视图对cost volume的聚合方式以及coarse-to-fine后续阶段的深度假设范围进行了深入思考。虽然在这个过程中，近景MVS数据集DTU和中近景MVS数据集Tanks & Temples也不断被新的learning-based方法霸榜，但是没有一篇文章能够跳出MVSNet构建的算法框架去思考learning-based方法未来可能的发展方向。",{"type":18,"tag":26,"props":76,"children":77},{},[78],{"type":18,"tag":59,"props":79,"children":80},{},[81],{"type":24,"value":82},"随着CVPR2021会议的召开，multi-view stereo领域的一篇oral文章《PatchmatchNet: Learned Multi-View Patchmatch Stereo》给出了一种新的可能性——基于patch match的思路去做learning-based稠密重建算法。",{"type":18,"tag":26,"props":84,"children":85},{},[86],{"type":18,"tag":59,"props":87,"children":88},{},[89],{"type":18,"tag":37,"props":90,"children":93},{"href":91,"rel":92},"https://link.zhihu.com/?target=https://arxiv.org/abs/2012.01411",[41],[94],{"type":24,"value":95},"PatchmatchNet: Learned Multi-View Patchmatch Stereoarxiv.org/abs/2012.01411",{"type":18,"tag":26,"props":97,"children":98},{},[99],{"type":18,"tag":59,"props":100,"children":101},{},[102],{"type":18,"tag":59,"props":103,"children":104},{},[105],{"type":24,"value":106},"Learning-based Patchmatch",{"type":18,"tag":26,"props":108,"children":109},{},[110],{"type":18,"tag":59,"props":111,"children":112},{},[113],{"type":24,"value":114},"文章提出的可学习Patchmatch方法接近了传统Patchmatch方法的思想，主要分为以下三个步骤：",{"type":18,"tag":26,"props":116,"children":117},{},[118],{"type":18,"tag":59,"props":119,"children":120},{},[121],{"type":24,"value":122},"1. 初始化：生成随机深度假设",{"type":18,"tag":26,"props":124,"children":125},{},[126],{"type":18,"tag":59,"props":127,"children":128},{},[129],{"type":24,"value":130},"2. 传播：像素之间互相传播深度假设",{"type":18,"tag":26,"props":132,"children":133},{},[134],{"type":18,"tag":59,"props":135,"children":136},{},[137],{"type":24,"value":138},"3. 
评估：计算匹配cost并生成深度预测结果",{"type":18,"tag":26,"props":140,"children":141},{},[142],{"type":18,"tag":59,"props":143,"children":144},{},[145],{"type":24,"value":146},"其中深度初始化阶段并没有太多创新，依旧是在全局深度范围或者基于上个阶段或者上个迭代的深度预测结果构建深度假设，这里就不做过多介绍。文章的主要亮点在于传播和评估两个步骤的设计，下面逐一进行介绍。",{"type":18,"tag":26,"props":148,"children":149},{},[150],{"type":18,"tag":59,"props":151,"children":152},{},[153],{"type":18,"tag":59,"props":154,"children":155},{},[156],{"type":24,"value":157},"Adaptive Propagation",{"type":18,"tag":26,"props":159,"children":160},{},[161],{"type":18,"tag":59,"props":162,"children":163},{},[164],{"type":24,"value":165},"传播(propagation)可以认为是patchmatch方法中的核心步骤，其目的是为了获取待预测像素周围的深度信息，利用局部视野构建下个迭代步骤的深度选择范围。理论上只要位于同一表面的像素中有一个像素其深度初始化或者预测得接近真实值，那么整个表面的像素都可以通过传播效应，迭代优化最终获得正确的深度预测。",{"type":18,"tag":26,"props":167,"children":168},{},[169],{"type":18,"tag":59,"props":170,"children":171},{},[172],{"type":24,"value":173},"作者实现传播的思想也非常巧妙，借鉴了经典的可形变卷积(deformable convolution)[6]思路，通过将主视图的特征输入到一个2D CNN去预测每个像素的空间相关邻居的偏移量(2D offsets)",{"type":18,"tag":26,"props":175,"children":176},{},[177],{"type":18,"tag":59,"props":178,"children":179},{},[180],{"type":18,"tag":181,"props":182,"children":185},"img",{"alt":183,"src":184},"1.png","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161748rlkinlgq7fcfg4b5.png",[],{"type":18,"tag":26,"props":187,"children":188},{},[189],{"type":18,"tag":59,"props":190,"children":191},{},[192],{"type":24,"value":193},"这里 为上个迭代步骤的深度预测结果， 代表传播窗口的大小，限定了每个像素在下个迭代的深度假设延展范围。",{"type":18,"tag":26,"props":195,"children":196},{},[197],{"type":18,"tag":59,"props":198,"children":199},{},[200],{"type":18,"tag":181,"props":201,"children":204},{"alt":202,"src":203},"2.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161800f2qapsp4vxvuy06y.jpg",[],{"type":18,"tag":26,"props":206,"children":207},{},[208],{"type":18,"tag":59,"props":209,"children":210},{},[211],{"type":24,"value":212},"图1 Adaptive 
Propagation",{"type":18,"tag":26,"props":214,"children":215},{},[216],{"type":18,"tag":59,"props":217,"children":218},{},[219],{"type":24,"value":220},"作者可视化了训练完成之后的网络所预测的offset，图1-b为固定传播的深度假设传播范围，图1-c是施加了offset之后的深度假设传播范围，可以发现offset倾向于分布在位于同一平面的像素上。",{"type":18,"tag":26,"props":222,"children":223},{},[224],{"type":18,"tag":59,"props":225,"children":226},{},[227],{"type":18,"tag":59,"props":228,"children":229},{},[230],{"type":24,"value":231},"Adaptive Evaluation",{"type":18,"tag":26,"props":233,"children":234},{},[235],{"type":18,"tag":59,"props":236,"children":237},{},[238],{"type":24,"value":239},"作者定义的评估阶段流程相对复杂，包含了可微单应性形变、cost volume计算、自适应空间cost聚合(adaptive spatial cost aggregation)以及深度回归。这里其余的步骤并没有太多创新，因此只重点介绍下核心步骤自适应空间cost聚合。",{"type":18,"tag":26,"props":241,"children":242},{},[243],{"type":18,"tag":59,"props":244,"children":245},{},[246],{"type":24,"value":247},"Adaptive Spatial Cost Aggregation",{"type":18,"tag":26,"props":249,"children":250},{},[251],{"type":18,"tag":59,"props":252,"children":253},{},[254],{"type":24,"value":255},"传统MVS算法通常在一个空间窗口上对cost信息进行聚合，这样做可以提升匹配的鲁棒性同时起到隐式平滑的作用。这个步骤在之前的learning-based方法中主要是通过3D CNN对cost volume进行正则来完成的。而在本文中，作者抛弃了繁重的3D CNN，手动对cost volume中的每个体素进行邻居信息聚合。作者采取的方式同Adaptive Propagation步骤类似，同样用了一个2D CNN来预测主视图特征每个像素位点上的offset。得到offset之后，对于cost volume在深度维度的每个cost map进行spatial cost aggregation，具体公式如下：",{"type":18,"tag":26,"props":257,"children":258},{},[259],{"type":18,"tag":59,"props":260,"children":261},{},[262],{"type":18,"tag":181,"props":263,"children":266},{"alt":264,"src":265},"3.png","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161821wln9igww46rv1bpt.png",[],{"type":18,"tag":26,"props":268,"children":269},{},[270],{"type":18,"tag":59,"props":271,"children":272},{},[273],{"type":24,"value":274},"采用上述空间cost信息聚合的方式，一定程度上等价于对cost volume用3D 
CNN进行正则，训练完成之后offset的分布如下：",{"type":18,"tag":26,"props":276,"children":277},{},[278],{"type":18,"tag":59,"props":279,"children":280},{},[281],{"type":18,"tag":181,"props":282,"children":285},{"alt":283,"src":284},"4.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161832hl84ncwbcvi1678d.jpg",[],{"type":18,"tag":26,"props":287,"children":288},{},[289],{"type":18,"tag":59,"props":290,"children":291},{},[292],{"type":24,"value":293},"图2 Adaptive Spatial Cost Aggregation",{"type":18,"tag":26,"props":295,"children":296},{},[297],{"type":18,"tag":59,"props":298,"children":299},{},[300],{"type":18,"tag":59,"props":301,"children":302},{},[303],{"type":24,"value":304},"Pipeline",{"type":18,"tag":26,"props":306,"children":307},{},[308],{"type":18,"tag":59,"props":309,"children":310},{},[311],{"type":18,"tag":181,"props":312,"children":315},{"alt":313,"src":314},"5.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161852ffbf6qkuo7cd6qw8.jpg",[],{"type":18,"tag":26,"props":317,"children":318},{},[319],{"type":18,"tag":59,"props":320,"children":321},{},[322],{"type":24,"value":323},"图3 Patchmatch流程",{"type":18,"tag":26,"props":325,"children":326},{},[327],{"type":18,"tag":59,"props":328,"children":329},{},[330],{"type":24,"value":331},"整个learning-based patchmatch的步骤如图3所示，这里展示的是一次patchmatch迭代的流程。单次迭代预测的深度图并不理想，需要通过迭代优化的方式将深度图进行refine。完整的pipeline如图4所示：",{"type":18,"tag":26,"props":333,"children":334},{},[335],{"type":18,"tag":59,"props":336,"children":337},{},[338],{"type":18,"tag":181,"props":339,"children":342},{"alt":340,"src":341},"6.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161908pwenftzmenamopzh.jpg",[],{"type":18,"tag":26,"props":344,"children":345},{},[346],{"type":18,"tag":59,"props":347,"children":348},{},[349],{"type":24,"value":350},"图4 
PatchmatchNet算法流水线",{"type":18,"tag":26,"props":352,"children":353},{},[354],{"type":18,"tag":59,"props":355,"children":356},{},[357],{"type":24,"value":358},"整个网络的结构基于coarse-to-fine的范式，输入的主视图+辅助视图组首先经过一个多尺度的特征提取器生成三个尺度的特征。之后在每个尺度的特征图上分别进行patchmatch迭代优化生成当前尺度的深度图，上采样之后作为下个尺度patchmatch的初始深度图。分辨率最高的尺度上生成深度预测结果之后，还会上采样并经过refine步骤得到最终的预测结果。",{"type":18,"tag":26,"props":360,"children":361},{},[362],{"type":18,"tag":59,"props":363,"children":364},{},[365],{"type":18,"tag":59,"props":366,"children":367},{},[368],{"type":24,"value":369},"实验结果",{"type":18,"tag":26,"props":371,"children":372},{},[373],{"type":18,"tag":59,"props":374,"children":375},{},[376],{"type":18,"tag":181,"props":377,"children":380},{"alt":378,"src":379},"7.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161925njirwoqjjsoz8gla.jpg",[],{"type":18,"tag":26,"props":382,"children":383},{},[384],{"type":18,"tag":59,"props":385,"children":386},{},[387],{"type":24,"value":388},"从DTU测试集的结果上看，PatchmatchNet虽然网络结构和计算步骤简单，依旧能够达到SOTA的精度水平。同时，由于网络结构轻量，计算步骤简单，PatchmatchNet的速度和显存占用表现在现有SOTA方法中是最优的。",{"type":18,"tag":26,"props":390,"children":391},{},[392],{"type":18,"tag":59,"props":393,"children":394},{},[395],{"type":18,"tag":181,"props":396,"children":399},{"alt":397,"src":398},"8.jpg","https://bbs-img.huaweicloud.com/data/forums/attachment/forum/202109/08/161940kmdgpdswmolgqdrj.jpg",[],{"type":18,"tag":26,"props":401,"children":402},{},[403],{"type":18,"tag":59,"props":404,"children":405},{},[406],{"type":24,"value":407},"图5 
PatchmatchNet同SOTA方法的显存占用与速度对比",{"type":18,"tag":26,"props":409,"children":410},{},[411],{"type":18,"tag":59,"props":412,"children":413},{},[414],{"type":18,"tag":59,"props":415,"children":416},{},[417],{"type":24,"value":418},"思考与总结",{"type":18,"tag":26,"props":420,"children":421},{},[422],{"type":18,"tag":59,"props":423,"children":424},{},[425],{"type":24,"value":426},"PatchmatchNet的出现为已经固化的learning-based类MVS方法注入了新鲜血液，虽然没有彻底颠覆之前的算法流水线，但其通过引入patchmatch方法的思想，同时以极简的网络结构，较快的推理速度和非常少的显存占用达到了同SOTA方法不相上下的精度水准。",{"type":18,"tag":428,"props":429,"children":430},"hr",{},[],{"type":18,"tag":26,"props":432,"children":433},{},[434],{"type":18,"tag":59,"props":435,"children":436},{},[437],{"type":18,"tag":59,"props":438,"children":439},{},[440],{"type":24,"value":441},"Reference",{"type":18,"tag":26,"props":443,"children":444},{},[445],{"type":18,"tag":59,"props":446,"children":447},{},[448],{"type":24,"value":449},"[1] Yao Y, Luo Z, Li S, et al. Mvsnet: Depth inference for unstructured multi-view stereo[C]//Proceedings of the European Conference on Computer Vision (ECCV). 2018: 767-783.",{"type":18,"tag":26,"props":451,"children":452},{},[453],{"type":18,"tag":59,"props":454,"children":455},{},[456],{"type":24,"value":457},"[2] Yao Y, Luo Z, Li S, et al. Recurrent mvsnet for high-resolution multi-view stereo depth inference[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019: 5525-5534.",{"type":18,"tag":26,"props":459,"children":460},{},[461],{"type":18,"tag":59,"props":462,"children":463},{},[464],{"type":24,"value":465},"[3] Gu X, Fan Z, Zhu S, et al. Cascade cost volume for high-resolution multi-view stereo and stereo matching[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020: 2495-2504.",{"type":18,"tag":26,"props":467,"children":468},{},[469],{"type":18,"tag":59,"props":470,"children":471},{},[472],{"type":24,"value":473},"[4] Chen R, Han S, Xu J. Visibility-aware point-based multi-view stereo network[J]. IEEE transactions on pattern analysis and machine intelligence, 2020.",{"type":18,"tag":26,"props":475,"children":476},{},[477],{"type":18,"tag":59,"props":478,"children":479},{},[480],{"type":24,"value":481},"[5] Yang J, Mao W, Alvarez J M, et al. Cost volume pyramid based depth inference for multi-view stereo[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020: 4877-4886.",{"type":18,"tag":26,"props":483,"children":484},{},[485],{"type":18,"tag":59,"props":486,"children":487},{},[488],{"type":24,"value":489},"[6] Dai J, Qi H, Xiong Y, et al. Deformable convolutional networks[C]//Proceedings of the IEEE international conference on computer vision. 2017: 764-773.",{"title":7,"searchDepth":491,"depth":491,"links":492},4,[],"markdown","content:technology-blogs:zh:711.md","content","technology-blogs/zh/711.md","technology-blogs/zh/711","md",1776506140112]