[{"data":1,"prerenderedAt":399},["ShallowReactive",2],{"content-query-Xu2oQvFZiN":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":393,"_id":394,"_source":395,"_file":396,"_stem":397,"_extension":398},"/technology-blogs/zh/2983","zh",false,"","MindSpore AI科学计算系列 | MeshGPT显著提升三维几何表示的质量","作者：于璠 来源：知乎","2024-02-18","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/767cc89223c642578664429c4ab3d327.png","technology-blogs",{"type":14,"children":15,"toc":388},"root",[16,24,43,51,65,70,80,85,99,104,109,114,119,124,129,137,142,150,155,160,167,172,177,182,189,194,202,207,212,217,225,230,240,245,252,257,267,272,277,284,289,299,304,309,318,329,334,339,344,355,366,377],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"mindspore-ai科学计算系列-meshgpt显著提升三维几何表示的质量",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28,30,36,38],{"type":23,"value":29},"**作者：**",{"type":17,"tag":31,"props":32,"children":33},"strong",{},[34],{"type":23,"value":35},"于璠",{"type":23,"value":37}," 
",{"type":17,"tag":31,"props":39,"children":40},{},[41],{"type":23,"value":42},"来源：知乎",{"type":17,"tag":25,"props":44,"children":45},{},[46],{"type":17,"tag":31,"props":47,"children":48},{},[49],{"type":23,"value":50},"背景",{"type":17,"tag":25,"props":52,"children":53},{},[54,56],{"type":23,"value":55},"在之前的文章中，我们介绍了三维形状表征（",{"type":17,"tag":57,"props":58,"children":62},"a",{"href":59,"rel":60},"https://zhuanlan.zhihu.com/p/670504839)%E3%80%82%E4%B8%8E%E8%AF%B8%E5%A6%82%E7%82%B9%E4%BA%91%E6%88%96%E4%BD%93%E7%B4%A0%E7%9A%843D%E5%BD%A2%E7%8A%B6%E8%A1%A8%E7%A4%BA%E7%9B%B8%E6%AF%94%EF%BC%8C%E7%BD%91%E6%A0%BC%E6%8F%90%E4%BE%9B%E4%BA%86%E6%9B%B4%E8%BF%9E%E8%B4%AF%E7%9A%84%E8%A1%A8%E9%9D%A2%E8%A1%A8%E7%A4%BA%EF%BC%8C%E5%AE%83%E4%BB%AC%E6%9B%B4%E5%8F%AF%E6%8E%A7%EF%BC%8C%E6%9B%B4%E6%98%93%E4%BA%8E%E6%93%8D%E4%BD%9C%EF%BC%8C%E6%9B%B4%E7%B4%A7%E5%87%91%EF%BC%8C%E5%90%8C%E6%97%B6%E4%B9%9F%E6%9B%B4%E6%8E%A5%E8%BF%91%E7%BD%91%E6%A0%BC%E7%94%9F%E6%88%90%E5%90%8E%E7%9A%84%E4%B8%8B%E4%B8%80%E6%AD%A5%E5%B7%A5%E4%BD%9C%E5%86%85%E5%AE%B9%E3%80%82",[61],"nofollow",[63],{"type":23,"value":64},"https://zhuanlan.zhihu.com/p/670504839)。与诸如点云或体素的3D形状表示相比，网格提供了更连贯的表面表示，它们更可控，更易于操作，更紧凑，同时也更接近网格生成后的下一步工作内容。",{"type":17,"tag":25,"props":66,"children":67},{},[68],{"type":23,"value":69},"然而目前AI生成网格主要通过iso-surfacing方法，生成的三角形网格往往十分密集，否则会损失几何细节或者造成几何特征失真。近期，慕尼黑工业大学牵头发表了通过Decoder-only Transformer生成三角形网格的MeshGPT[1]，在生成高质量紧凑网格方面取得了进展。",{"type":17,"tag":25,"props":71,"children":72},{},[73,75],{"type":23,"value":74},"**1、**",{"type":17,"tag":31,"props":76,"children":77},{},[78],{"type":23,"value":79},"方法",{"type":17,"tag":25,"props":81,"children":82},{},[83],{"type":23,"value":84},"MeshGPT主要由两个网络组成。首先通过Encoder-Decoder架构，学习到经过残差量化的三角形embeddings。而后通过Decoder-only 
GPT，生成紧凑三角形网格。",{"type":17,"tag":25,"props":86,"children":87},{},[88,93,94],{"type":17,"tag":31,"props":89,"children":90},{},[91],{"type":23,"value":92},"1.1",{"type":23,"value":37},{"type":17,"tag":31,"props":95,"children":96},{},[97],{"type":23,"value":98},"学习量化三角形embeddings",{"type":17,"tag":25,"props":100,"children":101},{},[102],{"type":23,"value":103},"如图1所示，学习量化三角形embeddings主要分成四部分：",{"type":17,"tag":25,"props":105,"children":106},{},[107],{"type":23,"value":108},"1）通过图卷积encoder，将输入的网格转换成rich features；",{"type":17,"tag":25,"props":110,"children":111},{},[112],{"type":23,"value":113},"2）经过残差量化，将features量化为码表embeddings，这样可以有效减小序列长度，减轻transformer压力；",{"type":17,"tag":25,"props":115,"children":116},{},[117],{"type":23,"value":118},"3）对embeddings进行排序；",{"type":17,"tag":25,"props":120,"children":121},{},[122],{"type":23,"value":123},"4）通过一维ResNet，输出最简单的9通道网格embeddings。网络的输入和输出均为网格信息。",{"type":17,"tag":25,"props":125,"children":126},{},[127],{"type":23,"value":128},"下面我们对其中三部分进行具体介绍(对embeddings的排序案例中没有详细说明)。",{"type":17,"tag":25,"props":130,"children":131},{},[132],{"type":17,"tag":133,"props":134,"children":136},"img",{"alt":7,"src":135},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/e26aeb20f4de4684a30e2a76037fc674.png",[],{"type":17,"tag":25,"props":138,"children":139},{},[140],{"type":23,"value":141},"图1. 
学习量化三角形embeddings的过程",{"type":17,"tag":25,"props":143,"children":144},{},[145],{"type":17,"tag":31,"props":146,"children":147},{},[148],{"type":23,"value":149},"(1) GraphSAGE作为encoder",{"type":17,"tag":25,"props":151,"children":152},{},[153],{"type":23,"value":154},"本案例采用的encoder参考了PolyGen。但直接套用PolyGen会对接下来的transformer训练形成挑战，PolyGen采用离散坐标点作为token不能捕捉到几何特征，缺乏与临近网格的联系。因此本案例采用了图神经网络GraphSAGE[2]作为encoder。GraphSAGE的核心两点可以总结为：邻居采样和特征聚合。",{"type":17,"tag":25,"props":156,"children":157},{},[158],{"type":23,"value":159},"具体地，GraphSAGE的SAGE Conv Layer将节点自身的属性特征与采样的邻居节点特征分别做一次线性变换（也就是乘一个W参数矩阵，一般还会加个relu激活增强表示），然后将两者连接起来，再进行一次线性变换得到目标节点的特征表示，如图2所示。GraphSAGE的实践发现，采样阶数k不必取很大的值，当k=2时，效果就非常好了，也就是只用扩展到2阶邻居即可。至于邻居的个数，文中提到S1×S2\u003C=500，即两次扩展的邻居数之积小于500，大约每次只需要扩展20个邻居即可。",{"type":17,"tag":25,"props":161,"children":162},{},[163],{"type":17,"tag":133,"props":164,"children":166},{"alt":7,"src":165},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/43b13a58135d4ac5a55ddacc63bb7d78.png",[],{"type":17,"tag":25,"props":168,"children":169},{},[170],{"type":23,"value":171},"图2. GraphSAGE示意图",{"type":17,"tag":25,"props":173,"children":174},{},[175],{"type":23,"value":176},"使用GraphSAGE的好处在于，通过邻居采样的方式可以解决GCN内存爆炸的问题，适用于大规模图；将直推式学习（transductive）转化为归纳式（inductive）学习，避免节点的特征每次都需要重训的情况，支持增量特征；引入邻居采样，会将直推式节点只表示一种局部结构转变为对应多种局部结构的节点归纳表示，可有效防止训练过拟合，增强泛化能力。",{"type":17,"tag":25,"props":178,"children":179},{},[180],{"type":23,"value":181},"在本案例中，可以将每个网格面视作一个节点，与其相邻的面与它本身通过无方向线段连接，如图3所示。每个节点包括信息有：9个坐标值(三个顶点，每个顶点三维坐标)、面的法向方向、面的三个角的角度和面的面积。而后，经过多层前文提到的SAGE Conv Layer，将输入的网格转换成rich features。",{"type":17,"tag":25,"props":183,"children":184},{},[185],{"type":17,"tag":133,"props":186,"children":188},{"alt":7,"src":187},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/8bfc81bdb8894f18b9dd4ac0a8014418.png",[],{"type":17,"tag":25,"props":190,"children":191},{},[192],{"type":23,"value":193},"图3. 
本案例中SAGE Conv Layer的应用",{"type":17,"tag":25,"props":195,"children":196},{},[197],{"type":17,"tag":31,"props":198,"children":199},{},[200],{"type":23,"value":201},"(2) 残差量化",{"type":17,"tag":25,"props":203,"children":204},{},[205],{"type":23,"value":206},"如果直接使用上一节生成的rich features作为embeddings，即每个网格面使用单个code，会造成重构精度不足的现象。因此，本案例参考了残差向量量化(Residual vector Quantization，RQ)[3]的方法，每个面使用D个codes(D为RQ的深度)来进行改善。",{"type":17,"tag":25,"props":208,"children":209},{},[210],{"type":23,"value":211},"残差量化应用了残差的思想，将原先的一层码本映射变成很多层映射，可以理解是用一个共享的码本去将原先的下采样的一个数值矩阵变成一个深度为D的向量矩阵，那么每个矩阵（i，j）位置对应的内容就成为了一个D维的向量，其中每个维度代表了一个残差层的结果。共享码本(也就是说D个层共用一个码本)的好处是：首先，使用单独的码本需要进行广泛的超参数搜索以确定每个深度的码本大小，但共享码本只需要确定总码本大小；其次，共享码本使每个量化深度的所有代码嵌入都可用，因此，代码可以在每个深度使用，以最大化其效用。",{"type":17,"tag":25,"props":213,"children":214},{},[215],{"type":23,"value":216},"在本案例中，将特征通道分解到每个面的三个网格节点上，而后通过共享的网格节点聚合特征，每个节点保留D/3个codes，最终得到每个面的D个codes。这样在保证重建质量的前提下，能够降低其计算成本，提高生成速度。",{"type":17,"tag":25,"props":218,"children":219},{},[220],{"type":17,"tag":31,"props":221,"children":222},{},[223],{"type":23,"value":224},"(3) 一维ResNet作为Decoder",{"type":17,"tag":25,"props":226,"children":227},{},[228],{"type":23,"value":229},"Decoder的应用较为简单。解码过程使用的是一维ResNet34网络。特别地，本案例发现相比于在连续实数域预测坐标，离散地预测往往效果更好(例如，准备一组离散的位置，给出在每个位置上的概率)。本案例也采用了这种方式。",{"type":17,"tag":25,"props":231,"children":232},{},[233,235],{"type":23,"value":234},"**1.**",{"type":17,"tag":31,"props":236,"children":237},{},[238],{"type":23,"value":239},"2 Decoder-only GPT",{"type":17,"tag":25,"props":241,"children":242},{},[243],{"type":23,"value":244},"本案例采用了GPT2-medium 
model，如图4所示。其输入为经过RQ量化的SAGE输出的embeddings。通过学到的离散位置encoding用来指明网格面的顺序。另外，注意到输出的网格节点是重复的(一点连接多面)，额外使用了MeshLab来封闭节点。",{"type":17,"tag":25,"props":246,"children":247},{},[248],{"type":17,"tag":133,"props":249,"children":251},{"alt":7,"src":250},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/b41c0605d3d94fd5b9840ff47bd170cb.png",[],{"type":17,"tag":25,"props":253,"children":254},{},[255],{"type":23,"value":256},"图4. 本文使用的Decoder-only GPT",{"type":17,"tag":25,"props":258,"children":259},{},[260,262],{"type":23,"value":261},"2、",{"type":17,"tag":31,"props":263,"children":264},{},[265],{"type":23,"value":266},"实验与结果",{"type":17,"tag":25,"props":268,"children":269},{},[270],{"type":23,"value":271},"本案例的实验中，Encoder-Decoder采用RQ深度为2，对应每个面的D为6，其中每个d有192个维度。Code book通过指数加权平均EWMA进行feature更新。采用的数据集为ShapeNetV2 Dataset，2张卡训练耗时约2天。GPT-2的context window选取为4608，需要4张卡训练约5天。",{"type":17,"tag":25,"props":273,"children":274},{},[275],{"type":23,"value":276},"如图5，相比对比方案，本案例明显能够通过紧凑轻量的网格对三维几何进行更高精度的表示。",{"type":17,"tag":25,"props":278,"children":279},{},[280],{"type":17,"tag":133,"props":281,"children":283},{"alt":7,"src":282},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/02/23/357cc4173ea7434090a1ce0b6ab4fdf7.png",[],{"type":17,"tag":25,"props":285,"children":286},{},[287],{"type":23,"value":288},"图5. 
本案例生成的三维网格表示与其他方法的对比",{"type":17,"tag":25,"props":290,"children":291},{},[292,294],{"type":23,"value":293},"3、",{"type":17,"tag":31,"props":295,"children":296},{},[297],{"type":23,"value":298},"感想",{"type":17,"tag":25,"props":300,"children":301},{},[302],{"type":23,"value":303},"本案例整体架构思路清晰明确，在三维形状的网格表示上对准确性和细节处理成本改善明显。值得参考的地方在于：使用GraphSAGE对网格进行编码并进行RQ残差量化的整体思路和一些细节的处理，例如相比于在连续实数域预测坐标，离散地预测往往效果更好。",{"type":17,"tag":25,"props":305,"children":306},{},[307],{"type":23,"value":308},"但同时也应该注意到，本案例的效果是基于在数据集中选取四类几何种类进行强化训练后得到的。因此，在几何种类增多的情况下网络的泛化性有待进一步考察。同时，科学计算场景下，需要考虑三维形状表征的网格与科学计算中的几何离散网格如何建立起更直接高效的匹配。",{"type":17,"tag":310,"props":311,"children":313},"h2",{"id":312},"参考文献",[314],{"type":17,"tag":31,"props":315,"children":316},{},[317],{"type":23,"value":312},{"type":17,"tag":25,"props":319,"children":320},{},[321,323],{"type":23,"value":322},"[1] Siddiqui Y, Alliegro A, Artemov A, et al. MeshGPT: Generating Triangle Meshes with Decoder-Only Transformers[J]. arXiv preprint arXiv:2311.15475, 2023. ",{"type":17,"tag":57,"props":324,"children":327},{"href":325,"rel":326},"https://arxiv.org/abs/2311.15475",[61],[328],{"type":23,"value":325},{"type":17,"tag":25,"props":330,"children":331},{},[332],{"type":23,"value":333},"[2] Hamilton W, Ying Z, Leskovec J. Inductive representation learning on large graphs[J]. Advances in neural information processing systems, 2017, 30.",{"type":17,"tag":25,"props":335,"children":336},{},[337],{"type":23,"value":338},"[3] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11523–11532, 2022.",{"type":17,"tag":25,"props":340,"children":341},{},[342],{"type":23,"value":343},"往期回顾",{"type":17,"tag":18,"props":345,"children":347},{"id":346},"mindspore-ai科学计算系列-metnet3融合稀疏站点数据实现提前24h天气预报",[348],{"type":17,"tag":57,"props":349,"children":352},{"href":350,"rel":351},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247614540&idx=1&sn=08b091b4e629243defcdf97270234d88&chksm=c11e1003f6699915db3166681b1cb30c321964d71c0b1b362ba62d7ac3f1f05f2586b396be25&scene=21#wechat_redirect",[61],[353],{"type":23,"value":354},"MindSpore AI科学计算系列 | MetNet3融合稀疏站点数据，实现提前24h天气预报",{"type":17,"tag":18,"props":356,"children":358},{"id":357},"mindspore-ai科学计算系列-化学深度学习模型chemgpt的性能评估公式拟合",[359],{"type":17,"tag":57,"props":360,"children":363},{"href":361,"rel":362},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247614481&idx=1&sn=e45cb91a0dc5f51db7e80761d639a474&chksm=c11e105ef66999488545e230a32ef8c771768c8b488024d5b039bba9ed12974339f795cbe42e&scene=21#wechat_redirect",[61],[364],{"type":23,"value":365},"MindSpore AI科学计算系列 | 化学深度学习模型ChemGPT的性能评估公式拟合",{"type":17,"tag":18,"props":367,"children":369},{"id":368},"mindspore-ai科学计算系列-gnn-mom基于昇思mindspore-elec的图残差学习电磁求解器",[370],{"type":17,"tag":57,"props":371,"children":374},{"href":372,"rel":373},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247614352&idx=1&sn=6eddd5488123a7bfbb85681317d6b950&chksm=c11e2fdff669a6c95c412f2e5711d34310f56cd9d468bd7a5c0edecc6cfd6cd55ecefee097a0&scene=21#wechat_redirect",[61],[375],{"type":23,"value":376},"MindSpore AI科学计算系列 | GNN-MoM基于昇思MindSpore 
Elec的图残差学习电磁求解器",{"type":17,"tag":18,"props":378,"children":380},{"id":379},"mindspore-ai科学计算系列-vae基于mindspore-elec的特征域mt反演提升大地电磁反演的精度和分辨率",[381],{"type":17,"tag":57,"props":382,"children":385},{"href":383,"rel":384},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247613627&idx=1&sn=ad3dd2c6f59f0626a93afb93c736123e&chksm=c11e2cf4f669a5e22f5c7f33c3088e3efa800f13cb1cfac88f583df387787f3087f941167dd3&scene=21#wechat_redirect",[61],[386],{"type":23,"value":387},"MindSpore AI科学计算系列 | VAE基于MindSpore Elec的特征域MT反演，提升大地电磁反演的精度和分辨率",{"title":7,"searchDepth":389,"depth":389,"links":390},4,[391],{"id":312,"depth":392,"text":312},2,"markdown","content:technology-blogs:zh:2983.md","content","technology-blogs/zh/2983.md","technology-blogs/zh/2983","md",1776506125059]