[{"data":1,"prerenderedAt":293},["ShallowReactive",2],{"content-query-idsxJjRzKU":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":287,"_id":288,"_source":289,"_file":290,"_stem":291,"_extension":292},"/technology-blogs/zh/772","zh",false,"","关于AI Architecture未来的一些思考","Sparse-Activation/Dynamic-Routing是已经最急迫的需要","2021-11-03","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/04/7345ff7c32d649aa89ad14dfbf2ce28f.png","technology-blogs","大V博文",{"type":15,"children":16,"toc":284},"root",[17,25,31,44,55,69,74,79,84,89,104,109,114,119,128,133,138,143,148,156,161,199,204,209,214,219,224,229,234,239,244,249,254,259,264,269,274,279],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"关于ai-architecture未来的一些思考",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"作者：金雪锋",{"type":18,"tag":26,"props":32,"children":33},{},[34,36],{"type":24,"value":35},"作者主页：",{"type":18,"tag":37,"props":38,"children":42},"a",{"href":39,"rel":40},"https://www.zhihu.com/people/jin-xue-feng",[41],"nofollow",[43],{"type":24,"value":39},{"type":18,"tag":26,"props":45,"children":46},{},[47,49],{"type":24,"value":48},"文章来源：",{"type":18,"tag":37,"props":50,"children":53},{"href":51,"rel":52},"https://zhuanlan.zhihu.com/p/428802599",[41],[54],{"type":24,"value":51},{"type":18,"tag":26,"props":56,"children":57},{},[58,60,67],{"type":24,"value":59},"10月28日，Jeaf 
Dean发了一篇博客（",{"type":18,"tag":37,"props":61,"children":64},{"href":62,"rel":63},"https://link.zhihu.com/?target=https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/",[41],[65],{"type":24,"value":66},"https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/",{"type":24,"value":68},"），简单介绍了谷歌下一代AI架构Pathways的关键特性。",{"type":18,"tag":26,"props":70,"children":71},{},[72],{"type":24,"value":73},"文章引起了AI圈内很多人士的关注，特别是对AI平台架构有兴趣的对此做了非常丰富的解读。",{"type":18,"tag":26,"props":75,"children":76},{},[77],{"type":24,"value":78},"MindSpore项目组内部也进行了讨论，总结了一些观点供大家思考（这些观点主要来自岳大师）。",{"type":18,"tag":26,"props":80,"children":81},{},[82],{"type":24,"value":83},"在分享我们对AI架构的未来的一些分析判断之前，也先简单的谈谈对Pathways的几个关键点的看法。",{"type":18,"tag":26,"props":85,"children":86},{},[87],{"type":24,"value":88},"Pathways一文，三个要点，其实是两个方面：",{"type":18,"tag":90,"props":91,"children":92},"ul",{},[93,99],{"type":18,"tag":94,"props":95,"children":96},"li",{},[97],{"type":24,"value":98},"一是关于智能模型能力提升的两个点：one model fit 'all' tasks (multi-task)和multiple senses(multi-modeling)；",{"type":18,"tag":94,"props":100,"children":101},{},[102],{"type":24,"value":103},"二是切实影响了AI平台架构的一个点：sparse activation，dynamic-routing。",{"type":18,"tag":26,"props":105,"children":106},{},[107],{"type":24,"value":108},"关于第一方面，多任务，是智能算法一直在发展的方向，多任务模型问题在于任务到底有多么多？泛泛的说多任务，其实需要看这些不同的问题之间，内部是否具有通用的机理，多任务的模型对各个任务都有增益；多模态，也是智能算法一直在发展的方向，到底融合哪些信息，其实是一个按应用分场景的问题。我们认为，在多感官信息融合，类似拟人机器人等场合，为了丰富信息来源，本就是必然的；如何多信息融合，不一定有一个通用的最优模式。",{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"总之，关于第一方面的两个点，如果泛泛的一股脑的多任务多模态，效果未必就好，甚至可能过于理想主义。关键要看是否必要：智能模型的部署，至少要粗略的分两种场景，数据中心的巨无霸和边缘终端等专一小模型，在计算力和能耗上不得不考虑。 如果是面向更强或者更通用的智能的研究，怎么探索都不为过。 只是多任务和多模态，对所谓下一代AI架构来说，个人感觉不是最关键的。",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":24,"value":118},"关于第二方面，文章提到的GShard和Switch 
Transformer等代表的稀疏激活/动态路由等机制，确实会对AI平台的架构产生影响。",{"type":18,"tag":26,"props":120,"children":121},{},[122],{"type":18,"tag":123,"props":124,"children":125},"strong",{},[126],{"type":24,"value":127},"我们认为除了Jeaf Dean提到的几个点外，下面几个方向也会深刻影响AI架构未来，值得关注：",{"type":18,"tag":26,"props":129,"children":130},{},[131],{"type":24,"value":132},"1）当前AI计算模式和理想模式的差距；",{"type":18,"tag":26,"props":134,"children":135},{},[136],{"type":24,"value":137},"AI算法最基本的两部分，表达和优化。前者NeruralNetwork/DeepLearning对平台来说是算子的DAG；后者BackPropagation对平台来说是计算梯度然后逐层调整权重。对于后者，从计算流程上看，形象的说，就是ABCDE层前馈，然后EDCBA反馈，一轮又一轮。每一轮，输出对标签的“全局”错误信号，本质上要的逐层传播到最前面的层。",{"type":18,"tag":26,"props":139,"children":140},{},[141],{"type":24,"value":142},"对比人脑，很难找到这种全局错误信号的一轮次一轮次的同步的传播，做到基于物理邻接的层(节点）的错误信息进行学习，应该才是更理想的表达和学习的机制。其中Local Learning是对该问题的研究，曾经系统的跟踪了这些方向：Predictive Coding，Proxy Objective，Feedback Connection，Weight Constraint，Synthetic Gradient，Target Propagation，NGRAD GlobalErrorVector DistributionalCodeDopamine，Biological Plausibility，Equilibrium Propagation，Extra X-Learning。",{"type":18,"tag":26,"props":144,"children":145},{},[146],{"type":24,"value":147},"按照不同的\"Local\"思路，进一步展开看：",{"type":18,"tag":90,"props":149,"children":150},{},[151],{"type":18,"tag":94,"props":152,"children":153},{},[154],{"type":24,"value":155},"朴素的物理原则/生物机制(关联和竞争) ：Hebbian/ContrastiveHebbian /Grossberg/Oja/LWA/…",{"type":18,"tag":26,"props":157,"children":158},{},[159],{"type":24,"value":160},"Random Feedback Weights",{"type":18,"tag":90,"props":162,"children":163},{},[164,169,174,179,184,189,194],{"type":18,"tag":94,"props":165,"children":166},{},[167],{"type":24,"value":168},"辅助网络学习调节：RL for Local Learning/MetaL for Local Learning/",{"type":18,"tag":94,"props":170,"children":171},{},[172],{"type":24,"value":173},"时变比对：Real Time Recurrent Leaning/Recurrent Backpropagation/Eligibility Propagation(资格传播)/Equilibrium Propagation(均衡传播) (No.3)",{"type":18,"tag":94,"props":175,"children":176},{},[177],{"type":24,"value":178},"生成模型：Target Propagation/Difference 
Target Propagation/Predictive Coding （No.1）",{"type":18,"tag":94,"props":180,"children":181},{},[182],{"type":24,"value":183},"反馈连接比对：Feedback Connections/Direct Feedback Alignment (No.2)/ Deep Feedback Control",{"type":18,"tag":94,"props":185,"children":186},{},[187],{"type":24,"value":188},"对称权重或符号：Weight Mirror & KP+/ Weight Symmetry/ Sign Symmetry",{"type":18,"tag":94,"props":190,"children":191},{},[192],{"type":24,"value":193},"神经/脉冲启发的扩展图（节点）：SpikeGrad/",{"type":18,"tag":94,"props":195,"children":196},{},[197],{"type":24,"value":198},"其他：NGRAD/Neural Gradient Representation by Activity Differences / Dynamic Stimuli(动态刺激) Trace Learning/ GlobalErrorVector Broadcasting/ Node Perturbation(节点扰动)",{"type":18,"tag":26,"props":200,"children":201},{},[202],{"type":24,"value":203},"我们需要从三个维度权衡出一条好的路线：1) 能否规模化学习深度网络处理复杂大任务，2) 是否具有局部学习之外的其他学习能力（在线，增量，持续），3) 是否具有硬件友好性（哪怕是全新的可行的底层硬件结构）。",{"type":18,"tag":26,"props":205,"children":206},{},[207],{"type":24,"value":208},"可惜到目前为止，这些方向的研究进展，都还没有达到期望的效果。",{"type":18,"tag":26,"props":210,"children":211},{},[212],{"type":24,"value":213},"回到我们讨论的AI下一代架构的话题上来，Local Learning/Non-BP/Gradient-Free的突破，应该是对架构影响最大的，直接对DAG和BP的冲击。",{"type":18,"tag":26,"props":215,"children":216},{},[217],{"type":24,"value":218},"2）类脑等神经科学发展和相关机制/思想被引入智能算法的影响；",{"type":18,"tag":26,"props":220,"children":221},{},[222],{"type":24,"value":223},"在Bio-Brain和Brain-like的神经计算中，我们觉得这几个关键特征，对AI架构影响很大，现在的AI平台在计算流程中并没有很好的支撑：Asynchronized，Full-Parallel，Pipeline Inference，Sparse Activation，Dynamic Routing。",{"type":18,"tag":26,"props":225,"children":226},{},[227],{"type":24,"value":228},"以异步、高并行度来说，每一个神经元都可以看成一个带有独立内存的和独立芯片的完整的计算单元，甚至更小的神经突触都可以看成异步并行的单元。",{"type":18,"tag":26,"props":230,"children":231},{},[232],{"type":24,"value":233},"以Pipeline Inference来说，人脑是不间断的在做Inference，而且Learning/Train和Inference是紧密的结合在一起的，现在即使是Online Learning也是相对明确可区分的Train和Infer。",{"type":18,"tag":26,"props":235,"children":236},{},[237],{"type":24,"value":238},"当然，另外一个大的方向Spike 
NN如果能够在CV，NLP等主要的问题上效果超越，AI的平台架构也将是大的改变甚至重启。",{"type":18,"tag":26,"props":240,"children":241},{},[242],{"type":24,"value":243},"Pathways提到的关键点中，包含了Sparse Activation，Dynamic Routing；而Multitask-SingleModel，MultiModeling我们任务对AI架构冲击不大。",{"type":18,"tag":26,"props":245,"children":246},{},[247],{"type":24,"value":248},"3）AI算法发展可能引出的架构需求；",{"type":18,"tag":26,"props":250,"children":251},{},[252],{"type":24,"value":253},"在AI算法发展中，如Reservoir Computing，那种Dense的Topology和Recurrent的计算流，如果大型的网络，会对当前的AI平台有一定挑战。 当前AI算法能较好的解决很多类型的问题如CV,NLP的，特别Transformer为主的巨无霸模型大有一招鲜吃遍天的感觉；但还有一些问题求解极其糟糕，比如Math-Problem（参见google，facebook等公开的一些数据集和当前AI解决的进展），在这些问题上，堆规模几乎无用甚至还有反作用，我们分析，可能Modularity Compositional-Generalization，Neural Memory，High-Integrated Complex Reinforcement Learning，Discrete/Non-Differentiable Algorithms等会是潜在的突破方向；这些方向对AI下一代的架构，可能会有冲击，虽然还不太明朗。",{"type":18,"tag":26,"props":255,"children":256},{},[257],{"type":24,"value":258},"4）AI重量级应用方向的计算的特点；",{"type":18,"tag":26,"props":260,"children":261},{},[262],{"type":24,"value":263},"笼统的说，相对通用的问题如CV,NLP的AI的算法发展进入了一个平缓期，我们认为AI+Science，诸如生物、材料、数字艺术等领域，会有更多的人力物力会投入进来。",{"type":18,"tag":26,"props":265,"children":266},{},[267],{"type":24,"value":268},"这些方向，我们已经感受到一些对AI下一代架构的冲击或诉求：",{"type":18,"tag":26,"props":270,"children":271},{},[272],{"type":24,"value":273},"1）在生物制药、物理材料、求解器等科学问题上，有一类共同的问题：Size Generalization或Scale Generalization。如果我们叫传统的泛化为Instance Generalization，即我们训练和推理的输出结构一样，需要的泛化能力是对新样本的正确预测。而Size/Scale泛化，则是在一个小体系上训练，需要在大若干倍的体系上预测。在Protein/AlphaFold，RL|GNN-TSP，Material等场景已经存在。这既需要算法支持，对平台也有一定的冲击，比如计算图动态能力，超大规模数据结构（不一定稀疏）的支持。",{"type":18,"tag":26,"props":275,"children":276},{},[277],{"type":24,"value":278},"2）同样，在科学计算领域，广义物理世界的很多问题，是含有时间因素的，即Time Dependent。无论是原子，分子，到蛋白质，到细胞。含时对模型和平台都有新的挑战。",{"type":18,"tag":26,"props":280,"children":281},{},[282],{"type":24,"value":283},"综上，我们分析了包含Pathways一文三个关键点中真正对AI Architecture有影响的Sparse-Activation/Dynamic-Routing，也分析了其他一些有潜在大冲击的方向，特别是Local Learning和AI Science Computing。 
当然最近期看，Sparse-Activation/Dynamic-Routing已经是最急迫的需要，大模型上由平台原生支持：类似Dynamic-Dispatch这种面向用户的API/算子，和内部在节点内和节点间的Sparse Activation来实现低能耗大模型计算。",{"title":7,"searchDepth":285,"depth":285,"links":286},4,[],"markdown","content:technology-blogs:zh:772.md","content","technology-blogs/zh/772.md","technology-blogs/zh/772","md",1776506140964]