[{"data":1,"prerenderedAt":788},["ShallowReactive",2],{"content-query-W7pDhRr2gl":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":782,"_id":783,"_source":784,"_file":785,"_stem":786,"_extension":787},"/technology-blogs/zh/1755","zh",false,"","昇思金箍棒：基于MindSpore的业界SOTA模型压缩算法集","端边设备的算力、电量和内存虽然有提升，但并不能匹配神经网络的部署要求，为解决这个矛盾，模型压缩算法就应运而生。","2022-08-26","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/6e865b785c6d47498e5b534de58b2101.png","technology-blogs","实践",{"type":15,"children":16,"toc":779},"root",[17,25,34,39,48,53,60,65,79,84,92,97,102,109,114,118,123,127,132,137,142,147,152,157,165,170,175,183,188,193,201,206,211,216,221,228,233,238,246,251,256,263,275,283,288,293,298,303,308,315,320,327,332,337,344,355,362,367,372,377,384,389,429,434,439,444,451,458,463,470,475,482,487,491,499,504,509,514,523,528,538,546,558,563,568,573,578,583,592,597,606,611,620,625,635,644,652,657,662,667,672,677,682,687,692,697,704,712,722,738,748,764],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"昇思金箍棒基于mindspore的业界sota模型压缩算法集",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":33},"img",{"alt":7,"src":32},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/2c12a0f929e74d91ade981a5b642c010.gif",[],{"type":18,"tag":26,"props":35,"children":36},{},[37],{"type":24,"value":38},"1",{"type":18,"tag":26,"props":40,"children":41},{},[42],{"type":18,"tag":43,"props":44,"children":45},"strong",{},[46],{"type":24,"value":47},"为什么需要压缩模型",{"type":18,"tag":26,"props":49,"children":50},{},[51],{"type":24,"value":52},"近些年来，随着算力的提升、数据的爆炸式增长和深度神经网络技术的成熟，深度神经网络在CV、NLP等很多领域都取得了远超传统机器学习的效果，而网络的规模和参数量也在迅速增加。端边设备的算力、电量和内存虽然有提升，但并不能匹配神经网络的部署要求，为解决这个矛盾，模型压缩算法就应运而生。",{"type":18,"tag":26,"props":54,"children":55},{},[56],{"type":18,"tag":30,"props":57,"children"
:59},{"alt":7,"src":58},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/46a9171d83d54da08c1d2aa5e19c7dd8.png",[],{"type":18,"tag":26,"props":61,"children":62},{},[63],{"type":24,"value":64},"图.训练模型所需算力和摩尔定律的趋势，设备算力的增长远不及模型算力需求的增长；在部署模型时同样面临着类似的困难。",{"type":18,"tag":26,"props":66,"children":67},{},[68,70],{"type":24,"value":69},"（图片来源：UC Berkeley: ",{"type":18,"tag":71,"props":72,"children":76},"a",{"href":73,"rel":74},"https://medium.com/riselab/ai-and-memory-wall-2cb4265cb0b8%EF%BC%89",[75],"nofollow",[77],{"type":24,"value":78},"https://medium.com/riselab/ai-and-memory-wall-2cb4265cb0b8）",{"type":18,"tag":26,"props":80,"children":81},{},[82],{"type":24,"value":83},"2",{"type":18,"tag":26,"props":85,"children":86},{},[87],{"type":18,"tag":43,"props":88,"children":89},{},[90],{"type":24,"value":91},"MindSpore Golden Stick（昇思金箍棒）",{"type":18,"tag":26,"props":93,"children":94},{},[95],{"type":24,"value":96},"MindSpore Golden Stick是华为诺亚实验室和昇思MindSpore团队基于昇思MindSpore研发的模型压缩算法集，提供丰富的模型压缩算法如剪枝、量化等，以达成缩减模型参数量等效果，降低深度神经网络部署在端边设备上的门槛；同时提供一套简单易用的算法接口，降低应用模型压缩算法的成本。",{"type":18,"tag":26,"props":98,"children":99},{},[100],{"type":24,"value":101},"MindSpore Golden Stick的整体架构图如下：",{"type":18,"tag":26,"props":103,"children":104},{},[105],{"type":18,"tag":30,"props":106,"children":108},{"alt":7,"src":107},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/714bb36f41514aa2ae472a2922204e42.png",[],{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"图.MindSpore Golden Stick架构图",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":24,"value":38},{"type":18,"tag":26,"props":119,"children":120},{},[121],{"type":24,"value":122},"底层的MindSpore Rewrite模块提供修改前端网络的能力，基于此模块的接口，算法开发者可以按照特定的规则对MindSpore的前端网络做节点和拓扑关系的增删查改；",{"type":18,"tag":26,"props":124,"children":125},{},[126],{"type":24,"value":83},{"type":18,"tag":26,"props":128,"children":129},{},[130],{"type":24,"value":131},"基于MindSpore 
Rewrite这个基础能力，MindSpore Golden Stick会提供各种类型的算法，比如SimQAT（Simulated Quantization Aware Training，量化感知训练）算法、SLB（Searching for low-bit weights，基于权值搜索的低比特神经网络量化）量化算法、SCOP（Scientific Control for Reliable Neural Network Pruning，基于科学控制法的神经网络剪枝）剪枝算法等；",{"type":18,"tag":26,"props":133,"children":134},{},[135],{"type":24,"value":136},"3",{"type":18,"tag":26,"props":138,"children":139},{},[140],{"type":24,"value":141},"基于一些基础的量化、剪枝等算法，MindSpore Golden Stick还规划了如多种算法组合技术、AMC（AutoML for Model Compression and Acceleration on Mobile Devices，自动模型压缩技术）、NAS（Neural architecture search，网络结构搜索）、HAQ（Hardware-Aware Automated Quantization，硬件感知的自动量化）等高阶技术；",{"type":18,"tag":26,"props":143,"children":144},{},[145],{"type":24,"value":146},"4",{"type":18,"tag":26,"props":148,"children":149},{},[150],{"type":24,"value":151},"为了方便开发者分析调试算法，MindSpore Golden Stick规划了一些工具，如可视化工具、逐层分析工具、算法压缩效果分析工具等。",{"type":18,"tag":26,"props":153,"children":154},{},[155],{"type":24,"value":156},"上述介绍的第三点和第四点能力当前正在规划中，敬请期待。",{"type":18,"tag":26,"props":158,"children":159},{},[160],{"type":18,"tag":43,"props":161,"children":162},{},[163],{"type":24,"value":164},"统一的算法入口",{"type":18,"tag":26,"props":166,"children":167},{},[168],{"type":24,"value":169},"模型压缩算法种类繁多，不同算法的应用方式往往各不相同，这增加了应用算法的学习成本。",{"type":18,"tag":26,"props":171,"children":172},{},[173],{"type":24,"value":174},"MindSpore Golden Stick对算法应用流程做了梳理和抽象，提供了一套统一的算法应用接口，最大程度缩减算法应用的学习成本，同时这也方便了在各种算法的基础上，做一些高阶技术的探索。",{"type":18,"tag":26,"props":176,"children":177},{},[178],{"type":18,"tag":43,"props":179,"children":180},{},[181],{"type":24,"value":182},"网络重写能力",{"type":18,"tag":26,"props":184,"children":185},{},[186],{"type":24,"value":187},"模型压缩算法往往会针对特定的网络结构做优化，如感知量化算法往往在网络中的Conv2d或者Conv2d + BatchNorm2d结构上插入伪量化节点。",{"type":18,"tag":26,"props":189,"children":190},{},[191],{"type":24,"value":192},"MindSpore Golden 
Stick提供了基于Pattern修改前端网络的能力，算法开发者可以基于此能力制定通用的改图规则去实现算法逻辑，而不需要对每个特定的网络都实现一遍算法逻辑，从而提升算法接入效率。",{"type":18,"tag":26,"props":194,"children":195},{},[196],{"type":18,"tag":43,"props":197,"children":198},{},[199],{"type":24,"value":200},"SimQAT算法",{"type":18,"tag":26,"props":202,"children":203},{},[204],{"type":24,"value":205},"网络量化是一种将浮点计算转成低比特定点计算的技术，可以有效地降低网络计算量、参数大小和内存消耗，但往往带来一些精度损失。",{"type":18,"tag":26,"props":207,"children":208},{},[209],{"type":24,"value":210},"量化即以较低的推理精度损失，将网络中的32位有限范围浮点型（float32）权重或激活近似为有限多个离散值（通常为int8）的过程。",{"type":18,"tag":26,"props":212,"children":213},{},[214],{"type":24,"value":215},"换言之，它是以更少位数的数据类型来近似表示float32数据的过程，而网络的输入输出依然是浮点型，从而达到减少网络尺寸大小、减少网络部署时的内存消耗及加快网络推理速度等目标。",{"type":18,"tag":26,"props":217,"children":218},{},[219],{"type":24,"value":220},"量化相当于给网络引入了噪声，所以量化会损失网络精度，但是神经网络一般对噪声是不太敏感的，只要控制好量化的程度，对高级任务精度影响可以做到很小。量化后的网络相较于原始网络，在网络推理时使用int8运算代替了原有的float32计算，性能能够得到极大的提升。",{"type":18,"tag":26,"props":222,"children":223},{},[224],{"type":18,"tag":30,"props":225,"children":227},{"alt":7,"src":226},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/73c1ee2f96b9423698108c714f0e05e0.png",[],{"type":18,"tag":26,"props":229,"children":230},{},[231],{"type":24,"value":232},"图.与FP32类型相比，FP16、INT8等低精度数据表达类型所占用空间更小。使用低精度数据表达类型替换高精度数据表达类型，可以大幅降低存储空间和传输时间；而低比特的计算性能也更高，INT8相对比FP32的加速比可达到3倍甚至更高，对于相同的计算，功耗上也有明显优势。",{"type":18,"tag":26,"props":234,"children":235},{},[236],{"type":24,"value":237},"SimQAT（Simulated Quantization Aware Training）算法是一种最基础的量化感知训练算法，在训练时使用伪量化节点来模拟量化计算的损失，并通过反向传播更新网络参数，使得网络参数更好地适应量化带来的损失。具体方案可以参考文献[1]。",{"type":18,"tag":26,"props":239,"children":240},{},[241],{"type":18,"tag":43,"props":242,"children":243},{},[244],{"type":24,"value":245},"效果",{"type":18,"tag":26,"props":247,"children":248},{},[249],{"type":24,"value":250},"当前SimQAT支持int8量化，常用模型的精度数据如下表。",{"type":18,"tag":26,"props":252,"children":253},{},[254],{"type":24,"value":255},"表. 
感知量化训练后模型精度",{"type":18,"tag":26,"props":257,"children":258},{},[259],{"type":18,"tag":30,"props":260,"children":262},{"alt":7,"src":261},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/8705001fa9464d1cb6842041c2824759.png",[],{"type":18,"tag":26,"props":264,"children":265},{},[266],{"type":18,"tag":43,"props":267,"children":268},{},[269],{"type":18,"tag":270,"props":271,"children":272},"em",{},[273],{"type":24,"value":274},"SCOP结构化剪枝，缩减模型功耗50%",{"type":18,"tag":26,"props":276,"children":277},{},[278],{"type":18,"tag":43,"props":279,"children":280},{},[281],{"type":24,"value":282},"原理",{"type":18,"tag":26,"props":284,"children":285},{},[286],{"type":24,"value":287},"神经网络剪枝技术通过去除神经网络中的部分参数来减少参数量和计算量，主要分为非结构化剪枝和结构化剪枝两类。以卷积神经网络（CNN）为例，非结构化剪枝是去除卷积核中的部分权值，尽管它可以实现很高的压缩比，但实际的加速依赖于特殊的硬件设计，难以在通用的Ascend、GPU、CPU平台上获得收益。而结构化剪枝直接去除CNN中完整的卷积核，不破坏网络的拓扑结构，无需特定的软件和硬件适配即可直接实现模型的推理加速。",{"type":18,"tag":26,"props":289,"children":290},{},[291],{"type":24,"value":292},"发现冗余的卷积核是结构化剪枝的关键一步，常用的方法可分为两种：",{"type":18,"tag":26,"props":294,"children":295},{},[296],{"type":24,"value":297},"第一种方法不需要训练数据，通过定义一些卷积核重要性的假设，来判定不同卷积核的重要性。一个典型的假设是范数小的卷积核不重要，砍掉一些范数小的卷积核不会太多地影响网络的表现。",{"type":18,"tag":26,"props":299,"children":300},{},[301],{"type":24,"value":302},"第二种方法是数据驱动的方法，引入训练数据来学习不同卷积核的重要性。比如通过给每个卷积核引入额外的控制系数，学习这些控制系数，来度量不同卷积核的重要性，小的控制系数对应的卷积核被认为不重要。",{"type":18,"tag":26,"props":304,"children":305},{},[306],{"type":24,"value":307},"华为诺亚自研的剪枝方法：SCOP（基于科学控制法的神经网络剪枝）[4]是使用数据驱动的方式来发现冗余的卷积核，通过引入独立同分布的对照特征，进行对照实验来减少各种无关因素对剪枝过程的干扰，提高剪枝结果的可靠性。真实数据（Real data）和高仿数据（Knockoff data）同时输入到网络中，分别生成真实特征和高仿特征。如果一个卷积核对应的高仿特征抑制住了真实特征，则认为这个卷积核是冗余的，应当被删除。",{"type":18,"tag":26,"props":309,"children":310},{},[311],{"type":18,"tag":30,"props":312,"children":314},{"alt":7,"src":313},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/e7c8d85311104ff7a00c4a3ca1c5ec18.png",[],{"type":18,"tag":26,"props":316,"children":317},{},[318],{"type":24,"value":319},"图. 
基于科学控制法的神经网络剪枝",{"type":18,"tag":26,"props":321,"children":322},{},[323],{"type":18,"tag":43,"props":324,"children":325},{},[326],{"type":24,"value":245},{"type":18,"tag":26,"props":328,"children":329},{},[330],{"type":24,"value":331},"对ResNet50网络应用SCOP剪枝，并使用CIFAR-10数据集评估，实验结果如下表所示。可以发现，在当前任务中，与原始模型相比，在剪枝率45%的情况下，SCOP剪枝后的模型大幅降低了模型的参数量，精度损失在0.5%以内。",{"type":18,"tag":26,"props":333,"children":334},{},[335],{"type":24,"value":336},"表. SCOP剪枝算法用于ResNet50的CIFAR-10数据集结果",{"type":18,"tag":26,"props":338,"children":339},{},[340],{"type":18,"tag":30,"props":341,"children":343},{"alt":7,"src":342},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/9f3109827395488c8164c9909c2495d4.png",[],{"type":18,"tag":26,"props":345,"children":346},{},[347],{"type":18,"tag":43,"props":348,"children":349},{},[350],{"type":18,"tag":270,"props":351,"children":352},{},[353],{"type":24,"value":354},"SLB量化训练，压缩模型8-32倍",{"type":18,"tag":26,"props":356,"children":357},{},[358],{"type":18,"tag":43,"props":359,"children":360},{},[361],{"type":24,"value":282},{"type":18,"tag":26,"props":363,"children":364},{},[365],{"type":24,"value":366},"传统的量化方法在计算梯度时，通常使用STE(Straight Through Estimator) [1]或者自行设计的梯度计算方式[2]。量化函数的不可微往往会导致计算出来的梯度有误差，从而提供不准确的优化方向，导致最终推理精度比较差。因此，迫切需要一种能规避这种不准确梯度估计的神经网络学习量化方法。",{"type":18,"tag":26,"props":368,"children":369},{},[370],{"type":24,"value":371},"SLB(Searching for low-bit weights) 
[3]是华为诺亚自研的权重量化算法，提供了一种基于权值搜索的低比特量化算法，能避开不准确的梯度估计。",{"type":18,"tag":26,"props":373,"children":374},{},[375],{"type":24,"value":376},"针对低比特网络量化，由于量化网络权值的有效解数量比较少，因此，对网络的量化可以通过对权值搜索实现，即将量化过程转换成权值搜索的过程。对给定量化网络预设一组量化权值，然后定义一个概率矩阵来表示不同量化权值被保留的概率，在训练阶段通过优化概率矩阵实现网络权重的量化。",{"type":18,"tag":26,"props":378,"children":379},{},[380],{"type":18,"tag":30,"props":381,"children":383},{"alt":7,"src":382},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/ddbb97baefc24ae4b829b470200b8601.png",[],{"type":18,"tag":26,"props":385,"children":386},{},[387],{"type":24,"value":388},"图. 传统量化算法vs SLB量化算法",{"type":18,"tag":26,"props":390,"children":391},{},[392,394,399,401,406,408,413,415,420,422,427],{"type":24,"value":393},"如上图所示，",{"type":18,"tag":43,"props":395,"children":396},{},[397],{"type":24,"value":398},"左边图",{"type":24,"value":400},"是用传统量化算法做二值量化，训练时用不准确的梯度更新浮点权重，最后对浮点权重做二值化(用sigmoid函数)处理得到量化权重。",{"type":18,"tag":43,"props":402,"children":403},{},[404],{"type":24,"value":405},"右边图",{"type":24,"value":407},"是用SLB量化算法做二值量化，利用连续松弛策略搜索离散权重，训练时优化离散权重的权值概率矩阵，最后根据概率挑选离散权重实现量化。",{"type":18,"tag":43,"props":409,"children":410},{},[411],{"type":24,"value":412},"左边图中红色点",{"type":24,"value":414},"对应的单个值是由sigmoid函数得到，表示权重被量化为-1的概率。",{"type":18,"tag":43,"props":416,"children":417},{},[418],{"type":24,"value":419},"蓝色点",{"type":24,"value":421},"对应的单个值是由sigmoid函数得到，表示权重被量化为+1的概率。传统量化算法中不准确的梯度更新会影响浮点权重的更新，从而导致这里的概率出现较大的偏差。",{"type":18,"tag":43,"props":423,"children":424},{},[425],{"type":24,"value":426},"右边图中红蓝相间的点",{"type":24,"value":428},"对应的2个值是由softmax函数得到，表示权重被量化为-1或+1的概率。由于避开了不准确的梯度更新，这里的概率会更精准。",{"type":18,"tag":26,"props":430,"children":431},{},[432],{"type":24,"value":433},"在分类任务中，softmax分布通常用于计算输出被分为各个类的概率。SLB也使用softmax分布来计算权重被量化为各个量化权值的概率，并最终根据最大概率挑选对应权值作为量化结果。",{"type":18,"tag":26,"props":435,"children":436},{},[437],{"type":24,"value":438},"为了提高量化结果的置信度，SLB引入了温度因子，通过逐步调整温度因子，能使softmax分布逐渐变得陡峭，慢慢趋近于one-hot分布，从而最大化量化结果的置信度，缩减量化误差。",{"type":18,"tag":26,"props":44
0,"children":441},{},[442],{"type":24,"value":443},"下面公式左边是标准的softmax函数，右边是SLB算法中引入了温度因子后的softmax函数。",{"type":18,"tag":26,"props":445,"children":446},{},[447],{"type":18,"tag":30,"props":448,"children":450},{"alt":7,"src":449},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/821f5773050a415ca5094ff787333998.png",[],{"type":18,"tag":26,"props":452,"children":453},{},[454],{"type":18,"tag":30,"props":455,"children":457},{"alt":7,"src":456},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/ec1341a55cb64701a703d5100162bc7b.png",[],{"type":18,"tag":26,"props":459,"children":460},{},[461],{"type":24,"value":462},"图. 逐步调整温度因子时，softmax分布的变化过程，最右侧是one-hot分布",{"type":18,"tag":26,"props":464,"children":465},{},[466],{"type":18,"tag":43,"props":467,"children":468},{},[469],{"type":24,"value":245},{"type":18,"tag":26,"props":471,"children":472},{},[473],{"type":24,"value":474},"对ResNet18网络应用SLB量化，并使用CIFAR-10数据集评估，实验结果如下图所示,可以发现，在当前任务中，与全精度模型相比，4bit权重量化后的模型top1精度没有损失，1bit权重量化的精度损失在0.6%以内。SLB量化大幅降低了模型的参数量，使得在资源受限的端侧部署模型变得更加便利。",{"type":18,"tag":26,"props":476,"children":477},{},[478],{"type":18,"tag":30,"props":479,"children":481},{"alt":7,"src":480},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/c70cbea174084b839b30e48f36ebd37d.png",[],{"type":18,"tag":26,"props":483,"children":484},{},[485],{"type":24,"value":486},"图. 
SLB权重量化算法用于量化ResNet18的CIFAR-10数据集结果。W32表示全精度模型，W4，W2，W1分别对应4bit、2bit和1bit权重量化模型。",{"type":18,"tag":26,"props":488,"children":489},{},[490],{"type":24,"value":136},{"type":18,"tag":26,"props":492,"children":493},{},[494],{"type":18,"tag":43,"props":495,"children":496},{},[497],{"type":24,"value":498},"总结与展望",{"type":18,"tag":26,"props":500,"children":501},{},[502],{"type":24,"value":503},"MindSpore Golden Stick是一个模型压缩算法集，同时也是一个平台，统一的算法应用接口和网络定义修改能力希望在算法应用者和算法开发者之间提供一个桥梁，让更多优秀的算法创造商用价值。",{"type":18,"tag":26,"props":505,"children":506},{},[507],{"type":24,"value":508},"在后续版本中，MindSpore Golden Stick一方面会提供更多优秀的算法助力神经网络应用部署在端边侧设备上，如ViT场景下的后量化技术[5]、知识蒸馏技术[6,8]、GhostNet技术[7]；另一方面会完善当前的网络定义修改能力，并大力打磨网络调测能力。欢迎优秀的算法开发者来社区贡献。",{"type":18,"tag":26,"props":510,"children":511},{},[512],{"type":24,"value":513},"MindSpore Golden Stick当前的开源在（点击“阅读原文”可直接浏览）：",{"type":18,"tag":26,"props":515,"children":516},{},[517],{"type":18,"tag":71,"props":518,"children":521},{"href":519,"rel":520},"https://gitee.com/mindspore/golden-stick",[75],[522],{"type":24,"value":519},{"type":18,"tag":26,"props":524,"children":525},{},[526],{"type":24,"value":527},"更多详细的介绍和快速入门资料可以参考文档：",{"type":18,"tag":26,"props":529,"children":530},{},[531],{"type":18,"tag":71,"props":532,"children":535},{"href":533,"rel":534},"https://www.mindspore.cn/golden%5C_stick/docs/zh-CN/r0.1/index.html",[75],[536],{"type":24,"value":537},"https://www.mindspore.cn/golden\\_stick/docs/zh-CN/r0.1/index.html",{"type":18,"tag":26,"props":539,"children":540},{},[541],{"type":18,"tag":43,"props":542,"children":543},{},[544],{"type":24,"value":545},"诺亚方舟实验室简介",{"type":18,"tag":26,"props":547,"children":548},{},[549,551,556],{"type":24,"value":550},"MindSpore Golden 
Stick当中的核心算法为",{"type":18,"tag":43,"props":552,"children":553},{},[554],{"type":24,"value":555},"华为诺亚方舟实验室",{"type":24,"value":557},"自研原创的优秀算法。华为诺亚方舟实验室是华为的AI能力研究中心，立足于人工智能基础算法研究，聚焦打造数据高效和能耗高效的AI引擎。实验室广泛分布于世界各地，在亚洲、欧洲、北美等均设有研发分部。",{"type":18,"tag":26,"props":559,"children":560},{},[561],{"type":24,"value":562},"诺亚的使命是通过在人工智能、数据挖掘及相关领域的持续创新，为公司和社会做出重大贡献。在创新的每一个阶段，基于长期和重大项目驱动，我们追求在AI相关领域打造最先进技术，助力公司提供更好的产品和服务。",{"type":18,"tag":26,"props":564,"children":565},{},[566],{"type":24,"value":567},"作为世界级的实验室，诺亚全力推进AI各领域的前端研发，我们勇于面对人工智能和大数据时代的挑战与机遇，秉承“把数字世界带入每个人、每个家庭、每个组织，构建万物互联的智能世界”这一理念，通过全流程智能化，彻底改进人们工作和生活的方式，以及公司开展业务的模式。",{"type":18,"tag":26,"props":569,"children":570},{},[571],{"type":24,"value":572},"诺亚的研究领域集中在计算视觉、语音和自然语言处理、推荐系统和搜索引擎、决策推理、AI基础理论五大方向，自2012年创立至今，已发展成为一个在学界和业界都取得了重大成就的研究机构。",{"type":18,"tag":26,"props":574,"children":575},{},[576],{"type":24,"value":577},"近期诺亚实验室的一些工作介绍，欢迎大家关注：",{"type":18,"tag":26,"props":579,"children":580},{},[581],{"type":24,"value":582},"1. 业界首篇视觉Transformer综述：",{"type":18,"tag":26,"props":584,"children":585},{},[586],{"type":18,"tag":71,"props":587,"children":590},{"href":588,"rel":589},"https://mp.weixin.qq.com/s/5-heJB91cKa1Km20rrzN8Q",[75],[591],{"type":24,"value":588},{"type":18,"tag":26,"props":593,"children":594},{},[595],{"type":24,"value":596},"2. 对话生成模型【盘古Bot】：",{"type":18,"tag":26,"props":598,"children":599},{},[600],{"type":18,"tag":71,"props":601,"children":604},{"href":602,"rel":603},"https://mp.weixin.qq.com/s/VALSLd3ITz97SurJ9IJawg",[75],[605],{"type":24,"value":602},{"type":18,"tag":26,"props":607,"children":608},{},[609],{"type":24,"value":610},"3. 高性能代码生成模型【盘古Coder】：",{"type":18,"tag":26,"props":612,"children":613},{},[614],{"type":18,"tag":71,"props":615,"children":618},{"href":616,"rel":617},"https://mp.weixin.qq.com/s/3l31RYbk7TAIyrHfiFKI-w",[75],[619],{"type":24,"value":616},{"type":18,"tag":26,"props":621,"children":622},{},[623],{"type":24,"value":624},"4. 
诺亚CVPR 2022论文集：",{"type":18,"tag":26,"props":626,"children":627},{},[628],{"type":18,"tag":71,"props":629,"children":632},{"href":630,"rel":631},"https://mp.weixin.qq.com/s/gpw%5C_k8wLPXK5QScdN%5C_tG0w",[75],[633],{"type":24,"value":634},"https://mp.weixin.qq.com/s/gpw\\_k8wLPXK5QScdN\\_tG0w",{"type":18,"tag":26,"props":636,"children":637},{},[638],{"type":18,"tag":71,"props":639,"children":642},{"href":640,"rel":641},"https://mp.weixin.qq.com/s/qvHtq2Fw5i9WJwT6ktoBbA",[75],[643],{"type":24,"value":640},{"type":18,"tag":26,"props":645,"children":646},{},[647],{"type":18,"tag":43,"props":648,"children":649},{},[650],{"type":24,"value":651},"参考资料",{"type":18,"tag":26,"props":653,"children":654},{},[655],{"type":24,"value":656},"[1] Bengio, Yoshua, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. 2013.",{"type":18,"tag":26,"props":658,"children":659},{},[660],{"type":24,"value":661},"[2] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. ICLR, 2019.",{"type":18,"tag":26,"props":663,"children":664},{},[665],{"type":24,"value":666},"[3] Yang Z, Wang Y, Han K, et al. Searching for low-bit weights in quantized neural",{"type":18,"tag":26,"props":668,"children":669},{},[670],{"type":24,"value":671},"networks. NIPS, 2020.",{"type":18,"tag":26,"props":673,"children":674},{},[675],{"type":24,"value":676},"[4] Tang, Yehui, et al. \"Scop: Scientific control for reliable neural network pruning.\" NeurIPS 2020: 10936-10947.",{"type":18,"tag":26,"props":678,"children":679},{},[680],{"type":24,"value":681},"[5] Liu, Zhenhua, et al. \"Post-training quantization for vision transformer.\" Advances in Neural Information Processing Systems 34 (2021): 28092-28103.",{"type":18,"tag":26,"props":683,"children":684},{},[685],{"type":24,"value":686},"[6] Xu, Yixing, et al. 
\"Kernel based progressive distillation for adder neural networks.\" Advances in Neural Information Processing Systems 33 (2020): 12322-12333.",{"type":18,"tag":26,"props":688,"children":689},{},[690],{"type":24,"value":691},"[7] Han, Kai, et al. \"Ghostnet: More features from cheap operations.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2020.",{"type":18,"tag":26,"props":693,"children":694},{},[695],{"type":24,"value":696},"[8] Chen, Hanting, et al. \"Data-free learning of student networks.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2019.",{"type":18,"tag":26,"props":698,"children":699},{},[700],{"type":18,"tag":30,"props":701,"children":703},{"alt":7,"src":702},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/26/bf071387e88447129d7970fdc1c3c4ab.png",[],{"type":18,"tag":26,"props":705,"children":706},{},[707],{"type":18,"tag":43,"props":708,"children":709},{},[710],{"type":24,"value":711},"MindSpore官方资料",{"type":18,"tag":26,"props":713,"children":714},{},[715,720],{"type":18,"tag":43,"props":716,"children":717},{},[718],{"type":24,"value":719},"官方QQ群",{"type":24,"value":721}," : 486831414",{"type":18,"tag":26,"props":723,"children":724},{},[725,730,732],{"type":18,"tag":43,"props":726,"children":727},{},[728],{"type":24,"value":729},"官网",{"type":24,"value":731},"：",{"type":18,"tag":71,"props":733,"children":736},{"href":734,"rel":735},"https://www.mindspore.cn/",[75],[737],{"type":24,"value":734},{"type":18,"tag":26,"props":739,"children":740},{},[741,746],{"type":18,"tag":43,"props":742,"children":743},{},[744],{"type":24,"value":745},"Gitee",{"type":24,"value":747}," : https://gitee.com/mindspore/mindspore",{"type":18,"tag":26,"props":749,"children":750},{},[751,756,758],{"type":18,"tag":43,"props":752,"children":753},{},[754],{"type":24,"value":755},"GitHub",{"type":24,"value":757}," : 
",{"type":18,"tag":71,"props":759,"children":762},{"href":760,"rel":761},"https://github.com/mindspore-ai/mindspore",[75],[763],{"type":24,"value":760},{"type":18,"tag":26,"props":765,"children":766},{},[767,772,773],{"type":18,"tag":43,"props":768,"children":769},{},[770],{"type":24,"value":771},"论坛",{"type":24,"value":731},{"type":18,"tag":71,"props":774,"children":777},{"href":775,"rel":776},"https://bbs.huaweicloud.com/forum/forum-1076-1.html",[75],[778],{"type":24,"value":775},{"title":7,"searchDepth":780,"depth":780,"links":781},4,[],"markdown","content:technology-blogs:zh:1755.md","content","technology-blogs/zh/1755.md","technology-blogs/zh/1755","md",1776506115661]