[{"data":1,"prerenderedAt":301},["ShallowReactive",2],{"content-query-eelmvrLQxV":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":295,"_id":296,"_source":297,"_file":298,"_stem":299,"_extension":300},"/technology-blogs/zh/3191","zh",false,"","高效架构SLAB: Attention简化，无需LayerNorm，视觉/语言任务精度无损效率提升","作者：xinghao 来源：知乎","2024-06-20","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/8ffea5ce58c94a67985005f24e69b0f4.png","technology-blogs","模型精度调优",{"type":15,"children":16,"toc":292},"root",[17,25,42,47,58,63,72,80,88,93,101,109,123,131,136,143,151,158,163,170,175,183,191,205,212,217,224,231,239,244,251,259,264,271,279,287],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"高效架构slab-attention简化无需layernorm视觉语言任务精度无损效率提升",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29,35,37],{"type":18,"tag":30,"props":31,"children":32},"strong",{},[33],{"type":24,"value":34},"作者：xinghao",{"type":24,"value":36}," 
",{"type":18,"tag":30,"props":38,"children":39},{},[40],{"type":24,"value":41},"来源：知乎",{"type":18,"tag":26,"props":43,"children":44},{},[45],{"type":24,"value":46},"论文见：",{"type":18,"tag":26,"props":48,"children":49},{},[50],{"type":18,"tag":51,"props":52,"children":56},"a",{"href":53,"rel":54},"https://arxiv.org/abs/2211.12905",[55],"nofollow",[57],{"type":24,"value":53},{"type":18,"tag":26,"props":59,"children":60},{},[61],{"type":24,"value":62},"MindSpore代码：",{"type":18,"tag":26,"props":64,"children":65},{},[66],{"type":18,"tag":51,"props":67,"children":70},{"href":68,"rel":69},"https://gitee.com/mindspore/models/tree/master/research/cv/ghostnetv2",[55],[71],{"type":24,"value":68},{"type":18,"tag":26,"props":73,"children":74},{},[75],{"type":18,"tag":30,"props":76,"children":77},{},[78],{"type":24,"value":79},"01",{"type":18,"tag":26,"props":81,"children":82},{},[83],{"type":18,"tag":30,"props":84,"children":85},{},[86],{"type":24,"value":87},"引言",{"type":18,"tag":26,"props":89,"children":90},{},[91],{"type":24,"value":92},"Transformer已经成为了语言和视觉任务中常用的基础架构之一。然而，由于Transformer结构高计算开销的影响，其在端侧等资源受限设备中的应用依然面临很大的挑战。我们对Transformer结构中的标准化层和注意力机制两个模块的优化策略进行了深入探索，从而构建一个高效的Transformer结构。其中，LayerNorm作为Transformer结构中常用的标准化层，但模型推理时仍需计算数据的统计值，导致了推理的低效。我们提出了渐进式的LayerNorm替换策略，并对标准的BatchNorm进行了改进以更好地取代LayerNorm层。同时，我们采用了一种简单高效的线性注意力模块（Simplified Linear 
Attention），来获得更强的模型性能。我们将这两种策略的结合简称为SLAB。我们在图像分类、目标检测以及语言任务上都进行了大量的实验，获得了很好的效果。例如，我们的SLAB-Swin-S在ImageNet1k数据集上获得了83.6%的分类精度，相对Flatten-Swin-S在精度提升0.1%的情况下，时延减少了2.4ms。",{"type":18,"tag":26,"props":94,"children":95},{},[96],{"type":18,"tag":30,"props":97,"children":98},{},[99],{"type":24,"value":100},"02",{"type":18,"tag":26,"props":102,"children":103},{},[104],{"type":18,"tag":30,"props":105,"children":106},{},[107],{"type":24,"value":108},"方法",{"type":18,"tag":26,"props":110,"children":111},{},[112,117,118],{"type":18,"tag":30,"props":113,"children":114},{},[115],{"type":24,"value":116},"2.1",{"type":24,"value":36},{"type":18,"tag":30,"props":119,"children":120},{},[121],{"type":24,"value":122},"渐进式重参数化BatchNorm",{"type":18,"tag":26,"props":124,"children":125},{},[126],{"type":18,"tag":127,"props":128,"children":130},"img",{"alt":7,"src":129},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/6bc799f5011a424e95665239a46bd749.png",[],{"type":18,"tag":26,"props":132,"children":133},{},[134],{"type":24,"value":135},"LN作为Transformer中常用的标准化层结构，由于其在训练和推理两阶段均存在均值和方差的计算，影响了Transformer的执行速度。与之相对，BN仅在训练阶段存在均值和方差的计算，且在推理阶段可与相邻的线性层融合，可以去除标准化层对模型推理速度的影响。但是，在Transformer结构中将LN简单替换为BN训练会导致模型精度下降以及训练崩溃等问题。为解决这个问题，我们对BN进行了优化，并提出了渐进式重参数化BatchNorm策略。",{"type":18,"tag":26,"props":137,"children":138},{},[139],{"type":18,"tag":127,"props":140,"children":142},{"alt":7,"src":141},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/bb752216b1c5492cb455de8269fca900.png",[],{"type":18,"tag":26,"props":144,"children":145},{},[146],{"type":18,"tag":30,"props":147,"children":148},{},[149],{"type":24,"value":150},"2.2 
简化线性注意力",{"type":18,"tag":26,"props":152,"children":153},{},[154],{"type":18,"tag":127,"props":155,"children":157},{"alt":7,"src":156},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/8a890e025e634400b56a9bf2d4e7549f.png",[],{"type":18,"tag":26,"props":159,"children":160},{},[161],{"type":24,"value":162},"Attention是Transformer网络中重要的模块之一。为进一步压缩模型计算量，我们引入了线性注意力模块。在该模块中，我们仅使用了硬件亲和的ReLU算子作为相似度函数，并增加了一个深度可分离模块增强局部特征提取。该简单线性注意力模块（simplified linear attention, SLA）形式如下：",{"type":18,"tag":26,"props":164,"children":165},{},[166],{"type":18,"tag":127,"props":167,"children":169},{"alt":7,"src":168},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/fc215827cc3a435ca0605a206e8a3485.png",[],{"type":18,"tag":26,"props":171,"children":172},{},[173],{"type":24,"value":174},"式中，DWC表示深度可分离卷积。",{"type":18,"tag":26,"props":176,"children":177},{},[178],{"type":18,"tag":30,"props":179,"children":180},{},[181],{"type":24,"value":182},"03",{"type":18,"tag":26,"props":184,"children":185},{},[186],{"type":18,"tag":30,"props":187,"children":188},{},[189],{"type":24,"value":190},"实验结果",{"type":18,"tag":26,"props":192,"children":193},{},[194,199,200],{"type":18,"tag":30,"props":195,"children":196},{},[197],{"type":24,"value":198},"3.1",{"type":24,"value":36},{"type":18,"tag":30,"props":201,"children":202},{},[203],{"type":24,"value":204},"分类任务",{"type":18,"tag":26,"props":206,"children":207},{},[208],{"type":18,"tag":127,"props":209,"children":211},{"alt":7,"src":210},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/5630029b54d947ac9ed94f5a07f0d342.png",[],{"type":18,"tag":26,"props":213,"children":214},{},[215],{"type":24,"value":216},"我们在ImageNet1k数据集上进行了实验，实验结果证明在多个backbone上，我们的PRepBN均获得了与LN相当甚至更好的性能。从实验结果看，相对基于LN的模型，PRepBN模型的分类精度有0.1%~1.4%的提升。而基于我们SLAB的模型，能在精度与Flatten 
Transformer相当的情况下，减少模型推理的时延。",{"type":18,"tag":26,"props":218,"children":219},{},[220],{"type":18,"tag":127,"props":221,"children":223},{"alt":7,"src":222},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/92ba16522eb741d5bd029f646dbd1558.png",[],{"type":18,"tag":26,"props":225,"children":226},{},[227],{"type":18,"tag":127,"props":228,"children":230},{"alt":7,"src":229},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/75b79cc0ed794fb185e1c8ba3fc8923b.png",[],{"type":18,"tag":26,"props":232,"children":233},{},[234],{"type":18,"tag":30,"props":235,"children":236},{},[237],{"type":24,"value":238},"3.2 检测任务",{"type":18,"tag":26,"props":240,"children":241},{},[242],{"type":24,"value":243},"此外，我们验证了不同backbone在COCO数据集上的效果。从实验结果可以看出，我们的方法实现了与原Backbone模型相当的性能，但拥有更低的模型推理时延。",{"type":18,"tag":26,"props":245,"children":246},{},[247],{"type":18,"tag":127,"props":248,"children":250},{"alt":7,"src":249},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/74ef32d616b84923ac263ade6ab03d53.png",[],{"type":18,"tag":26,"props":252,"children":253},{},[254],{"type":18,"tag":30,"props":255,"children":256},{},[257],{"type":24,"value":258},"3.3 语言任务",{"type":18,"tag":26,"props":260,"children":261},{},[262],{"type":24,"value":263},"我们基于Adaptive 
inputs方法在Wikitext-103数据集上评测了PRepBN在语言任务的能力。同时，我们也将PRepBN应用在了LLaMA-350M模型中，并评测了模型在下游任务的性能。从实验结果可以看出，我们的PRepBN方法在语言任务上也表现出了不错的性能，精度无损的情况下将LLaMA-350M速度从44tokens/s提升到了50.4tokens/s。",{"type":18,"tag":26,"props":265,"children":266},{},[267],{"type":18,"tag":127,"props":268,"children":270},{"alt":7,"src":269},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/21/6aab2f92d6fb412697466b191787dfd4.png",[],{"type":18,"tag":26,"props":272,"children":273},{},[274],{"type":18,"tag":30,"props":275,"children":276},{},[277],{"type":24,"value":278},"04",{"type":18,"tag":26,"props":280,"children":281},{},[282],{"type":18,"tag":30,"props":283,"children":284},{},[285],{"type":24,"value":286},"总结",{"type":18,"tag":26,"props":288,"children":289},{},[290],{"type":24,"value":291},"我们对Transformer结构中的标准化层和注意力机制两个模块的优化策略进行了深入探索，提出了渐进式的LayerNorm替换策略，同时采用一种简单高效的线性注意力模块，来获得更加高效的Transformer模型架构。这个方法在图像分类、目标检测以及语言任务上进行了大量的实验验证，在精度无损的情况下，大幅提升Transformer的推理效率。",{"title":7,"searchDepth":293,"depth":293,"links":294},4,[],"markdown","content:technology-blogs:zh:3191.md","content","technology-blogs/zh/3191.md","technology-blogs/zh/3191","md",1776506126972]