[{"data":1,"prerenderedAt":229},["ShallowReactive",2],{"content-query-wHr8SYbN9B":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":223,"_id":224,"_source":225,"_file":226,"_stem":227,"_extension":228},"/technology-blogs/zh/1704","zh",false,"","【MindSpore易点通】Transformer的编码和解码","Transformer注意力机制和残差结构的设计，在NLP领域中是将任意两个单词的距离是1，对解决NLP中棘手的长期依赖问题是非常有效，从而也有效提高模型性能。","2022-08-08","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/15/20087fb746b94d4b83bd52bdd4f9a895.png","technology-blogs","基础知识",{"type":15,"children":16,"toc":210},"root",[17,25,35,52,60,69,81,88,95,100,107,112,117,124,130,139,144,150,155,162,167,172,177,185,190,197,205],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore易点通transformer的编码和解码",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":29},"h2",{"id":28},"简介",[30],{"type":18,"tag":31,"props":32,"children":33},"strong",{},[34],{"type":24,"value":28},{"type":18,"tag":36,"props":37,"children":38},"p",{},[39,41,50],{"type":24,"value":40},"在上一篇",{"type":18,"tag":42,"props":43,"children":47},"a",{"href":44,"rel":45},"https://bbs.huaweicloud.com/forum/thread-193974-1-1.html",[46],"nofollow",[48],{"type":24,"value":49},"【MindSpore易点通】Transformer的注意力机制",{"type":24,"value":51},"帖子中，跟大家分享了注意力机制的结构和原理。基于上篇内容，本次再和大家一起解锁下Transformer中的Encoder和Decoder以及Softmax部分。",{"type":18,"tag":36,"props":53,"children":54},{},[55],{"type":18,"tag":56,"props":57,"children":59},"img",{"alt":7,"src":58},"https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663696067769776.png",[],{"type":18,"tag":26,"props":61,"children":63},{"id":62},"encoder结构",[64],{"type":18,"tag":31,"props":65,"children":66},{},[67],{"type":24,"value":68},"Encoder结构",{"type":18,"tag":36,"props":70,"children":71},{},[72,74,79],{"type":24,"value":73},"图中红框内就是Transformer的Encoder block结构，从下到上是由Multi-Head Attention-->Add & Norm-->Feed Forward-->Add & Norm组成的。因为Multi-Head Attention部分比较复杂，所以单独有分享：",{"type":18,"tag":42,"props":75,"children":77},{"href":44,"rel":76},[46],[78],{"type":24,"value":49},{"type":24,"value":80},"，了解了Multi-Head Attention的结构和计算过程，现在重点解析下Add & Norm和Feed Forward部分。",{"type":18,"tag":82,"props":83,"children":85},"h3",{"id":84},"add-normadd-norm层由有两个输入的路径一个是经过了multi-head-attention处理一个是直接输入并且add和norm两部分组成其结构和计算公式如下",[86],{"type":24,"value":87},"Add & Norm：Add & Norm层由有两个输入的路径，一个是经过了Multi-Head Attention处理，一个是直接输入。并且Add和Norm两部分组成，其结构和计算公式如下：",{"type":18,"tag":36,"props":89,"children":90},{},[91],{"type":18,"tag":56,"props":92,"children":94},{"alt":7,"src":93},"https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663834515980074.png",[],{"type":18,"tag":36,"props":96,"children":97},{},[98],{"type":24,"value":99},"Add的计算：X+MultiHeadAttention(X)。这是一种残差结构，对于网络层数较多的常用此结构，可以让网络只关注当前差异的部分，我们所熟悉的ResNet网络中就包含这种结构：",{"type":18,"tag":36,"props":101,"children":102},{},[103],{"type":18,"tag":56,"props":104,"children":106},{"alt":7,"src":105},"https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663850716957409.png",[],{"type":18,"tag":36,"props":108,"children":109},{},[110],{"type":24,"value":111},"Norm指Layer Normalization，通常用于RNN结构，Layer Normalization会将每一层神经元的输入都转成均值方差都一样的，这样可以加快收敛。",{"type":18,"tag":36,"props":113,"children":114},{},[115],{"type":24,"value":116},"Feed 
The Feed Forward layer is relatively simple: it consists of two fully connected layers, where the first uses a ReLU activation and the second uses no activation. The corresponding formula is:

![](https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663871386271280.png)

With the layers above, an Encoder block can be assembled. An Encoder block takes an input matrix X of shape (n×d) and outputs a matrix O of the same shape (n×d). Stacking multiple Encoder blocks forms the Encoder. The first Encoder block takes the representation-vector matrix of the sentence's words as input, each subsequent Encoder block takes the previous block's output as input, and the output matrix of the last Encoder block is the encoded information matrix C, which is used later in the Decoder.

## Decoder structure

The blue box in the figure is the Transformer Decoder block, which contains two Multi-Head Attention layers. The first Multi-Head Attention layer applies a Mask. The second Multi-Head Attention layer computes its K and V matrices from the Encoder's encoded information matrix C, while Q is computed from the output of the previous Decoder block. Finally, a Softmax layer computes the probability of the next translated word.

### The first Multi-Head Attention layer

The first Multi-Head Attention in the Decoder block applies a Mask because translation proceeds in order: the i-th word must be translated before the (i+1)-th word. The Mask prevents the i-th word from seeing the information from word i+1 onward.

The inputs to the Masked computation are the Decoder's input matrix and a Mask matrix: the input matrix contains the representation vectors of the words, and the Mask is an n×n matrix. The figure below shows the computation:

![](https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663894942535072.png)

First the input matrix X is obtained, and the Q, K, and V matrices are computed from it. The product of Q and the transpose of K is then computed. This product must go through a Softmax to produce the attention scores, but because translation is sequential (the i-th word is translated before the (i+1)-th), the Mask matrix must be applied before the Softmax to block out the information after each word. The masked result is then multiplied by the matrix V to produce the output Z, so that the output vector Z_m of the m-th word contains only the information of word m and the words before it.

The computation above yields the output matrix of one Masked Self-Attention; Multi-Head Attention then concatenates several such outputs to obtain the output Z of the first Multi-Head Attention layer, and Z has the same dimensions as the input X.

The second Multi-Head Attention layer differs from the first in that the K and V matrices of its Self-Attention are not computed from the previous Decoder block's output but from the Encoder's encoded information matrix C. The benefit is that, in the Decoder, every word can make use of the information of all the words processed by the Encoder.
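The masking and the encoder-sourced K and V described above can be illustrated with a small NumPy sketch. The learnable projection matrices are omitted and the inputs are random placeholders, so this only shows where the Mask enters and where Q, K, and V come from:

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def attention(q, k, v, mask=None):
    # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V
    scores = q @ k.T / np.sqrt(q.shape[-1])
    if mask is not None:
        # Positions where mask == 0 are blocked before the Softmax,
        # so each word cannot see the words after it.
        scores = np.where(mask == 0, -1e9, scores)
    return softmax(scores) @ v

n, d = 4, 8
causal_mask = np.tril(np.ones((n, n)))   # n x n lower-triangular Mask matrix

# Masked self-attention in the first Decoder layer:
# Q, K, V all come from the Decoder input X (projections omitted here).
x_dec = np.random.randn(n, d)
z_masked = attention(x_dec, x_dec, x_dec, mask=causal_mask)

# Cross-attention in the second Decoder layer:
# K and V come from the Encoder output C, Q from the Decoder side.
c_enc = np.random.randn(n, d)            # stands in for the encoded matrix C
z_cross = attention(z_masked, c_enc, c_enc)
print(z_masked.shape, z_cross.shape)     # (4, 8) (4, 8)
```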
**Softmax prediction**

The green box in the figure is the final prediction output. Prediction proceeds row by row through the output matrix; once every row has been predicted, the final translation result is produced (a small sketch at the end of this post illustrates this projection-and-Softmax step).

![](https://bbs-img.huaweicloud.com/data/forums/attachment/forum/20228/5/1659663913959508106.png)

**Summary**

This post analyzed the Transformer in three parts: the Encoder, the Decoder, and the Softmax output. The design of the Transformer's attention mechanism and residual structure effectively makes the distance between any two words 1, which is very effective against the stubborn long-range dependency problem in NLP and thus also improves model performance.
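To close, here is a minimal sketch of the prediction step from the Softmax section, assuming a hypothetical vocabulary size and a random output projection: each row of the final Decoder output is projected to vocabulary logits and turned into next-word probabilities.

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

n, d, vocab_size = 4, 8, 1000            # hypothetical sizes for illustration
z = np.random.randn(n, d)                # final Decoder block output
w_out = np.random.randn(d, vocab_size)   # output projection (learned in practice)

probs = softmax(z @ w_out)               # one probability distribution per row
next_words = probs.argmax(axis=-1)       # predicted word id for each position
print(probs.shape, next_words.shape)     # (4, 1000) (4,)
```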