[{"data":1,"prerenderedAt":634},["ShallowReactive",2],{"content-query-Cww8KI79Ee":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":628,"_id":629,"_source":630,"_file":631,"_stem":632,"_extension":633},"/technology-blogs/zh/748","zh",false,"","百行代码写BERT，昇思MindSpore能力大赏","18年NLP里程碑式的模型-BERT","2021-10-12","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/f3d3d499d5794ec7af36c01b6b452376.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":605},"root",[17,25,34,52,57,74,79,102,117,122,129,134,140,155,160,165,172,177,180,190,195,202,206,212,220,225,240,245,252,256,261,269,274,278,286,291,299,313,318,325,330,350,363,371,384,389,393,401,406,421,426,434,439,447,455,460,467,472,477,485,490,498,503,518,523,530,535,540,555,560,565,570,577,582,595,600],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"百行代码写bert昇思mindspore能力大赏",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":33},"img",{"alt":7,"src":32},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/9a9f9a4f125443e8b6ccd76aba3a064f.gif",[],{"type":18,"tag":26,"props":35,"children":36},{},[37,39,45,47],{"type":24,"value":38},"作者：",{"type":18,"tag":40,"props":41,"children":42},"strong",{},[43],{"type":24,"value":44},"吕昱峰",{"type":24,"value":46}," ｜",{"type":18,"tag":40,"props":48,"children":49},{},[50],{"type":24,"value":51},"来源：知乎",{"type":18,"tag":26,"props":53,"children":54},{},[55],{"type":24,"value":56},"之前在如何评价华为MindSpore 1.5 ?提到了MindSpore的易用性已经具备百行代码写个BERT的能力，这次补上。BERT作为18年NLP里程碑式的模型，在被无数人追捧的同时也被解构分析了无数次，我力争稍微讲清楚模型的同时，让读者也能Get到MindSpore当前的能力。虽然多少有点广告的嫌疑，但是敬请各位看官听我细细讲来。",{"type":18,"tag":58,"props":59,"children":61},"h3",{"id":60},"_01-近1000行的bert实现",[62,67,69],{"type":18,"tag":40,"props":63,"children":64},{},[65],{"type":24,"value":66},"01",{"type":24,"value":68}," ",{"type":18,"tag":40,"props":70,"children":71},{},[72],{"type":24,"value":73},"近1000行的BERT实现",{"type":18,"tag":26,"props":75,"children":76},{},[77],{"type":24,"value":78},"动了写这个文章的念头是因为使用MindSpore也写了不少模型，尤其做了些预训练语言模型的复现，其间不断的参考Model Zoo的模型实现，当时就有个疑惑，写个BERT需要将近1000行，MindSpore有这么复杂且难用吗？",{"type":18,"tag":26,"props":80,"children":81},{},[82,84,89,91,100],{"type":24,"value":83},"官方实现的链接放上（",{"type":18,"tag":40,"props":85,"children":86},{},[87],{"type":24,"value":88},"gitee.com/mindspore/models/blob/master/official/nlp/bert/src/bert_model.py",{"type":24,"value":90},"），有兴趣的读者可以看看，去掉注释，这份实现仍旧复杂冗长，而且丝毫没有体现出MindSpore宣传的那样——“",{"type":18,"tag":40,"props":92,"children":93},{},[94],{"type":18,"tag":95,"props":96,"children":97},"em",{},[98],{"type":24,"value":99},"简单的开发体验",{"type":24,"value":101},"”。后来想要去迁移huggingface的checkpoint，自己动手写了一版，发现这冗长的官方实现完全是可以压缩的，且如此复杂的实现会给后来者增加一些困惑，因此就有了百行代码的version。",{"type":18,"tag":58,"props":103,"children":105},{"id":104},"_02-bert模型",[106,111,112],{"type":18,"tag":40,"props":107,"children":108},{},[109],{"type":24,"value":110},"02",{"type":24,"value":68},{"type":18,"tag":40,"props":113,"children":114},{},[115],{"type":24,"value":116},"BERT模型",{"type":18,"tag":26,"props":118,"children":119},{},[120],{"type":24,"value":121},"BERT是“Bidirectional Encoder Reporesentation from Transfromers”的缩写，也是芝麻街动漫人物的名字（谷歌老彩蛋人了）。",{"type":18,"tag":26,"props":123,"children":124},{},[125],{"type":18,"tag":30,"props":126,"children":128},{"alt":7,"src":127},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/4a316fd0290c43a7b480b34b34f40c4c.jpg",[],{"type":18,"tag":26,"props":130,"children":131},{},[132],{"type":24,"value":133},"芝麻街中的BERT",{"type":18,"tag":58,"props":135,"children":137},{"id":136},"标题就已经点出了模型的核心双向的transformer-encoderbert在gpt和elmo的基础上吸纳了二者的优势并且充分利用transformer的特征提取能力在当年狂刷各个评测数据集成为无法跨越的sota模型接下来我会从最基础的组成部分一点一点的配合公式图文代码用mindspore完成一个非常轻量的bert由于篇幅原因这里不会对transformer或预训练语言模型的基础知识进行详细讲解仅以实现bert为主线",[138],{"type":24,"value":139},"标题就已经点出了模型的核心，双向的Transformer Encoder，BERT在GPT和ELMo的基础上，吸纳了二者的优势，并且充分利用Transformer的特征提取能力，在当年狂刷各个评测数据集，成为无法跨越的SOTA模型。接下来我会从最基础的组成部分一点一点的配合公式、图文、代码，用MindSpore完成一个非常轻量的BERT。由于篇幅原因，这里不会对Transformer或预训练语言模型的基础知识进行详细讲解，仅以实现BERT为主线。",{"type":18,"tag":58,"props":141,"children":143},{"id":142},"_03-multi-head-attention",[144,149,150],{"type":18,"tag":40,"props":145,"children":146},{},[147],{"type":24,"value":148},"03",{"type":24,"value":68},{"type":18,"tag":40,"props":151,"children":152},{},[153],{"type":24,"value":154},"Multi-head Attention",{"type":18,"tag":26,"props":156,"children":157},{},[158],{"type":24,"value":159},"不同于Paper解析，我不会上来就讲BERT和Transformer在Embedding的差异，以及设置的预训练任务，而是从最基础的模块实现开始讲起。首先就是多头注意力(Multi-head Attention)模块。",{"type":18,"tag":26,"props":161,"children":162},{},[163],{"type":24,"value":164},"由于BERT模型的基本骨架完全由Transformer的Encoder构成，所以这里先对Transformer中Self-Attention和Multi-head Attention进行简述。首先是Self-Attention，即论文中的Scaled Dot-product Attention，其公式如下：",{"type":18,"tag":26,"props":166,"children":167},{},[168],{"type":18,"tag":30,"props":169,"children":171},{"alt":7,"src":170},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/b2a592268a7942a18fc155536ba6efa8.jpg",[],{"type":18,"tag":26,"props":173,"children":174},{},[175],{"type":24,"value":176},"这里的Self-Attention由三个输入进行运算，分别是Q(query matrix), K(key matrix), V(value matrix), 其分别由同一个输入经过全连接层进行线性变换而得到。这里的实现可以参照公式进行完全复现，其代码如下：",{"type":18,"tag":58,"props":178,"children":179},{"id":7},[],{"type":18,"tag":181,"props":182,"children":184},"pre",{"code":183},"class ScaledDotProductAttention(Cell):\n    def __init__(self, d_k, dropout):\n        super().__init__()\n        self.scale = Tensor(d_k, mindspore.float32)\n        self.matmul = nn.MatMul()\n        self.transpose = P.Transpose()\n        self.softmax = nn.Softmax(axis=-1)\n        self.sqrt = P.Sqrt()\n        self.masked_fill = MaskedFill(-1e9)\n        if dropout > 0.0:\n            self.dropout = nn.Dropout(1-dropout)\n        else:\n            self.dropout = None\n\n    def construct(self, Q, K, V, attn_mask):\n        K = self.transpose(K, (0, 1, 3, 2))\n        scores = self.matmul(Q, K) / self.sqrt(self.scale) # scores : [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]\n        scores = self.masked_fill(scores, attn_mask) # Fills elements of self tensor with value where mask is one.\n        attn = self.softmax(scores)\n        context = self.matmul(attn, V)\n        if self.dropout is not None:\n            context = self.dropout(context)\n        return context, attn\n",[185],{"type":18,"tag":186,"props":187,"children":188},"code",{"__ignoreMap":7},[189],{"type":24,"value":183},{"type":18,"tag":26,"props":191,"children":192},{},[193],{"type":24,"value":194},"其中，Q*K^T并进行缩放后，做了一步masked_fill的操作，是参考Pytorch版本实现，将计算所得的结果和初始输入序列中Padding为0的对应位置的数值进行替换，替换为接近于0的数，如上述代码中的-1e-9。此外还有增加模型鲁棒性的Dropout操作。",{"type":18,"tag":26,"props":196,"children":197},{},[198],{"type":18,"tag":30,"props":199,"children":201},{"alt":7,"src":200},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/64ceca1c999144cbb0226b83ac76aef3.jpg",[],{"type":18,"tag":26,"props":203,"children":204},{},[205],{"type":24,"value":154},{"type":18,"tag":58,"props":207,"children":209},{"id":208},"在完成基本的scaled-dot-product-attention后再来看多头注意力机制的实现所谓多头实际上是将原本单一的qkv投影为h个q-k-v由此在不改变计算量的情况下增强了模型的泛化能力这里既可以将其视为多个head在模型内部的集成ensamble也可以等价视作卷积操作中的多通道channel实际上multi-head-attention也不无借鉴cnn的味道多年前听刘铁岩老师在讲习班中提及下面来看实现部分",[210],{"type":24,"value":211},"在完成基本的Scaled Dot-product Attention后，再来看多头注意力机制的实现。所谓多头，实际上是将原本单一的Q、K、V投影为h个Q', K', V'。由此在不改变计算量的情况下，增强了模型的泛化能力。这里既可以将其视为多个head在模型内部的集成(ensamble)，也可以等价视作卷积操作中的多通道(channel)，实际上Multi-head Attention也不无借鉴CNN的味道(多年前听刘铁岩老师在讲习班中提及)。下面来看实现部分：",{"type":18,"tag":181,"props":213,"children":215},{"code":214},"class MultiHeadAttention(Cell):\n    def __init__(self, d_model, n_heads, dropout):\n        super().__init__()\n        self.n_heads = n_heads\n        self.W_Q = Dense(d_model, d_model)\n        self.W_K = Dense(d_model, d_model)\n        self.W_V = Dense(d_model, d_model)\n        self.linear = Dense(d_model, d_model)\n        self.head_dim = d_model // n_heads\n        assert self.head_dim * n_heads == d_model, \"embed_dim must be divisible by num_heads\"\n        self.layer_norm = nn.LayerNorm((d_model, ), epsilon=1e-12)\n        self.attention = ScaledDotProductAttention(self.head_dim, dropout)\n        # ops\n        self.transpose = P.Transpose()\n        self.expanddims = P.ExpandDims()\n        self.tile = P.Tile()\n        \n    def construct(self, Q, K, V, attn_mask):\n        # q: [batch_size x len_q x d_model], k: [batch_size x len_k x d_model], v: [batch_size x len_k x d_model]\n        residual, batch_size = Q, Q.shape[0]\n        q_s = self.W_Q(Q).view((batch_size, -1, self.n_heads, self.head_dim)) \n        k_s = self.W_K(K).view((batch_size, -1, self.n_heads, self.head_dim)) \n        v_s = self.W_V(V).view((batch_size, -1, self.n_heads, self.head_dim)) \n        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)\n        q_s = self.transpose(q_s, (0, 2, 1, 3)) # q_s: [batch_size x n_heads x len_q x d_k]\n        k_s = self.transpose(k_s, (0, 2, 1, 3)) # k_s: [batch_size x n_heads x len_k x d_k]\n        v_s = self.transpose(v_s, (0, 2, 1, 3)) # v_s: [batch_size x n_heads x len_k x d_v]\n\n        attn_mask = self.expanddims(attn_mask, 1)\n        attn_mask = self.tile(attn_mask, (1, self.n_heads, 1, 1)) # attn_mask : [batch_size x n_heads x len_q x len_k]\n        \n        # context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]\n        context, attn = self.attention(q_s, k_s, v_s, attn_mask)\n        context = self.transpose(context, (0, 2, 1, 3)).view((batch_size, -1, self.n_heads * self.head_dim)) # context: [batch_size x len_q x n_heads * d_v]\n        output = self.linear(context) \n        return self.layer_norm(output + residual), attn # output: [batch_size x len_q x d_model]\n",[216],{"type":18,"tag":186,"props":217,"children":218},{"__ignoreMap":7},[219],{"type":24,"value":214},{"type":18,"tag":26,"props":221,"children":222},{},[223],{"type":24,"value":224},"Q，K，V首先经过全连接层（Dense）进行线性变换，然后经过reshape（view）切换为多头，继而进行相应的转置满足送入ScaledDotProductAttention的需要。最后将获得的输出进行拼接，注意这里并没有显式的进行Concat操作，而是直接通过view，将context的shape[-1]还原为heads*hidden_size的大小。此外，最后return时加入了Add&Norm操作，即Encoder结构中对应的残差和Norm计算。这里不进行详述，参见下一节。",{"type":18,"tag":58,"props":226,"children":228},{"id":227},"_04-transformer-encoder",[229,234,235],{"type":18,"tag":40,"props":230,"children":231},{},[232],{"type":24,"value":233},"04",{"type":24,"value":68},{"type":18,"tag":40,"props":236,"children":237},{},[238],{"type":24,"value":239},"Transformer Encoder",{"type":18,"tag":26,"props":241,"children":242},{},[243],{"type":24,"value":244},"在完成基础的Multi-head Attention模块后，可以将其余部分完成，构造单层的Encoder。这里先对单层Encoder的结构进行简单说明，Transformer Encoder由Poswise Feed Forward Layer和Multi-head Attention Layer构成，并且每个Layer的输入和输出做了Residual运算(即: y = f(x) + x), 来保证加深神经网络层数不会产生退化问题，以及Layer Norm来满足深层神经网络可训练(缓解梯度消失和梯度爆炸)。这里为何使用Layer Norm而非Batch Norm可自行搜索，也是Transformer模型构造的一个有趣的trick。",{"type":18,"tag":26,"props":246,"children":247},{},[248],{"type":18,"tag":30,"props":249,"children":251},{"alt":7,"src":250},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/c74ab33bf00244128870c1db45bca939.jpg",[],{"type":18,"tag":26,"props":253,"children":254},{},[255],{"type":24,"value":239},{"type":18,"tag":26,"props":257,"children":258},{},[259],{"type":24,"value":260},"讲完Encoder的结构，需要将缺少的Poswise Feed Forward Layer进行实现，同时与Multi-head Attention Layer相仿，将Residual和Layer Norm集成到一起，代码实现如下：",{"type":18,"tag":181,"props":262,"children":264},{"code":263},"class PoswiseFeedForwardNet(Cell):\n    def __init__(self, d_model, d_ff, activation:str='gelu'):\n        super().__init__()\n        self.fc1 = Dense(d_model, d_ff)\n        self.fc2 = Dense(d_ff, d_model)\n        self.activation = activation_map.get(activation, nn.GELU())\n        self.layer_norm = nn.LayerNorm((d_model,), epsilon=1e-12)\n\n    def construct(self, inputs):\n        residual = inputs\n        outputs = self.fc1(inputs)\n        outputs = self.activation(outputs)\n        \n        outputs = self.fc2(outputs)\n        return self.layer_norm(outputs + residual)\n",[265],{"type":18,"tag":186,"props":266,"children":267},{"__ignoreMap":7},[268],{"type":24,"value":263},{"type":18,"tag":26,"props":270,"children":271},{},[272],{"type":24,"value":273},"将Multi-head Attention Layer和Poswise Feed Forward Layer连接即可获得Encoder：",{"type":18,"tag":58,"props":275,"children":277},{"id":276},"_1",[],{"type":18,"tag":181,"props":279,"children":281},{"code":280},"class BertEncoderLayer(Cell):\n    def __init__(self, d_model, n_heads, d_ff, activation, dropout):\n        super().__init__()\n        self.enc_self_attn = MultiHeadAttention(d_model, n_heads, dropout)\n        self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, activation)\n\n    def construct(self, enc_inputs, enc_self_attn_mask):\n        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)\n        enc_outputs = self.pos_ffn(enc_outputs)\n        return enc_outputs, attn\n",[282],{"type":18,"tag":186,"props":283,"children":284},{"__ignoreMap":7},[285],{"type":24,"value":280},{"type":18,"tag":26,"props":287,"children":288},{},[289],{"type":24,"value":290},"而后根据配置的层数、hidden_size, head数等参数，将n层Encoder依次连接，即可完成BERT的Encoder，这里使用nn.CellList容器进行实现：",{"type":18,"tag":181,"props":292,"children":294},{"code":293},"class BertEncoder(Cell):\n    def __init__(self, config):\n        super().__init__()\n        self.layers = nn.CellList([BertEncoderLayer(config.hidden_size, config.num_attention_heads, config.intermediate_size, config.hidden_act, config.hidden_dropout_prob) for _ in range(config.num_hidden_layers)])\n\n    def construct(self, inputs, enc_self_attn_mask):\n        outputs = inputs\n        for layer in self.layers:\n            outputs, enc_self_attn = layer(outputs, enc_self_attn_mask)\n        return outputs\n",[295],{"type":18,"tag":186,"props":296,"children":297},{"__ignoreMap":7},[298],{"type":24,"value":293},{"type":18,"tag":26,"props":300,"children":301},{},[302,307,308],{"type":18,"tag":40,"props":303,"children":304},{},[305],{"type":24,"value":306},"05",{"type":24,"value":68},{"type":18,"tag":40,"props":309,"children":310},{},[311],{"type":24,"value":312},"构造BERT",{"type":18,"tag":26,"props":314,"children":315},{},[316],{"type":24,"value":317},"在完成了Encoder后，可以开始组装完整的BERT模型。前述章节的内容都是Transformer Encoder结构的实现，而BERT模型的核心创新或差异则主要在Transformer backbone以外。首先是对Embedding的处理。",{"type":18,"tag":26,"props":319,"children":320},{},[321],{"type":18,"tag":30,"props":322,"children":324},{"alt":7,"src":323},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/e1d8a421135e4be3893aeb2207cc755d.jpg",[],{"type":18,"tag":26,"props":326,"children":327},{},[328],{"type":24,"value":329},"如图所示，文本输入后送入BERT模型的Embedding获得隐层表示由三种不同的Embedding加和而得，其中包括：",{"type":18,"tag":331,"props":332,"children":333},"ul",{},[334,340,345],{"type":18,"tag":335,"props":336,"children":337},"li",{},[338],{"type":24,"value":339},"Token Embeddings：即最常见的词向量，其中第一个占位符为[CLS]，用于后续编码后表达整条输入文本的编码，用于分类任务(因此成为CLS，即classifier)。此外还有[SEP]占位符用于分隔同一条输入的两个不同的句子，以及[PAD]表示Padding。",{"type":18,"tag":335,"props":341,"children":342},{},[343],{"type":24,"value":344},"Segment Embedding：用以区分同一条输入的两个不同句子。该Embedding的加入是为了进行Next Sentence Predict任务。",{"type":18,"tag":335,"props":346,"children":347},{},[348],{"type":24,"value":349},"Position Embedding：与Transformer一样，其无法像LSTM天然保留了位置信息，则需要手动对位置信息进行编码，这里的区别在于Transformer使用了三角函数，而这里则直接将位置对应的index送入Embedding层获取编码。（二者并无本质区别，且后者更简单直接）",{"type":18,"tag":26,"props":351,"children":352},{},[353,355,361],{"type":24,"value":354},"分析完三种不同的Embedding，直接使用",{"type":18,"tag":186,"props":356,"children":358},{"className":357},[],[359],{"type":24,"value":360},"nn.Embedding",{"type":24,"value":362},"即可完成该部分，对应代码如下：",{"type":18,"tag":181,"props":364,"children":366},{"code":365},"class BertEmbeddings(Cell):\n    def __init__(self, config):\n        super().__init__()\n        self.tok_embed = Embedding(config.vocab_size, config.hidden_size)\n        self.pos_embed = Embedding(config.max_position_embeddings, config.hidden_size)\n        self.seg_embed = Embedding(config.type_vocab_size, config.hidden_size)\n        self.norm = nn.LayerNorm((config.hidden_size,), epsilon=1e-12)\n\n    def construct(self, x, seg):\n        seq_len = x.shape[1]\n        pos = mnp.arange(seq_len) # mindspore.numpy\n        pos = P.BroadcastTo(x.shape)(P.ExpandDims()(pos, 0))\n        seg_embedding = self.seg_embed(seg)\n        tok_embedding = self.tok_embed(x)\n        embedding = tok_embedding + self.pos_embed(pos) + seg_embedding\n        return self.norm(embedding)\n",[367],{"type":18,"tag":186,"props":368,"children":369},{"__ignoreMap":7},[370],{"type":24,"value":365},{"type":18,"tag":26,"props":372,"children":373},{},[374,376,382],{"type":24,"value":375},"这里使用了",{"type":18,"tag":186,"props":377,"children":379},{"className":378},[],[380],{"type":24,"value":381},"mindspore.numpy.arange",{"type":24,"value":383},"来生成位置index，其余均为简单的调用和矩阵加。",{"type":18,"tag":26,"props":385,"children":386},{},[387],{"type":24,"value":388},"在完成Embedding层后，将Encoder和输出的pooler组合，即可构成完整的BERT模型，其代码如下：",{"type":18,"tag":58,"props":390,"children":392},{"id":391},"_2",[],{"type":18,"tag":181,"props":394,"children":396},{"code":395},"class BertModel(Cell):\n    def __init__(self, config):\n        super().__init__(config)\n        self.embeddings = BertEmbeddings(config)\n        self.encoder = BertEncoder(config)\n        self.pooler = Dense(config.hidden_size, config.hidden_size, activation='tanh')\n        \n    def construct(self, input_ids, segment_ids):\n        outputs = self.embeddings(input_ids, segment_ids)\n        enc_self_attn_mask = get_attn_pad_mask(input_ids, input_ids)\n        outputs = self.encoder(outputs, enc_self_attn_mask)\n        h_pooled = self.pooler(outputs[:, 0]) \n        return outputs, h_pooled\n",[397],{"type":18,"tag":186,"props":398,"children":399},{"__ignoreMap":7},[400],{"type":24,"value":395},{"type":18,"tag":26,"props":402,"children":403},{},[404],{"type":24,"value":405},"这里使用一个全连接层，对位置为0的输出进行pooler操作，即对应[CLS]占位符的输入文本表示，以供后续的分类任务使用。",{"type":18,"tag":58,"props":407,"children":409},{"id":408},"_06-bert预训练任务",[410,415,416],{"type":18,"tag":40,"props":411,"children":412},{},[413],{"type":24,"value":414},"06",{"type":24,"value":68},{"type":18,"tag":40,"props":417,"children":418},{},[419],{"type":24,"value":420},"BERT预训练任务",{"type":18,"tag":26,"props":422,"children":423},{},[424],{"type":24,"value":425},"BERT模型的精髓在于任务设计而非模型结构，是大家对其Paper的共识。BERT共设计了两个预训练任务，来完成无监督条件下的语言模型训练(实际上并非无监督)。",{"type":18,"tag":26,"props":427,"children":428},{},[429],{"type":18,"tag":40,"props":430,"children":431},{},[432],{"type":24,"value":433},"1. Next Sentence Predict",{"type":18,"tag":26,"props":435,"children":436},{},[437],{"type":24,"value":438},"首先先对实现较为简单的NSP任务进行分析。加入NSP任务主要是为了针对QA或NLI等输入句子数为2个的下游任务，增强模型在此类任务的能力。该预训练任务顾名思义，将句子A和B拼接作为输入，其中B有一半为正确情况，是A的下一句，另一半则随机选取非下一句的文本。预测任务则是二分类，预测B是否为A的下一句。具体实现如下：",{"type":18,"tag":181,"props":440,"children":442},{"code":441},"class BertNextSentencePredict(Cell):\n    def __init__(self, config):\n        super().__init__()\n        self.classifier = Dense(config.hidden_size, 2)\n\n    def construct(self, h_pooled):\n        logits_clsf = self.classifier(h_pooled)\n        return logits_clsf\n",[443],{"type":18,"tag":186,"props":444,"children":445},{"__ignoreMap":7},[446],{"type":24,"value":441},{"type":18,"tag":26,"props":448,"children":449},{},[450],{"type":18,"tag":40,"props":451,"children":452},{},[453],{"type":24,"value":454},"2. Masked Language Model",{"type":18,"tag":26,"props":456,"children":457},{},[458],{"type":24,"value":459},"Mask的Token。该任务有别于传统语言模型(或GPT的语言模型)，在于其为双向，即：",{"type":18,"tag":26,"props":461,"children":462},{},[463],{"type":18,"tag":30,"props":464,"children":466},{"alt":7,"src":465},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/f04358da5f45434bba4bcf2b6218c28f.jpg",[],{"type":18,"tag":26,"props":468,"children":469},{},[470],{"type":24,"value":471},"以此为目标函数，通过上下文预测被Mask的Token，天然符合完形填空的形态。",{"type":18,"tag":26,"props":473,"children":474},{},[475],{"type":24,"value":476},"这里不涉及数据预处理的内容，就不对Mask和替换比例进行详述了。对应的实现比较简单，实际上是Dense+activation+LayerNorm+Dense，实现如下：",{"type":18,"tag":181,"props":478,"children":480},{"code":479},"class BertMaskedLanguageModel(Cell):\n    def __init__(self, config, tok_embed_table):\n        super().__init__()\n        self.transform = Dense(config.hidden_size, config.hidden_size)\n        self.activation = activation_map.get(config.hidden_act, nn.GELU())\n        self.norm = nn.LayerNorm((config.hidden_size, ), epsilon=1e-12)\n        self.decoder = Dense(tok_embed_table.shape[1], tok_embed_table.shape[0], weight_init=tok_embed_table)\n\n    def construct(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.activation(hidden_states)\n        hidden_states = self.norm(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n",[481],{"type":18,"tag":186,"props":482,"children":483},{"__ignoreMap":7},[484],{"type":24,"value":479},{"type":18,"tag":26,"props":486,"children":487},{},[488],{"type":24,"value":489},"将两个Task组合，即可完成预训练的BERT模型：",{"type":18,"tag":181,"props":491,"children":493},{"code":492},"class BertForPretraining(Cell):\n    def __init__(self, config):\n        super().__init__(config)\n        self.bert = BertModel(config)\n        self.nsp = BertNextSentencePredict(config)\n        self.mlm = BertMaskedLanguageModel(config, self.bert.embeddings.tok_embed.embedding_table)\n\n    def construct(self, input_ids, segment_ids):\n        outputs, h_pooled = self.bert(input_ids, segment_ids)\n        nsp_logits = self.nsp(h_pooled)\n        mlm_logits = self.mlm(outputs)\n        return mlm_logits, nsp_logits\n",[494],{"type":18,"tag":186,"props":495,"children":496},{"__ignoreMap":7},[497],{"type":24,"value":492},{"type":18,"tag":26,"props":499,"children":500},{},[501],{"type":24,"value":502},"行文至此，用MindSpore实现整个BERT模型就完成了，可以看到，每个模块都和公式或图示能够完全对应，且单个模块的实现均在10-20行左右，总体实现代码在150-200行之间，相较于Model Zoo的800+代码，实在简洁。",{"type":18,"tag":58,"props":504,"children":506},{"id":505},"_07-前后对比",[507,512,513],{"type":18,"tag":40,"props":508,"children":509},{},[510],{"type":24,"value":511},"07",{"type":24,"value":68},{"type":18,"tag":40,"props":514,"children":515},{},[516],{"type":24,"value":517},"前后对比",{"type":18,"tag":26,"props":519,"children":520},{},[521],{"type":24,"value":522},"由于官方实现实在冗长，这里选择一部分代码的截图对比来看",{"type":18,"tag":26,"props":524,"children":525},{},[526],{"type":18,"tag":30,"props":527,"children":529},{"alt":7,"src":528},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/79a9af279a774db5b27466309a61e444.jpg",[],{"type":18,"tag":26,"props":531,"children":532},{},[533],{"type":24,"value":534},"左侧为官方实现的BERTModel，右侧为上述实现的集成，同样的BERT模型可以通过100多行代码进行简洁实现。由此可见，MindSpore在经过多版本迭代后，其本身的算子支持度和前端表达的易用性已经逐步趋向于完善，百行代码实现BERT，以前或许只有Pytorch能做到的，如今MindSpore也可以。",{"type":18,"tag":26,"props":536,"children":537},{},[538],{"type":24,"value":539},"当然，官方实现由于是早期版本一直持续维护，应该没有在版本更迭后考虑使用更简洁的方式去完成，但是这会给用户造成MindSpore很难用，要多写很多代码的错觉。在1.2版本发布后，其能力逐步已经能够支撑与Pytorch持平的代码量完成同量级模型，希望这篇文章，能够成为一个小的样例。",{"type":18,"tag":58,"props":541,"children":543},{"id":542},"_08-小结",[544,549,550],{"type":18,"tag":40,"props":545,"children":546},{},[547],{"type":24,"value":548},"08",{"type":24,"value":68},{"type":18,"tag":40,"props":551,"children":552},{},[553],{"type":24,"value":554},"小结",{"type":18,"tag":26,"props":556,"children":557},{},[558],{"type":24,"value":559},"最后还是总结一下。首先，从我个人的使用体会，MindSpore从0.7的将就可用，到1.0的基本完善，再到1.5的易用性提升，在“简单的开发体验”这个目标上，是有质的飞跃的。而ModelZoo毕竟模型众多，且少有人不断重构优化，造成的误解应该不在少数。所以，就以BERT这个里程碑式的模型为例，让大家有一点直观的体会。",{"type":18,"tag":26,"props":561,"children":562},{},[563],{"type":24,"value":564},"此外，对所有的NLPer多啰嗦几句，BERT也就是100来行代码的一个模型，Transformer结构所见即所得，不要畏惧大模型，自己复现一下，不管是做实验发Paper还是面试回答问题，都要得心应手得多。所以，拿着MindSpore写起来吧！",{"type":18,"tag":26,"props":566,"children":567},{},[568],{"type":24,"value":569},"扫描下方二维码加入MindSpore项目↓",{"type":18,"tag":26,"props":571,"children":572},{},[573],{"type":18,"tag":30,"props":574,"children":576},{"alt":7,"src":575},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/10/12/05edeb7a2be34ec6861b7eaa5624411c.jpg",[],{"type":18,"tag":26,"props":578,"children":579},{},[580],{"type":24,"value":581},"MindSpore官方资料",{"type":18,"tag":26,"props":583,"children":584},{},[585,587],{"type":24,"value":586},"GitHub : ",{"type":18,"tag":588,"props":589,"children":593},"a",{"href":590,"rel":591},"https://github.com/mindspore-ai/mindspore",[592],"nofollow",[594],{"type":24,"value":590},{"type":18,"tag":26,"props":596,"children":597},{},[598],{"type":24,"value":599},"Gitee : https : //gitee.com/mindspore/mindspore",{"type":18,"tag":26,"props":601,"children":602},{},[603],{"type":24,"value":604},"官方QQ群 : 871543426",{"title":7,"searchDepth":606,"depth":606,"links":607},4,[608,611,613,614,616,617,618,620,621,622,624,626],{"id":60,"depth":609,"text":610},3,"01 近1000行的BERT实现",{"id":104,"depth":609,"text":612},"02 BERT模型",{"id":136,"depth":609,"text":139},{"id":142,"depth":609,"text":615},"03 Multi-head Attention",{"id":7,"depth":609,"text":7},{"id":208,"depth":609,"text":211},{"id":227,"depth":609,"text":619},"04 Transformer Encoder",{"id":276,"depth":609,"text":7},{"id":391,"depth":609,"text":7},{"id":408,"depth":609,"text":623},"06 BERT预训练任务",{"id":505,"depth":609,"text":625},"07 前后对比",{"id":542,"depth":609,"text":627},"08 小结","markdown","content:technology-blogs:zh:748.md","content","technology-blogs/zh/748.md","technology-blogs/zh/748","md",1776506140616]