[{"data":1,"prerenderedAt":579},["ShallowReactive",2],{"content-query-YJABAvEPNW":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":573,"_id":574,"_source":575,"_file":576,"_stem":577,"_extension":578},"/technology-blogs/zh/774","zh",false,"","技术干货｜昇思MindSpore NLP模型迁移之Roberta ——情感分析任务（一）","Roberta 模型：在 BERT 模型的基础上进行了一定的改进","2021-11-05","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/3ae453e9ae2f4871a5eb013ca82d7056.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":564},"root",[17,25,34,52,59,64,69,74,79,84,89,94,99,113,118,130,135,143,148,153,158,163,168,173,178,185,190,211,221,233,240,245,252,257,265,273,285,290,297,305,312,317,325,333,338,346,354,359,364,369,374,379,384,392,397,405,412,417,425,432,437,445,450,458,463,471,478,483,491,496,504,509,517,522,527,534,538,543,554,559],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"技术干货昇思mindspore-nlp模型迁移之roberta-情感分析任务一",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":33},"img",{"alt":7,"src":32},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/71bbc2986fb74deeb9b909c3eb25c7a4.gif",[],{"type":18,"tag":26,"props":35,"children":36},{},[37,39,45,47],{"type":24,"value":38},"作者：",{"type":18,"tag":40,"props":41,"children":42},"strong",{},[43],{"type":24,"value":44},"unityli、钰哥",{"type":24,"value":46}," ｜",{"type":18,"tag":40,"props":48,"children":49},{},[50],{"type":24,"value":51},"来源：知乎",{"type":18,"tag":53,"props":54,"children":56},"h2",{"id":55},"熟悉-bert-模型的小伙伴对于-roberta-模型肯定不陌生了roberta-模型在-bert-模型的基础上进行了一定的改进主要改进点有以下几个部分",[57],{"type":24,"value":58},"熟悉 BERT 模型的小伙伴对于 Roberta 模型肯定不陌生了。Roberta 模型在 BERT 模型的基础上进行了一定的改进，主要改进点有以下几个部分：",{"type":18,"tag":26,"props":60,"children":61},{},[62],{"type":24,"value":63},"1. **训练语料：**BERT只使用 16 GB 的Book Corpus数据集和英语维基百科进行训练，Roberta增加了 CC-NEWS 、OPEN WEB TEXT、STORIES 等语料，一共有 160 GB 的纯文本。",{"type":18,"tag":26,"props":65,"children":66},{},[67],{"type":24,"value":68},"2. **Batch Size：**Roberta模型在训练中使用了更大的Batch Size -> [256 ~ 8000]。",{"type":18,"tag":26,"props":70,"children":71},{},[72],{"type":24,"value":73},"3. **训练时间：**Roberta模型使用 1024 块 V100 的 GPU 训练了整整 1 天的时间，模型参数量和训练时间更加多和更加长。",{"type":18,"tag":26,"props":75,"children":76},{},[77],{"type":24,"value":78},"同时Roberta在具体的训练方法上也有所改进：",{"type":18,"tag":26,"props":80,"children":81},{},[82],{"type":24,"value":83},"1. 动态 MASK 机制",{"type":18,"tag":26,"props":85,"children":86},{},[87],{"type":24,"value":88},"2. 去除了 NSP 任务",{"type":18,"tag":26,"props":90,"children":91},{},[92],{"type":24,"value":93},"3. 
Roberta source code (Hugging Face): https://huggingface.co/roberta-base

Roberta paper: https://arxiv.org/abs/1907.11692

Here we use Huawei's MindSpore framework and migrate the PyTorch version of the Roberta model to it. Everyone is welcome to join the development of the MindSpore open-source community!

## Preface

Environment used in this article:

- System: Ubuntu 18
- GPU: RTX 3090
- MindSpore version: 1.3
- Dataset: SST-2 (sentiment analysis)

SST-2 dataset definition: a binary classification dataset in which every sentence in the training and validation sets carries a label of 0 or 1.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/ef0d6f6f36cc42ae970e417c5882d91c.png)
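For reference, the GLUE release of SST-2 ships as tab-separated files (train.tsv / dev.tsv) with a `sentence` column and a `label` column. The two rows below are made up purely to show the layout; they are not lines copied from the dataset:

```
sentence	label
an uplifting and heartfelt little film	1
a dull , lifeless script	0
```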
We need to convert the PyTorch Roberta weights into a checkpoint that MindSpore can load; one way of doing the conversion is shown below. The rewrite mainly follows the official API mapping documentation.

Official link: **PyTorch-MindSpore API mapping** (https://www.mindspore.cn/docs/migration_guide/zh-CN/r1.5/api_mapping/pytorch_api_mapping.html)

```
from mindspore.train.serialization import save_checkpoint


def torch_to_ms(model, torch_model):
    """
    Update the MindSpore Roberta parameters from the torch parameters.

    Args:
        model: MindSpore model
        torch_model: torch parameter dict (state dict)
    """
    print("start load")
    # load torch parameters and mindspore parameters
    torch_param_dict = torch_model
    ms_param_dict = model.parameters_dict()
    count = 0
    for ms_key in ms_param_dict.keys():
        ms_key_tmp = ms_key.split('.')
        if ms_key_tmp[0] == 'roberta_embedding_lookup':
            count += 1
            update_torch_to_ms(torch_param_dict, ms_param_dict, 'embeddings.word_embeddings.weight', ms_key)
        elif ms_key_tmp[0] == 'roberta_embedding_postprocessor':
            if ms_key_tmp[1] == "token_type_embedding":
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict, 'embeddings.token_type_embeddings.weight', ms_key)
            elif ms_key_tmp[1] == "full_position_embedding":
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict, 'embeddings.position_embeddings.weight',
                                   ms_key)
            elif ms_key_tmp[1] == "layernorm":
                if ms_key_tmp[2] == "gamma":
                    count += 1
                    update_torch_to_ms(torch_param_dict, ms_param_dict, 'embeddings.LayerNorm.weight',
                                       ms_key)
                else:
                    count += 1
                    update_torch_to_ms(torch_param_dict, ms_param_dict, 'embeddings.LayerNorm.bias',
                                       ms_key)
        elif ms_key_tmp[0] == "roberta_encoder":
            if ms_key_tmp[3] == 'attention':
                par = ms_key_tmp[4].split('_')[0]
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict,
                                   'encoder.layer.' + ms_key_tmp[2] + '.' + ms_key_tmp[3] + '.'
                                   + 'self.' + par + '.' + ms_key_tmp[5],
                                   ms_key)
            elif ms_key_tmp[3] == 'attention_output':
                if ms_key_tmp[4] == 'dense':
                    count += 1
                    update_torch_to_ms(torch_param_dict, ms_param_dict,
                                       'encoder.layer.' + ms_key_tmp[2] + '.attention.output.' + ms_key_tmp[4] + '.' + ms_key_tmp[5],
                                       ms_key)
                elif ms_key_tmp[4] == 'layernorm':
                    if ms_key_tmp[5] == 'gamma':
                        count += 1
                        update_torch_to_ms(torch_param_dict, ms_param_dict,
                                           'encoder.layer.' + ms_key_tmp[2] + '.attention.output.LayerNorm.weight',
                                           ms_key)
                    else:
                        count += 1
                        update_torch_to_ms(torch_param_dict, ms_param_dict,
                                           'encoder.layer.' + ms_key_tmp[2] + '.attention.output.LayerNorm.bias',
                                           ms_key)
            elif ms_key_tmp[3] == 'intermediate':
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict,
                                   'encoder.layer.' + ms_key_tmp[2] + '.intermediate.dense.' + ms_key_tmp[4],
                                   ms_key)
            elif ms_key_tmp[3] == 'output':
                if ms_key_tmp[4] == 'dense':
                    count += 1
                    update_torch_to_ms(torch_param_dict, ms_param_dict,
                                       'encoder.layer.' + ms_key_tmp[2] + '.output.dense.' + ms_key_tmp[5],
                                       ms_key)
                else:
                    if ms_key_tmp[5] == 'gamma':
                        count += 1
                        update_torch_to_ms(torch_param_dict, ms_param_dict,
                                           'encoder.layer.' + ms_key_tmp[2] + '.output.LayerNorm.weight',
                                           ms_key)
                    else:
                        count += 1
                        update_torch_to_ms(torch_param_dict, ms_param_dict,
                                           'encoder.layer.' + ms_key_tmp[2] + '.output.LayerNorm.bias',
                                           ms_key)

        if ms_key_tmp[0] == 'dense':
            if ms_key_tmp[1] == 'weight':
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict,
                                   'pooler.dense.weight',
                                   ms_key)
            else:
                count += 1
                update_torch_to_ms(torch_param_dict, ms_param_dict,
                                   'pooler.dense.bias',
                                   ms_key)

    save_checkpoint(model, "./model/roberta-base.ckpt")
    print(count)
    print("finish load")
```
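The helper `update_torch_to_ms` used throughout the conversion function above is not shown in the article. Assuming it simply copies one PyTorch tensor into the matching MindSpore `Parameter` (and that `torch_param_dict` maps parameter names to torch tensors), a minimal sketch could look like this:

```
from mindspore import Tensor


def update_torch_to_ms(torch_param_dict, ms_param_dict, torch_key, ms_key):
    """Copy torch_param_dict[torch_key] into ms_param_dict[ms_key] (assumed behaviour)."""
    value = torch_param_dict[torch_key].cpu().numpy()
    target = ms_param_dict[ms_key]
    if tuple(value.shape) != tuple(target.data.shape):
        print(f"shape mismatch: {torch_key} {value.shape} vs {ms_key} {target.data.shape}")
    target.set_data(Tensor(value, target.data.dtype))
```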
Note that the converted parameters must line up exactly; otherwise the weights will fail to load later, or the training loss will refuse to drop. After the conversion it is worth printing the **keys** of the corresponding parameters to make sure nothing is missing or mismatched.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/d59ab401cfcf4010aa6b53619379433e.jpg)

This gives us the converted roberta-base.ckpt file to load the weights from. Be careful not to confuse this checkpoint with the TensorFlow checkpoint files!

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/2d13d485bd0c4a53a6c3b6dfa1062306.png)

The data-input side also differs between MindSpore and PyTorch. Here we simply use our own dataset module, which wraps the preprocessing for several NLP tasks: it converts a dataset into MindRecord format, which is then used for training and evaluation. Let's take a closer look.
```
"""
    SST-2 dataset
"""
from typing import Union, Dict, List

import mindspore.dataset as ds

from ..base_dataset import CLSBaseDataset


class SST2Dataset(CLSBaseDataset):
    """
    SST2 dataset.

    Args:
        paths (Union[str, Dict[str, str]], Optional): Dataset file path or dataset directory path, default None.
        tokenizer (Union[str]): Tokenizer function, default 'spacy'.
        lang (str): Tokenizer language, default 'en'.
        max_size (int, Optional): Vocab max size, default None.
        min_freq (int, Optional): Min word frequency, default None.
        padding (str): Padding token, default `<pad>`.
        unknown (str): Unknown token, default `<unk>`.
        buckets (List[int], Optional): Pad each row to the length of a bucket, default None.

    Examples:
        >>> sst2 = SST2Dataset(tokenizer='spacy', lang='en')
        # sst2 = SST2Dataset(tokenizer='spacy', lang='en', buckets=[16, 32, 64])
        >>> ds = sst2()
    """

    def __init__(self, paths: Union[str, Dict[str, str]] = None,
                 tokenizer: Union[str] = 'spacy', lang: str = 'en', max_size: int = None, min_freq: int = None,
                 padding: str = '<pad>', unknown: str = '<unk>',
                 buckets: List[int] = None):
        super(SST2Dataset, self).__init__(sep='\t', name='SST-2')
        self._paths = paths
        self._tokenize = tokenizer
        self._lang = lang
        self._vocab_max_size = max_size
        self._vocab_min_freq = min_freq
        self._padding = padding
        self._unknown = unknown
        self._buckets = buckets

    def __call__(self) -> Dict[str, ds.MindDataset]:
        self.load(self._paths)
        self.process(tokenizer=self._tokenize, lang=self._lang, max_size=self._vocab_max_size,
                     min_freq=self._vocab_min_freq, padding=self._padding,
                     unknown=self._unknown, buckets=self._buckets)
        return self.mind_datasets
```

```
from mindtext.dataset.classification import SST2Dataset

# Preprocess the SST-2 sentiment-analysis dataset.
# If the cache already exists, just read it back instead of rebuilding it.
dataset = SST2Dataset(paths='./mindtext/dataset/SST-2',
                      tokenizer="roberta-base",
                      max_length=128,
                      truncation_strategy=True,
                      columns_list=['input_ids', 'attention_mask', 'label'],
                      test_columns_list=['input_ids', 'attention_mask'],
                      batch_size=64)

ds = dataset()  # generate the train/dev MindRecord files
ds = dataset.from_cache(columns_list=['input_ids', 'attention_mask', 'label'],
                        test_columns_list=['input_ids', 'attention_mask'],
                        batch_size=64)
dev_dataset = ds['dev']  # the dev set converted to MindRecord, used later for evaluation
```

The generated MindRecord output consists of two files, a .mindrecord file and a .mindrecord.db file. Do not rename them casually: the two files are tied to each other, and renaming them makes the MindRecord data unreadable!

Also make sure the columns produced during preprocessing match the model's inputs; for our Roberta model the inputs are ['input_ids', 'attention_mask', 'label'].

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/11dc068ffe3f4a978ac146c6f66d4822.png)
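As a quick sanity check (not part of the article's code), you can open a generated MindRecord file directly with `mindspore.dataset.MindDataset` and confirm that its column names match the model inputs before training; the file path below is only an assumed example of where the pipeline might have written it.

```
import mindspore.dataset as ds

# Path is illustrative; point it at whichever .mindrecord file the pipeline produced.
check = ds.MindDataset("./mindtext/dataset/SST-2/train.mindrecord")
print(check.get_col_names())      # expect ['input_ids', 'attention_mask', 'label']
print(check.get_dataset_size())   # number of rows in the file
```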
**Project architecture diagram:**

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/f7d152ed8b654cd38161b030e905b8b7.jpg)

For the architecture we mainly follow fastNLP's way of splitting things up: the model is divided into an Encoder, an Embedding and a Tokenizer part, and the architecture will be optimized further later on.

**1. Embedding**

```
"""Roberta Embedding."""
import logging
from typing import Tuple

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindtext.modules.encoder.roberta import RobertaModel, RobertaConfig

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class RobertaEmbedding(nn.Cell):
    """
    A class that loads the pre-trained weight file into the model.
    """
    def __init__(self, roberta_config: RobertaConfig, is_training: bool = False):
        super(RobertaEmbedding, self).__init__()
        self.roberta = RobertaModel(roberta_config, is_training)

    def init_robertamodel(self, roberta):
        """
        Manually initialize the inner RobertaModel.
        """
        self.roberta = roberta

    def from_pretrain(self, ckpt_file):
        """
        Load the model parameters from a checkpoint file.
        """
        param_dict = load_checkpoint(ckpt_file)
        load_param_into_net(self.roberta, param_dict)

    def construct(self, input_ids: Tensor, input_mask: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Return the model outputs after the pre-trained weights have been loaded.

        Args:
            input_ids: the token ids of the input text.
            input_mask: the mask for input_ids.

        Returns:
            sequence_output: the sequence output.
            pooled_output: the pooled output of the first (cls) token.
        """
        sequence_output, pooled_output, _ = self.roberta(input_ids, input_mask)
        return sequence_output, pooled_output
```

This part is responsible for loading the pre-trained weights, i.e. the MindSpore checkpoint file we converted earlier.
**2. Encoder**

```
class RobertaModel(nn.Cell):
    """
    Roberta encoder, used from mindtext.modules.encoder.roberta.

    Args:
        config (Class): Configuration for RobertaModel.
        is_training (bool): True for training mode, False for eval mode.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot embedding lookups. Default: False.
    """

    def __init__(self,
                 config: RobertaConfig,
                 is_training: bool,
                 use_one_hot_embeddings: bool = False):
        super().__init__()
        config = copy.deepcopy(config)
        if not is_training:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0

        self.seq_length = config.seq_length
        self.hidden_size = config.hidden_size
        self.num_hidden_layers = config.num_hidden_layers
        self.embedding_size = config.hidden_size
        self.token_type_ids = None
        self.compute_type = numbtpye2mstype(config.compute_type)
        self.last_idx = self.num_hidden_layers - 1
        output_embedding_shape = [-1, self.seq_length, self.embedding_size]
        self.roberta_embedding_lookup = nn.Embedding(
            vocab_size=config.vocab_size,
            embedding_size=self.embedding_size,
            use_one_hot=use_one_hot_embeddings,
            embedding_table=TruncatedNormal(config.initializer_range))

        self.roberta_embedding_postprocessor = EmbeddingPostprocessor(
            embedding_size=self.embedding_size,
            embedding_shape=output_embedding_shape,
            use_token_type=False,
            token_type_vocab_size=config.type_vocab_size,
            use_one_hot_embeddings=use_one_hot_embeddings,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)

        self.roberta_encoder = RobertaTransformer(
            hidden_size=self.hidden_size,
            seq_length=self.seq_length,
            num_attention_heads=config.num_attention_heads,
            num_hidden_layers=self.num_hidden_layers,
            intermediate_size=config.intermediate_size,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            initializer_range=config.initializer_range,
            hidden_dropout_prob=config.hidden_dropout_prob,
            hidden_act=config.hidden_act,
            compute_type=self.compute_type,
            return_all_encoders=True)

        self.cast = P.Cast()
        self.dtype = numbtpye2mstype(config.dtype)
        self.cast_compute_type = SecurityCast()
        self.slice = P.StridedSlice()

        self.squeeze_1 = P.Squeeze(axis=1)
        self.dense = nn.Dense(self.hidden_size, self.hidden_size,
                              activation="tanh",
                              weight_init=TruncatedNormal(config.initializer_range)).to_float(mstype.float32)
        self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)

    def construct(self, input_ids: Tensor, input_mask: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        """Bidirectional encoder representations from transformers.

        Args:
            input_ids: the token ids of the input text.
            input_mask: the mask for input_ids.

        Returns:
            sequence_output: the sequence output.
            pooled_output: the pooled output of the first (cls) token.
            embedding_tables: the fixed embedding table.
        """
        # embedding
        embedding_tables = self.roberta_embedding_lookup.embedding_table
        word_embeddings = self.roberta_embedding_lookup(input_ids)
        embedding_output = self.roberta_embedding_postprocessor(input_ids,
                                                                word_embeddings)
        # attention mask [batch_size, seq_length, seq_length]
        attention_mask = self._create_attention_mask_from_input_mask(input_mask)

        # roberta encoder
        encoder_output = self.roberta_encoder(self.cast_compute_type(embedding_output),
                                              attention_mask)

        sequence_output = self.cast(encoder_output[self.last_idx], self.dtype)

        # pooler: slice out the first token and pass it through a tanh dense layer
        batch_size = P.Shape()(input_ids)[0]
        sequence_slice = self.slice(sequence_output,
                                    (0, 0, 0),
                                    (batch_size, 1, self.hidden_size),
                                    (1, 1, 1))
        first_token = self.squeeze_1(sequence_slice)
        pooled_output = self.dense(first_token)
        pooled_output = self.cast(pooled_output, self.dtype)
        return sequence_output, pooled_output, embedding_tables
```
This is the main body of the model; we put the whole thing into roberta.py under the encoder directory. As the code shows, the encoder part mainly involves the encoder output and the sequence output. Parts of the implementation follow the BERT model from the MindSpore ModelZoo (when migrating your own model, the MindSpore website is a good place to look for reference code), so all that is left is to wire a few key modules together:

- **EncoderOutput:** adds the residual connection and layer normalization after each sub-layer.
- **RobertaAttention:** a single multi-head self-attention layer.
- **RobertaEncoderCell:** a single Roberta encoder layer.
- **RobertaTransformer:** stacks multiple RobertaEncoderCell layers to form the complete Roberta encoder.

This greatly reduces the time spent on the model architecture and is also a good way to learn how the MindSpore framework is used; a simplified sketch of how the cells are stacked follows below.
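For intuition, here is a deliberately simplified sketch, not the article's actual RobertaTransformer, of how a stack of encoder cells can be composed with `mindspore.nn.CellList`; `make_cell` is an assumed factory that builds one RobertaEncoderCell.

```
import mindspore.nn as nn


class RobertaTransformerSketch(nn.Cell):
    """Simplified stand-in: stack N encoder cells and collect every layer's output."""

    def __init__(self, num_hidden_layers, make_cell, return_all_encoders=True):
        super().__init__()
        self.layers = nn.CellList([make_cell() for _ in range(num_hidden_layers)])
        self.return_all_encoders = return_all_encoders

    def construct(self, hidden_states, attention_mask):
        all_outputs = ()
        for layer in self.layers:
            hidden_states = layer(hidden_states, attention_mask)
            all_outputs = all_outputs + (hidden_states,)
        if self.return_all_encoders:
            # the real module returns every layer's output when return_all_encoders=True
            return all_outputs
        return (hidden_states,)
```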
**3. Tokenizer**

Tokenization is wrapped inside the dataset module. You can load the vocabulary and related files simply by giving the name of a pre-trained model from the huggingface hub, which is very convenient; see the data-processing code earlier for details. If you want to implement another downstream task that the dataset module does not yet cover, you can also use our tokenizer to build MindRecord data in your own way.

```
def get_tokenizer(tokenize_method: str, lang: str = 'en'):
    """
    Get a tokenizer.

    Args:
        tokenize_method (str): Select a tokenizer method.
        lang (str): Tokenizer language, default English.

    Returns:
        function: A tokenizer function.
    """
    # _split, _cn_split and parse_version are defined elsewhere in the module.
    tokenizer_dict = {
        'spacy': None,
        'raw': _split,
        'cn-char': _cn_split,
    }
    if tokenize_method == 'spacy':
        import spacy
        spacy.prefer_gpu()
        if lang != 'en':
            raise RuntimeError("Spacy only supports english")
        if parse_version(spacy.__version__) >= parse_version('3.0'):
            en = spacy.load('en_core_web_sm')
        else:
            en = spacy.load(lang)

        def _spacy_tokenizer(text):
            return [w.text for w in en.tokenizer(text)]

        tokenizer = _spacy_tokenizer
    elif tokenize_method in tokenizer_dict:
        tokenizer = tokenizer_dict[tokenize_method]
    else:
        raise RuntimeError(f"Only support {tokenizer_dict.keys()} tokenizer.")
    return tokenizer
```

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/420825b3c1bf41c5b53e9141921db6db.png)

In the model's roberta.py, the RobertaConfig module loads the corresponding parameters from a yaml file into RobertaModel. The yaml configuration is:

```
seq_length: 128
vocab_size: 50265
hidden_size: 768
bos_token_id: 0
eos_token_id: 2
num_hidden_layers: 12
num_attention_heads: 12
intermediate_size: 3072
hidden_act: "gelu"
hidden_dropout_prob: 0.1
attention_probs_dropout_prob: 0.1
max_position_embeddings: 514
pad_token_id: 1
type_vocab_size: 1
initializer_range: 0.02
use_relative_positions: False
dtype: mstype.float32
compute_type: mstype.float32
```

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/e34efeb1a9ed487e8913f7c0ad1a6724.png)

Next comes the part everyone has been waiting for: model training! The first step is to load the MindSpore-compatible weights into the Roberta model and initialize it. As the snippet below shows, the hyper-parameters from the yaml file are written into RobertaModel, which is then instantiated; the weights are loaded into it with MindSpore's built-in load_checkpoint and load_param_into_net functions. Note that we do not load the weights into the classifier directly; a RobertaEmbedding layer is nested in between. Also note that for training, is_training is set to True, and num_labels is set according to the requirements of the downstream task.

```
roberta_config_file = "./mindtext/config/test.yaml"
roberta_config = RobertaConfig.from_yaml_file(roberta_config_file)
rbm = RobertaModel(roberta_config, True)
param_dict = load_checkpoint('./mindtext/pretrain/roberta-base-ms.ckpt')
p = load_param_into_net(rbm, param_dict)
em = RobertaEmbedding(roberta_config, True)
em.init_robertamodel(rbm)
roberta = RobertaforSequenceClassification(roberta_config, is_training=True, num_labels=2)
roberta.init_embedding(em)
```

Once the weights are loaded, set the learning rate, the number of epochs, the loss function, the optimizer and so on, and training can begin. We use the learning rate from the paper, 3e-5, together with learning-rate warm-up: the rate is first ramped up and, once the model has stabilized, decays from the preset value, which speeds up convergence and improves the final result. We train for 3 epochs.

```
epoch_num = 3
save_path = "./mindtext/pretrain/output/roberta-base_sst.ckpt"
lr_schedule = RobertaLearningRate(learning_rate=3e-5,
                                  end_learning_rate=1e-5,
                                  warmup_steps=int(train_dataset.get_dataset_size() * epoch_num * 0.1),
                                  decay_steps=train_dataset.get_dataset_size() * epoch_num,
                                  power=1.0)
params = roberta.trainable_params()
optimizer = AdamWeightDecay(params, lr_schedule, eps=1e-8)
```
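To make the schedule concrete, here is a rough per-step illustration of the warm-up plus polynomial-decay behaviour described above. It is an approximation written for this article, not the actual RobertaLearningRate implementation:

```
def lr_at_step(step, learning_rate=3e-5, end_learning_rate=1e-5,
               warmup_steps=100, decay_steps=1000, power=1.0):
    """Approximate warm-up + polynomial decay (illustration only)."""
    if step < warmup_steps:
        # linear warm-up from 0 towards the base learning rate
        return learning_rate * (step + 1) / warmup_steps
    # polynomial decay from learning_rate down to end_learning_rate
    progress = min(step, decay_steps) / decay_steps
    return (learning_rate - end_learning_rate) * (1 - progress) ** power + end_learning_rate
```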
With everything in place we enter the training stage; at the end, the fine-tuned parameters are saved to a ckpt file at the given path.

```
def train(train_data, roberta, optimizer, save_path, epoch_num):
    update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2 ** 32, scale_factor=2, scale_window=1000)
    netwithgrads = RobertaFinetuneCell(roberta, optimizer=optimizer, scale_update_cell=update_cell)
    callbacks = [TimeMonitor(train_data.get_dataset_size()), LossCallBack(train_data.get_dataset_size())]
    model = Model(netwithgrads)
    model.train(epoch_num, train_data, callbacks=callbacks, dataset_sink_mode=False)
    save_checkpoint(model.train_network, save_path)
```

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/54240576565b4343aa7765b4ec84cfea.png)

Evaluation follows roughly the same flow as training: read back the validation set that was converted to MindRecord and use it to measure the model's performance.

```
dataset = SST2Dataset(paths='./mindtext/dataset/SST-2',
                      tokenizer="roberta-base",
                      max_length=128,
                      truncation_strategy=True,
                      columns_list=['input_ids', 'attention_mask', 'label'],
                      test_columns_list=['input_ids', 'attention_mask'],
                      batch_size=64)
ds = dataset.from_cache(columns_list=['input_ids', 'attention_mask', 'label'],
                        test_columns_list=['input_ids', 'attention_mask'],
                        batch_size=64)
dev_dataset = ds['dev']
```

Then load the hyper-parameter configuration and the model checkpoint, and put them into the Roberta model through the from_pretrain function. Note that here, too, is_training and num_labels have to be set according to the downstream task.

```
roberta_config_file = "./mindtext/conf/test.yaml"
roberta_config = RobertaConfig.from_yaml_file(roberta_config_file)

roberta = RobertaforSequenceClassification(roberta_config, is_training=False, num_labels=2, dropout_prob=0.0)
model_path = "./mindtext/pretrain/output/roberta_trainsst2.ckpt"
roberta.from_pretrain(model_path)
```

Finally, the evaluation itself. The task is binary text classification: the predicted labels are compared with the ground-truth labels, and the resulting accuracy is a number between 0 and 1.
```
def eval(eval_data, model):
    metric = Accuracy('classification')
    metric.clear()
    squeeze = mindspore.ops.Squeeze(1)
    sm = mindspore.nn.Softmax(axis=-1)
    for batch in tqdm(eval_data.create_dict_iterator(num_epochs=1), total=eval_data.get_dataset_size()):
        input_ids = batch['input_ids']
        input_mask = batch['attention_mask']
        label_ids = batch['label']
        inputs = {"input_ids": input_ids,
                  "input_mask": input_mask}
        output = model(**inputs)
        output = sm(output)
        metric.update(output, squeeze(label_ids))
    print(metric.eval())
```

That is all for this part; comments and suggestions are very welcome.

Scan the QR code below to join the MindSpore project ↓

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/11/05/f40275b661f34a3b8b6c98741f1f8ef9.jpg)

### MindSpore Official Resources

GitHub: https://github.com/mindspore-ai/mindspore

Gitee: https://gitee.com/mindspore/mindspore

Official QQ group: 871543426