[{"data":1,"prerenderedAt":348},["ShallowReactive",2],{"content-query-cKJ55V0RIT":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":342,"_id":343,"_source":344,"_file":345,"_stem":346,"_extension":347},"/technology-blogs/zh/3596","zh",false,"","Barthez模型论文解读，并基于MindSpore NLP推理复现","作者：北辰星         来源：知乎","2025-02-05","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/07/5b1f18f01f954c3a926339e1cbae5da7.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":335},"root",[17,25,42,50,74,82,86,91,96,101,106,114,118,126,134,139,147,152,159,164,168,176,184,189,201,206,213,218,223,231,236,241,246,254,259,268,273,280,288,293,302,307,317,325,330],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"barthez模型论文解读并基于mindspore-nlp推理复现",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29,35,37],{"type":18,"tag":30,"props":31,"children":32},"strong",{},[33],{"type":24,"value":34},"作者：北辰星",{"type":24,"value":36}," ",{"type":18,"tag":30,"props":38,"children":39},{},[40],{"type":24,"value":41},"来源：知乎",{"type":18,"tag":26,"props":43,"children":44},{},[45],{"type":18,"tag":30,"props":46,"children":47},{},[48],{"type":24,"value":49},"Barthez模型概述",{"type":18,"tag":26,"props":51,"children":52},{},[53,58,60,65,67,72],{"type":18,"tag":30,"props":54,"children":55},{},[56],{"type":24,"value":57},"Barthez是第一个法语BART模型",{"type":24,"value":59},"，其在一个非常大的法语单语语料库上进行了预训练，与现有的基于BERT的法语模型（如CamemBERT和FlauBERT）不同，",{"type":18,"tag":30,"props":61,"children":62},{},[63],{"type":24,"value":64},"BARThez特别适合生成任务",{"type":24,"value":66},"，因为",{"type":18,"tag":30,"props":68,"children":69},{},[70],{"type":24,"value":71},"它的编码器和解码器都经过了预训练",{"type":24,"value":73},"。除了FLUE基准测试中的区分任务，本文中发布了一个新颖的摘要数据集OrangeSum，并对BARThez进行了评估。最终得到的模型mBARTHez在性能上显著优于普通的BARThez，并且与CamemBERT和FlauBERT相当或更胜一筹。",{"type":18,"tag":26,"props":75,"children":76},{},[77],{"type":18,"tag":30,"props":78,"children":79},{},[80],{"type":24,"value":81},"论文创新点",{"type":18,"tag":83,"props":84,"children":85},"h2",{"id":7},[],{"type":18,"tag":26,"props":87,"children":88},{},[89],{"type":24,"value":90},"在聊BARThez模型之前，我们需要知道，什么是BART模型。近年来，ChatGPt的问世引发了大模型的热潮，而ChatGPT是基于Transformer架构的，但transformer架构所衍生出来的技术路线不仅仅只有ChatGPT这一条路线，主要有三种，分别是GPT路线，BART路线，BERT路线。",{"type":18,"tag":26,"props":92,"children":93},{},[94],{"type":24,"value":95},"GPT路线是OpenAI公司研发的自回归路线，现在已经取得了非常大的成功，BERT路线是自编码路线，是Google公司研发，BART路线，则是结合了这两者，即自回归+自编码路线。而BARThez模型，则是这BART模型的一个分支，一个法语训练的BART模型。",{"type":18,"tag":26,"props":97,"children":98},{},[99],{"type":24,"value":100},"基于BERT的法语模型早先就已有CamemBERT，FlauBERT这两个模型，且都具有比较好的模型性能，但基于BART的法语模型却一直没有问世，所以本文训练了第一个BART法语模型BARThez，并且在本文提出的法语数据集OrangeSum上也表现出了比较优异的性能。",{"type":18,"tag":26,"props":102,"children":103},{},[104],{"type":24,"value":105},"同时，本文在在Bart模型的基础上，又使用了Barthez模型的语料库进行预训练，最终得到了一个性能显著优于mBarthez的模型，比基于BERT的最先进的语言模型CamemBERT和FlauBERT相当或更胜一筹。",{"type":18,"tag":26,"props":107,"children":108},{},[109],{"type":18,"tag":30,"props":110,"children":111},{},[112],{"type":24,"value":113},"数据集上的指标评价得分",{"type":18,"tag":83,"props":115,"children":117},{"id":116},"_1",[],{"type":18,"tag":26,"props":119,"children":120},{},[121],{"type":18,"tag":30,"props":122,"children":123},{},[124],{"type":24,"value":125},"01",{"type":18,"tag":26,"props":127,"children":128},{},[129],{"type":18,"tag":30,"props":130,"children":131},{},[132],{"type":24,"value":133},"生成任务",{"type":18,"tag":26,"props":135,"children":136},{},[137],{"type":24,"value":138},"本文中使用了四个模型来进行生成任务作为对比，分别是BARThez、mBART、mBARThez、Random，其模型相关信息如下表所示。",{"type":18,"tag":26,"props":140,"children":141},{},[142],{"type":18,"tag":143,"props":144,"children":146},"img",{"alt":7,"src":145},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/07/4175a9fb2b3740bc8508fbbc0405db62.png",[],{"type":18,"tag":26,"props":148,"children":149},{},[150],{"type":24,"value":151},"在本文推出的OrangeSum数据集上分别进行摘要生成和标题生成两个任务，使用ROUGE-1、ROUGE-2和ROUGE-L分数作为指标，而由于ROUGE在抽象摘要中不适用，所以又加入了BERTScore分数，其运行对比结果在下表。",{"type":18,"tag":26,"props":153,"children":154},{},[155],{"type":18,"tag":143,"props":156,"children":158},{"alt":7,"src":157},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/07/11e582f0d5e64f788236fd8ddd6b7835.png",[],{"type":18,"tag":26,"props":160,"children":161},{},[162],{"type":24,"value":163},"从结果上看，BARThez模型虽然和mBART模型的ROUGE分数和BERTScore分数差不多，但是要注意到这两个模型之间的参数差别，mBART模型的参数比BARThez模型的参数多了整整四倍。而mBARThez则在所有指标上都达到了最优，体现了在微调之前将多语言预训练模型适应特定语言的重要性。",{"type":18,"tag":83,"props":165,"children":167},{"id":166},"_2",[],{"type":18,"tag":26,"props":169,"children":170},{},[171],{"type":18,"tag":30,"props":172,"children":173},{},[174],{"type":24,"value":175},"02",{"type":18,"tag":26,"props":177,"children":178},{},[179],{"type":18,"tag":30,"props":180,"children":181},{},[182],{"type":24,"value":183},"识别任务",{"type":18,"tag":26,"props":185,"children":186},{},[187],{"type":24,"value":188},"识别任务中，适用评估框架FLUE，FLUE是一个用于法语自然语言处理系统的评估框架，类似于流行的GLUE基准。其目标是促进未来可重复的实验，并共享法语领域的模型和进展。（",{"type":18,"tag":26,"props":190,"children":191},{},[192],{"type":18,"tag":193,"props":194,"children":198},"a",{"href":195,"rel":196},"https://link.zhihu.com/?target=https%3A//github.com/getalp/Flaubert/tree/master/flue%EF%BC%89",[197],"nofollow",[199],{"type":24,"value":200},"https://link.zhihu.com/?target=https%3A//github.com/getalp/Flaubert/tree/master/flue）",{"type":18,"tag":26,"props":202,"children":203},{},[204],{"type":24,"value":205},"FLUE框架中，包含三个数据集，分别是CLS、PAWSX、XNLI。其中，CLS数据集任务是预测评论的态度是表扬还是批评，PAWSX数据集任务是判断两句话是否语义等价，XNLI数据集任务是推测第一个句子是否蕴含第二个句子。最终运行结果如下。",{"type":18,"tag":26,"props":207,"children":208},{},[209],{"type":18,"tag":143,"props":210,"children":212},{"alt":7,"src":211},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/07/78199469ca1645b7bfb397ce90e90fef.png",[],{"type":18,"tag":26,"props":214,"children":215},{},[216],{"type":24,"value":217},"从结果上来看，BARThez在三个情感分析任务中超越了其他模型，而在释义和推理任务中则与CamemBERT和FlauBERT非常接近。在LARGE模型中，mBARThez在所有任务中都优于mBART，再次显示了预训练阶段的语言适应阶段的重要性。",{"type":18,"tag":26,"props":219,"children":220},{},[221],{"type":24,"value":222},"在5个任务中的3个任务中，FlauBERT比mBARThez略有优势，这可能是因为FlauBERT在单语法语语料库上训练的时间大约是mBARThez的10倍。总的来说，BARThez和mBARThez在生成任务上的出色表现并不会导致它们在判别任务上的性能下降。",{"type":18,"tag":26,"props":224,"children":225},{},[226],{"type":18,"tag":30,"props":227,"children":228},{},[229],{"type":24,"value":230},"创新点优势",{"type":18,"tag":26,"props":232,"children":233},{},[234],{"type":24,"value":235},"1、BARThez模型基于BART架构，通过多语言预训练，使其在处理多种语言任务时具有优势。多语言预训练使得模型能够更好地理解和处理不同语言之间的差异和共性，从而提高了模型的泛化能力和性能。",{"type":18,"tag":26,"props":237,"children":238},{},[239],{"type":24,"value":240},"2、BARThez模型在预训练阶段加入了语言适应性预训练，即在多语言预训练的基础上，继续使用特定语言的语料库进行预训练。这种预训练方法使得模型能够更好地适应特定语言的任务，从而提高了模型的性能。",{"type":18,"tag":26,"props":242,"children":243},{},[244],{"type":24,"value":245},"3、BARThez模型不仅具有强大的编码能力，还具有强大的解码能力，这使得它特别适合于生成任务，如摘要生成。BART架构的这种双向编码和解码的能力，使得模型能够更好地理解和生成自然语言文本。",{"type":18,"tag":26,"props":247,"children":248},{},[249],{"type":18,"tag":30,"props":250,"children":251},{},[252],{"type":24,"value":253},"使用MindSpore NLP对数据集进行推理验证",{"type":18,"tag":26,"props":255,"children":256},{},[257],{"type":24,"value":258},"首先是需要找到与官方相同的数据集，这里我采用了FLUE中提供的数据集，让BARThez模型在CLS数据集上进行文本情感预测。数据集对应链接在下方，需要注意的是，CLS数据集并不对外公开，需要进行申请才能获得使用资格，所以这个只提供链接，不提供数据集文件。如果想要获取数据集，可以点击链接，提交数据集使用申请。",{"type":18,"tag":26,"props":260,"children":261},{},[262],{"type":18,"tag":193,"props":263,"children":266},{"href":264,"rel":265},"https://zenodo.org/records/3251672",[197],[267],{"type":24,"value":264},{"type":18,"tag":26,"props":269,"children":270},{},[271],{"type":24,"value":272},"为了验证BARThez模型的效果，我们选取了CLS-，并在数据集上进行了验证评估，最终在CLS-books数据集上得到的正确率为99.94%，与官方给出的结果94.47%相差较大，这里推测是官方模型在CLS-books数据集上可能出现了数据泄露，故而正确率比较高，接近100%。而其他数据集与官方误差小于2%。同时我们也使用了官方给出的Transformers模型与MindSpore NLP模型进行了对比，官方模型与MindSpore NLP在小数点后两位精度的误差为0.00%，其具体对比结果如下：",{"type":18,"tag":26,"props":274,"children":275},{},[276],{"type":18,"tag":143,"props":277,"children":279},{"alt":7,"src":278},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/07/fc251d4647d544228fa4bb96c3f17e73.png",[],{"type":18,"tag":26,"props":281,"children":282},{},[283],{"type":18,"tag":30,"props":284,"children":285},{},[286],{"type":24,"value":287},"完整代码",{"type":18,"tag":26,"props":289,"children":290},{},[291],{"type":24,"value":292},"数据集预处理代码，需要将数据集放入同一目录下，会生成books、dvd、music三个文件夹，每个文件夹内包含对应的数据集。由于代码比较长，完整代码请详见如下给出gitee仓库链接。",{"type":18,"tag":26,"props":294,"children":295},{},[296],{"type":18,"tag":193,"props":297,"children":300},{"href":298,"rel":299},"https://gitee.com/chenjian2024/barthez-mind-nlp/tree/master/",[197],[301],{"type":24,"value":298},{"type":18,"tag":26,"props":303,"children":304},{},[305],{"type":24,"value":306},"模型推理核心代码：",{"type":18,"tag":308,"props":309,"children":311},"pre",{"code":310},"import numpy as np\nfrom mindnlp.transformers import AutoTokenizer,AutoModelForSequenceClassification\nimport mindspore\n\n barthez_tokenizer = AutoTokenizer.from_pretrained(\"moussaKam/barthez\")\n\n barthez_model = AutoModelForSequenceClassification.from_pretrained(\"moussaKam/barthez-sentiment-classification\")\n\ndef predict_sentiment(text):\n    input_ids = mindspore.tensor(\n        [barthez_tokenizer.encode(text, add_special_tokens=True)]\n    )\n    predict = barthez_model.forward(input_ids)[0]\n    return predict.argmax().item()\n",[312],{"type":18,"tag":313,"props":314,"children":315},"code",{"__ignoreMap":7},[316],{"type":24,"value":310},{"type":18,"tag":26,"props":318,"children":319},{},[320],{"type":18,"tag":30,"props":321,"children":322},{},[323],{"type":24,"value":324},"总结",{"type":18,"tag":26,"props":326,"children":327},{},[328],{"type":24,"value":329},"BARThez模型是第一个专为法语设计的BART模型，它通过在大型法语语料库上进行预训练，并在两个抽象摘要任务上进行评估，展示了与mBART模型相当的竞争力，同时具有更少的参数。此外，通过在预训练阶段添加一个相对便宜的语言适应阶段，mBARThez模型在多个判别任务上提供了显著的性能提升。",{"type":18,"tag":26,"props":331,"children":332},{},[333],{"type":24,"value":334},"MindSpore NLP中提供了与huggingface相同的操作接口，加载、评估和训练模型非常直接高效，在有huggingface使用经验的情况下，学习成本非常低，非常推荐使用MindSpore NLP。",{"title":7,"searchDepth":336,"depth":336,"links":337},4,[338,340,341],{"id":7,"depth":339,"text":7},2,{"id":116,"depth":339,"text":7},{"id":166,"depth":339,"text":7},"markdown","content:technology-blogs:zh:3596.md","content","technology-blogs/zh/3596.md","technology-blogs/zh/3596","md",1776506131908]