[{"data":1,"prerenderedAt":391},["ShallowReactive",2],{"content-query-XStAtIKTV2":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":385,"_id":386,"_source":387,"_file":388,"_stem":389,"_extension":390},"/technology-blogs/zh/3590","zh",false,"","BARTpho模型论文解读，并基于MindSpore NLP推理复现","作者：paff         来源：知乎","2025-01-20","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/01/24/de076b72bc8944ce950baca69bc7302f.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":376},"root",[17,25,31,36,41,46,50,55,69,78,86,94,102,110,115,119,127,135,140,145,153,157,165,173,178,185,190,194,201,209,214,221,229,234,242,250,255,260,265,273,284,289,294,299,304,309,314,319,324,329,334,338,343,347,351,355,367],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"bartpho模型论文解读并基于mindspore-nlp推理复现",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"作者：paff 来源：知乎",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":35},"BARTpho模型概述",{"type":18,"tag":26,"props":37,"children":38},{},[39],{"type":24,"value":40},"BARTpho包含两个版本：BARTphosyllable 和 BARTphoword，它们是首个公开的针对越南语的单语言大规模序列到序列预训练模型。BARTpho 使用了 BART 
的\"large\"架构以及序列到序列去噪自编码器的预训练方案，因此特别适用于生成式NLP任务。",{"type":18,"tag":26,"props":42,"children":43},{},[44],{"type":24,"value":45},"论文创新点",{"type":18,"tag":47,"props":48,"children":49},"h2",{"id":7},[],{"type":18,"tag":26,"props":51,"children":52},{},[53],{"type":24,"value":54},"本文的创新点如下：",{"type":18,"tag":56,"props":57,"children":58},"ol",{},[59],{"type":18,"tag":60,"props":61,"children":62},"li",{},[63],{"type":18,"tag":64,"props":65,"children":66},"strong",{},[67],{"type":24,"value":68},"首个针对越南语的大规模单语言序列到序列预训练模型",{"type":18,"tag":70,"props":71,"children":72},"ul",{},[73],{"type":18,"tag":60,"props":74,"children":75},{},[76],{"type":24,"value":77},"专为越南语设计，填补了越南语生成式自然语言处理任务预训练模型的空白",{"type":18,"tag":26,"props":79,"children":80},{},[81],{"type":18,"tag":64,"props":82,"children":83},{},[84],{"type":24,"value":85},"2. 适配越南语的预训练设计",{"type":18,"tag":70,"props":87,"children":88},{},[89],{"type":18,"tag":60,"props":90,"children":91},{},[92],{"type":24,"value":93},"针对越南语的特点，分别设计了基于音节(BARTphosyllable)和基于单词(BARTphoword)的两种版本",{"type":18,"tag":26,"props":95,"children":96},{},[97],{"type":18,"tag":64,"props":98,"children":99},{},[100],{"type":24,"value":101},"3. 
在多个越南语下游任务上的表现显著提升",{"type":18,"tag":70,"props":103,"children":104},{},[105],{"type":18,"tag":60,"props":106,"children":107},{},[108],{"type":24,"value":109},"在越南语文本摘要任务中，BARTpho的ROUGE分数和人工评估分数均强于mBART；在越南语大写和标点恢复任务中，BARTpho的性能也显著优于mBART。",{"type":18,"tag":26,"props":111,"children":112},{},[113],{"type":24,"value":114},"数据集上的指标评价得分",{"type":18,"tag":47,"props":116,"children":118},{"id":117},"_1",[],{"type":18,"tag":26,"props":120,"children":121},{},[122],{"type":18,"tag":64,"props":123,"children":124},{},[125],{"type":24,"value":126},"01",{"type":18,"tag":26,"props":128,"children":129},{},[130],{"type":18,"tag":64,"props":131,"children":132},{},[133],{"type":24,"value":134},"文本摘要生成",{"type":18,"tag":26,"props":136,"children":137},{},[138],{"type":24,"value":139},"本文评估并比较了BARTpho与基线模型mBART在越南语文本摘要这一任务中的性能。使用单文档摘要数据集VNDS进行实验，该数据集包含150704篇新闻文章，每篇文章包括一个新闻摘要和正文。由于该数据集中存在重复文章，本文对数据集进行了去重。",{"type":18,"tag":26,"props":141,"children":142},{},[143],{"type":24,"value":144},"表1 显示了去重之后，mBART和BARTpho的ROUGE分数。显然，两种BARTpho版本在验证集和测试集上都显著优于mBART。除此以外，本文还随机抽取了100个案例进行基于人工的评估。在人工评估中，BARTpho的表现依然优于mBART。表2 显示了在未去重数据集上的结果。表 1 和表 2 的自动评估和人工评估结果证明了基于 BART 的大规模单语言 seq2seq 模型对越南语的有效性。",{"type":18,"tag":26,"props":146,"children":147},{},[148],{"type":18,"tag":149,"props":150,"children":152},"img",{"alt":7,"src":151},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/01/24/8404cfbe5dab4c98907d5726c50f133b.png",[],{"type":18,"tag":47,"props":154,"children":156},{"id":155},"_2",[],{"type":18,"tag":26,"props":158,"children":159},{},[160],{"type":18,"tag":64,"props":161,"children":162},{},[163],{"type":24,"value":164},"02",{"type":18,"tag":26,"props":166,"children":167},{},[168],{"type":18,"tag":64,"props":169,"children":170},{},[171],{"type":24,"value":172},"大写和标点符号恢复",{"type":18,"tag":26,"props":174,"children":175},{},[176],{"type":24,"value":177},"表 3 显示了 BARTpho 和 mBART 在大写任务上的结果。BARTpho 的表现优于 mBART，具体来说，BARTphoword 和 BARTphosyllable 的 F1 分数分别比 mBART 高 1.1% 和 0.7%。表 3 
同时展示了 BARTpho 和 mBART 在标点恢复任务上的结果。两种 BARTpho 版本在逗号和问号类型上都优于 mBART，特别是在问号标点上性能差距显著。",{"type":18,"tag":26,"props":179,"children":180},{},[181],{"type":18,"tag":149,"props":182,"children":184},{"alt":7,"src":183},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/01/24/acaf731d52804e1caef89bad3fd9d14a.png",[],{"type":18,"tag":26,"props":186,"children":187},{},[188],{"type":24,"value":189},"相比其他工作的优势",{"type":18,"tag":47,"props":191,"children":193},{"id":192},"_3",[],{"type":18,"tag":26,"props":195,"children":196},{},[197],{"type":18,"tag":64,"props":198,"children":199},{},[200],{"type":24,"value":126},{"type":18,"tag":26,"props":202,"children":203},{},[204],{"type":18,"tag":64,"props":205,"children":206},{},[207],{"type":24,"value":208},"专注于越南语的单语言序列到序列预训练",{"type":18,"tag":26,"props":210,"children":211},{},[212],{"type":24,"value":213},"此前大部分相关工作（如 mBART 和 mT5）主要针对多语言模型，未专注于越南语。由于越南语的语言特点（如音节和单词的特殊性），本文通过分词策略（音节级与单词级）探索了如何更好地为越南语优化 seq2seq 模型，解决了多语言模型无法充分利用越南语语言特性的局限性。",{"type":18,"tag":26,"props":215,"children":216},{},[217],{"type":18,"tag":64,"props":218,"children":219},{},[220],{"type":24,"value":164},{"type":18,"tag":26,"props":222,"children":223},{},[224],{"type":18,"tag":64,"props":225,"children":226},{},[227],{"type":24,"value":228},"性能显著提升",{"type":18,"tag":26,"props":230,"children":231},{},[232],{"type":24,"value":233},"在越南语单文档文本摘要任务中，BARTpho 在自动评估（ROUGE 分数）和人工评估中均超过了强基线 mBART；在越南语大写与标点符号恢复任务中，BARTpho 的性能也全面优于 mBART，尤其在问号和逗号标点符号的恢复上展现了更大的性能提升。",{"type":18,"tag":26,"props":235,"children":236},{},[237],{"type":18,"tag":64,"props":238,"children":239},{},[240],{"type":24,"value":241},"03",{"type":18,"tag":26,"props":243,"children":244},{},[245],{"type":18,"tag":64,"props":246,"children":247},{},[248],{"type":24,"value":249},"专用模型的高效性",{"type":18,"tag":26,"props":251,"children":252},{},[253],{"type":24,"value":254},"即便训练数据量远小于多语言模型，BARTpho 的性能仍然优于 
mBART。",{"type":18,"tag":26,"props":256,"children":257},{},[258],{"type":24,"value":259},"基于MindSpore NLP复现",{"type":18,"tag":26,"props":261,"children":262},{},[263],{"type":24,"value":264},"对于文本摘要生成任务，通过查阅本文的参考文献，我找到了文中提到的VNDS数据集。数据集对应链接在下方，该数据集完全公开，可自由下载。而对于大写和标点符号恢复任务，本文使用的测试数据是根据现有数据集生成的，且并未公开，这里无法验证。因此只能尝试文本摘要生成任务。",{"type":18,"tag":70,"props":266,"children":267},{},[268],{"type":18,"tag":60,"props":269,"children":270},{},[271],{"type":24,"value":272},"VNDS数据集：",{"type":18,"tag":26,"props":274,"children":275},{},[276],{"type":18,"tag":277,"props":278,"children":282},"a",{"href":279,"rel":280},"https://github.com/ThanhChinhBK/vietnews",[281],"nofollow",[283],{"type":24,"value":279},{"type":18,"tag":26,"props":285,"children":286},{},[287],{"type":24,"value":288},"在验证本文的工作过程当中，发现文章对于摘要生成的具体参数设置描述不完整，仅提到了输入大小为512，没有说明输出大小应固定在什么范围，且文中的去重后的数据集也未公开。因此这里用去重前的测试数据集，设置输入大小为512，输出为50进行实验。使用官方给出的TransFormers模型与MindSpore NLP模型进行了对比，官方模型与MindSpore NLP在小数点后两位精度的误差为0.00%。",{"type":18,"tag":26,"props":290,"children":291},{},[292],{"type":24,"value":293},"Model",{"type":18,"tag":26,"props":295,"children":296},{},[297],{"type":24,"value":298},"R-1",{"type":18,"tag":26,"props":300,"children":301},{},[302],{"type":24,"value":303},"R-2",{"type":18,"tag":26,"props":305,"children":306},{},[307],{"type":24,"value":308},"R-L",{"type":18,"tag":26,"props":310,"children":311},{},[312],{"type":24,"value":313},"BARTpho-syllable",{"type":18,"tag":26,"props":315,"children":316},{},[317],{"type":24,"value":318},"(TransFormers)",{"type":18,"tag":26,"props":320,"children":321},{},[322],{"type":24,"value":323},"0.4324",{"type":18,"tag":26,"props":325,"children":326},{},[327],{"type":24,"value":328},"0.1434",{"type":18,"tag":26,"props":330,"children":331},{},[332],{"type":24,"value":333},"0.2982",{"type":18,"tag":26,"props":335,"children":336},{},[337],{"type":24,"value":313},{"type":18,"tag":26,"props":339,"children":340},{},[341],{"type":24,"value":342},"(MindSpore 
NLP)",{"type":18,"tag":26,"props":344,"children":345},{},[346],{"type":24,"value":323},{"type":18,"tag":26,"props":348,"children":349},{},[350],{"type":24,"value":328},{"type":18,"tag":26,"props":352,"children":353},{},[354],{"type":24,"value":333},{"type":18,"tag":70,"props":356,"children":357},{},[358],{"type":18,"tag":60,"props":359,"children":360},{},[361],{"type":18,"tag":47,"props":362,"children":364},{"id":363},"基于mindspore-nlp复现完整代码",[365],{"type":24,"value":366},"基于MindSpore NLP复现完整代码：",{"type":18,"tag":26,"props":368,"children":369},{},[370],{"type":18,"tag":277,"props":371,"children":374},{"href":372,"rel":373},"https://github.com/liuhufa/BARTpho-and-MindNLP/tree/master",[281],[375],{"type":24,"value":372},{"title":7,"searchDepth":377,"depth":377,"links":378},4,[379,381,382,383,384],{"id":7,"depth":380,"text":7},2,{"id":117,"depth":380,"text":7},{"id":155,"depth":380,"text":7},{"id":192,"depth":380,"text":7},{"id":363,"depth":380,"text":366},"markdown","content:technology-blogs:zh:3590.md","content","technology-blogs/zh/3590.md","technology-blogs/zh/3590","md",1776506131769]