[{"data":1,"prerenderedAt":264},["ShallowReactive",2],{"content-query-4t4BGNjdAe":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":258,"_id":259,"_source":260,"_file":261,"_stem":262,"_extension":263},"/technology-blogs/zh/3466","zh",false,"","昇思MindSpore原生论文 | 基于双向长短时记忆网络-注意力机制的高效机器翻译模型Miniformer","Efficient Machine Translation with a BiLSTM-Attention Approach","2024-11-08","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/28/c2eb633f9fe14c3cb842e3b9091259aa.png","technology-blogs","实践",{"type":15,"children":16,"toc":255},"root",[17,25,31,35,40,45,50,61,66,76,81,92,100,109,114,121,126,131,139,151,159,164,169,176,183,188,193,200,205,213,218,225,232,240,245,250],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"昇思mindspore原生论文-基于双向长短时记忆网络-注意力机制的高效机器翻译模型miniformer",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"论文标题",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":9},{"type":18,"tag":26,"props":36,"children":37},{},[38],{"type":24,"value":39},"论文来源",{"type":18,"tag":26,"props":41,"children":42},{},[43],{"type":24,"value":44},"arXiv",{"type":18,"tag":26,"props":46,"children":47},{},[48],{"type":24,"value":49},"论文链接",{"type":18,"tag":26,"props":51,"children":52},{},[53],{"type":18,"tag":54,"props":55,"children":59},"a",{"href":56,"rel":57},"https://arxiv.org/abs/2410.22335",[58],"nofollow",[60],{"type":24,"value":56},{"type":18,"tag":26,"props":62,"children":63},{},[64],{"type":24,"value":65},"代码链接",{"type":18,"tag":26,"props":67,"children":68},{},[69],{"type":18,"tag":54,"props":70,"children":73},{"href":71,"rel":72},"https://github.com/mindspore-lab/models/tree/master/research/arxiv%5C_papers/miniformer",[58],[74],{"type":24,"value":75},"https://github.com/mindspore-lab/models/tree/master/research/arxiv\\_papers/miniformer",{"type":18,"tag":26,"props":77,"children":78},{},[79],{"type":24,"value":80},"昇思MindSpore作为开源的AI框架，为开发人员带来端边云全场景协同、极简开发、极致性能的体验，支持国内高校/科研机构发表1700+篇AI顶会论文。为鼓励基于昇思MindSpore进行原生创新，昇思开源社区转载、解读系列原生arXiv论文，本文为昇思MindSpore AI arXiv论文系列第2篇。",{"type":18,"tag":26,"props":82,"children":83},{},[84,86],{"type":24,"value":85},"作者：Frank Wu 感谢各位专家教授与同学的投稿，更多精彩的论文精读文章和开源代码实现请访问Models。更多内容请访问： ",{"type":18,"tag":54,"props":87,"children":90},{"href":88,"rel":89},"https://gitee.com/mindspore/community/issues/I9W2Z3",[58],[91],{"type":24,"value":88},{"type":18,"tag":26,"props":93,"children":94},{},[95],{"type":18,"tag":96,"props":97,"children":99},"img",{"alt":7,"src":98},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/f69177296cf94f0d86b63f0a5697e023.png",[],{"type":18,"tag":26,"props":101,"children":102},{},[103],{"type":18,"tag":104,"props":105,"children":106},"strong",{},[107],{"type":24,"value":108},"研究背景",{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"在自然语言处理（NLP）领域，机器翻译作为一种关键技术一直受到学术界和工业界的广泛关注。随着全球化的加速，跨语言信息的无缝流动变得尤为重要，这进一步推动了机器翻译技术的研究与应用。近年来，神经网络的引入给机器翻译带来了革命性的变化。特别是序列到序列（Seq2Seq）模型，以其端到端的特点极大地简化了机器翻译的过程。然而，随着数据量的增长和模型复杂度的提升，模型的存储和计算成本也随之增加。",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":18,"tag":96,"props":118,"children":120},{"alt":7,"src":119},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/d1d448510d5f48898c007142f619d16a.png",[],{"type":18,"tag":26,"props":122,"children":123},{},[124],{"type":24,"value":125},"图1.基于Seq2Seq模型的机器翻译示例",{"type":18,"tag":26,"props":127,"children":128},{},[129],{"type":24,"value":130},"Transformer模型凭借其优越的并行处理能力和处理长距离依赖问题的能力，在如机器翻译等NLP任务中取得了突破性的进展。然而，Transformer模型存在一定的局限性，例如大量的模型参数和高昂的计算成本，这在资源受限的环境中可能成为一个问题。为了应对这一问题，研究人员一直在探索更有效的模型结构，以减少模型大小并提高计算效率，同时保持翻译质量。",{"type":18,"tag":26,"props":132,"children":133},{},[134],{"type":18,"tag":104,"props":135,"children":136},{},[137],{"type":24,"value":138},"作者介绍",{"type":18,"tag":26,"props":140,"children":141},{},[142,144,149],{"type":24,"value":143},"论文第一作者为",{"type":18,"tag":104,"props":145,"children":146},{},[147],{"type":24,"value":148},"Frank Wu",{"type":24,"value":150},"，研究方向为人工智能安全，自然语言处理， 对MindSpore和PyTorch这两个流行的深度学习框架有丰富的实践经验，并且对MindSpore NLP工具库的底层代码有深入研究，参与过基于深度学习模型的后门攻击与防御机制研究、机器学习与大型语言模型的开源评估与测试。",{"type":18,"tag":26,"props":152,"children":153},{},[154],{"type":18,"tag":104,"props":155,"children":156},{},[157],{"type":24,"value":158},"论文简介",{"type":18,"tag":26,"props":160,"children":161},{},[162],{"type":24,"value":163},"随着自然语言处理（NLP）技术的迅速发展，机器翻译的准确性和效率已成为研究的热点话题。本研究提出了一种新的Seq2Seq模型，旨在平衡模型的效率与性能。该模型使用双向长短期记忆网络（Bi-LSTM）作为编码器，充分利用输入序列的上下文信息，使模型能够捕获输入序列前后的内容信息，从而增强对源语言的理解。至于解码器部分，我们引入了注意力机制，这不仅提高了模型在源语言中聚焦关键信息的能力，还使得解码过程更加灵活和动态。凭借这种设计，我们的模型能够在保持较小模型尺寸的同时，更准确地捕捉源语言与目标语言之间的复杂映射关系。",{"type":18,"tag":26,"props":165,"children":166},{},[167],{"type":24,"value":168},"具体而言，本研究利用利用昇思MindSpore深度学习框架完成模型的构建，模型包括编码器和解码器两个部分，编码器主要由双向LSTM组成。单向LSTM的运算过程如下：",{"type":18,"tag":26,"props":170,"children":171},{},[172],{"type":18,"tag":96,"props":173,"children":175},{"alt":7,"src":174},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/b762e6c183ba4f6fbf3f457a142bb9b2.png",[],{"type":18,"tag":26,"props":177,"children":178},{},[179],{"type":18,"tag":96,"props":180,"children":182},{"alt":7,"src":181},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/0ff7f3b56a944d2180668f1e2ac4233b.png",[],{"type":18,"tag":26,"props":184,"children":185},{},[186],{"type":24,"value":187},"图2.双向LSTM架构",{"type":18,"tag":26,"props":189,"children":190},{},[191],{"type":24,"value":192},"在解码器部分，本研究同样也使用了注意力机制，将解码器当前的输入序列转换为向量后，通过线性变换分别得到K，V矩阵，将编码器的源语句序列转换为Q矩阵",{"type":18,"tag":26,"props":194,"children":195},{},[196],{"type":18,"tag":96,"props":197,"children":199},{"alt":7,"src":198},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/71022a9d080543dfb64cc489ad9d0a67.png",[],{"type":18,"tag":26,"props":201,"children":202},{},[203],{"type":24,"value":204},"注意力机制主要是获取K相对于Q的权重矩阵，最终获得加权表示，不同的权重表示不同部分和当前单元的关联度。",{"type":18,"tag":26,"props":206,"children":207},{},[208],{"type":18,"tag":104,"props":209,"children":210},{},[211],{"type":24,"value":212},"实验结果",{"type":18,"tag":26,"props":214,"children":215},{},[216],{"type":24,"value":217},"为了验证我们模型的性能，我们在机器翻译领域的标准数据集WMT14上进行了实验。WMT14数据集以其大规模和多样化的语言覆盖而著称，使其成为评估机器翻译模型性能的重要平台。实验结果显示，我们的模型在WMT14数据集上的翻译质量优于Transformer模型，同时在模型大小上也有显著的优势。",{"type":18,"tag":26,"props":219,"children":220},{},[221],{"type":18,"tag":96,"props":222,"children":224},{"alt":7,"src":223},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/d87d3a1f0556446ebe80c9e811e90761.png",[],{"type":18,"tag":26,"props":226,"children":227},{},[228],{"type":18,"tag":96,"props":229,"children":231},{"alt":7,"src":230},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/15/9ec062f39882471dbfbba11a38a67f28.png",[],{"type":18,"tag":26,"props":233,"children":234},{},[235],{"type":18,"tag":104,"props":236,"children":237},{},[238],{"type":24,"value":239},"总结与展望",{"type":18,"tag":26,"props":241,"children":242},{},[243],{"type":24,"value":244},"本文提出了一个新的Seq2Seq模型，旨在通过采用双向长短期记忆网络（Bi-LSTM）作为编码器来捕捉输入序列的上下文信息，并通过在解码器中引入注意力机制来增强模型在翻译过程中对关键信息的关注能力。我们的模型在WMT14机器翻译数据集上实现了更好的性能，同时保持了更小的模型体积。",{"type":18,"tag":26,"props":246,"children":247},{},[248],{"type":24,"value":249},"MindSpore作为一个新兴的深度学习框架，显示出了其灵活性和易用性，特别是在支持多种硬件平台上。本研究利用了MindSpore的特性来实现和验证提出的模型，发现其提供了强大的工具链和丰富的API集合，有助于快速原型设计和模型迭代MindSpore有潜力成为深度学习框架中的佼佼者之一。",{"type":18,"tag":26,"props":251,"children":252},{},[253],{"type":24,"value":254},"MindSpore提供了丰富的功能和工具，可以帮助开发者们更快地实现自己的想法。社区的活跃也为解决问题和分享经验提供了一个良好的平台。希望更多的开发者能够加入进来，共同推动MindSpore的发展，并创造出更多有意义的应用。",{"title":7,"searchDepth":256,"depth":256,"links":257},4,[],"markdown","content:technology-blogs:zh:3466.md","content","technology-blogs/zh/3466.md","technology-blogs/zh/3466","md",1776506130140]