[{"data":1,"prerenderedAt":301},["ShallowReactive",2],{"content-query-ntOlVAbPDW":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":295,"_id":296,"_source":297,"_file":298,"_stem":299,"_extension":300},"/technology-blogs/zh/1739","zh",false,"","MindSpore开源大模型训练框架——MindSpore Transformer，轻松训练大模型！","Transformer模型和自监督预训练模式的提出，给NLP、CV和多模态等多个人工智能应用领域开辟了新的方向。通过增加模型参数量和数据规模，预训练模型在实际领域的表现还在持续地提升。","2022-08-24","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/24/5d30d7a348404ee9bbee0a7e25404194.png","technology-blogs","实践",{"type":15,"children":16,"toc":292},"root",[17,25,34,38,43,48,53,58,67,75,80,85,97,102,107,112,120,128,136,141,146,156,161,171,179,184,189,194,202,210,215,220,227,232,237,257,265,273,278,283],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore开源大模型训练框架mindspore-transformer轻松训练大模型",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":33},"img",{"alt":7,"src":32},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/24/1a8b44dbc3b946608eb9fbfc35df19c7.gif",[],{"type":18,"tag":26,"props":35,"children":36},{},[37],{"type":24,"value":9},{"type":18,"tag":26,"props":39,"children":40},{},[41],{"type":24,"value":42},"另一方面，参数量的提升给模型训练带来了新的挑战。GPT-3、T5等大规模Transformer模型通常至少需要上百张GPU卡进行长达数月的训练，耗费几百万美金的训练成本。",{"type":18,"tag":26,"props":44,"children":45},{},[46],{"type":24,"value":47},"模型并行是解决大模型训练时显存不足的一个主流方法，然而模型并行会不可避免的引入通信时间，造成计算资源的空闲等待。如何更加高效地、分布式地训练这些“巨无霸”们，是目前整个业界都在思考的一个问题。",{"type":18,"tag":26,"props":49,"children":50},{},[51],{"type":24,"value":52},"目前，众多企业和开源机构推出了专门的Transformer模型训练库，其中以NVIDIA基于Pytorch开发的Megatron-LM训练库在各方面性能上较为领先。",{"type":18,"tag":26,"props":54,"children":55},{},[56],{"type":24,"value":57},"本次我们开源了MindSpore Transformer模型训练库，相比Megatron，MindSpore Transformer训练库具有更全面的图算融合技术、更高的内存效率，在训练大规模Transformer模型时表现出了更高的性能。",{"type":18,"tag":26,"props":59,"children":60},{},[61],{"type":18,"tag":62,"props":63,"children":64},"strong",{},[65],{"type":24,"value":66},"一、",{"type":18,"tag":26,"props":68,"children":69},{},[70],{"type":18,"tag":62,"props":71,"children":72},{},[73],{"type":24,"value":74},"图算融合优化",{"type":18,"tag":26,"props":76,"children":77},{},[78],{"type":24,"value":79},"MindSpore的图算融合功能可以自动实现算子融合和编译优化，提升模型的内存效率和训练速度。",{"type":18,"tag":26,"props":81,"children":82},{},[83],{"type":24,"value":84},"图算融合技术的详情可以查看此处链接：",{"type":18,"tag":26,"props":86,"children":87},{},[88],{"type":18,"tag":89,"props":90,"children":94},"a",{"href":91,"rel":92},"https://www.mindspore.cn/docs/zh-CN/r1.7/design/graph%5C_kernel%5C_fusion%5C_engine.html",[93],"nofollow",[95],{"type":24,"value":96},"https://www.mindspore.cn/docs/zh-CN/r1.7/design/graph\\_kernel\\_fusion\\_engine.html",{"type":18,"tag":26,"props":98,"children":99},{},[100],{"type":24,"value":101},"Transformer大模型网络中包含大量的memory-intensive类型算子，融合优化空间非常大。为了获得极致性能，传统大模型训练框架通常手工实现大量的融合算子进行替代，需要投入大量的算子开发和调优工作量，同时也很难做到通用和泛化。",{"type":18,"tag":26,"props":103,"children":104},{},[105],{"type":24,"value":106},"为了解决该问题，MindSpore使用了图算融合进行大模型的融合优化，并取得了可以媲美手工极致融合的性能优化效果。这也是目前业界首次完整将自动算子融合技术引入大模型优化场景，并作为主要融合优化手段的技术探索。",{"type":18,"tag":26,"props":108,"children":109},{},[110],{"type":24,"value":111},"针对大模型场景，图算融合充分发挥自身多层次多维度融合子图生成、基于Polyhedral的高性能融合算子调度和优化等关键能力。既实现了极致优化性能，也显著减少了中间tensor占用和显存开销。这使得MindSpore 
## 2. Flexible, Easy-to-Use Parallelism: Seamless Scaling from One Device to Large Clusters

MindSpore Transformer builds on MindSpore's built-in parallelism. It is automatically topology-aware and efficiently combines data-parallel and model-parallel strategies, switching seamlessly from a single device to a large cluster.

**Low-barrier parallelism.** Thanks to MindSpore's parallel capabilities, MindSpore Transformer can scale from single-device to multi-device training with a single switch:

```python
from mindspore import context

context.set_auto_parallel_context(parallel_mode="stand_alone")        # single device
context.set_auto_parallel_context(parallel_mode="data_parallel")      # data parallelism
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") # semi-automatic parallelism
```

Users can enable this by passing "--parallel_model=data_parallel" to the launch script.

**Rich parallel features, enabled in one line.** The code below shows how model parallelism is configured in the MindSpore Transformer library. MindSpore predefines a set of basic parallel strategies: by setting the model-parallel size `model_parallel` and the data-parallel size `data_parallel`, users can directly apply model parallelism to Transformer-style networks and train large models.

```python
# Import path as in MindSpore r1.7 (mindspore.nn.transformer); adjust for your version.
from mindspore.nn.transformer import Transformer, TransformerOpParallelConfig

parallel_config = TransformerOpParallelConfig(model_parallel=config.model_parallel,  # model-parallel size
                                              data_parallel=config.data_parallel,    # data-parallel size
                                              recompute=True,                        # enable recomputation
                                              optimizer_shard=True)                  # enable optimizer parallelism
transformer = Transformer(hidden_size=config.hidden_size,
                          batch_size=config.batch_size,
                          ffn_hidden_size=config.hidden_size * 4,
                          src_seq_length=config.seq_length,
                          tgt_seq_length=config.seq_length,
                          encoder_layers=config.num_layers,
                          attention_dropout_rate=config.dropout_rate,
                          hidden_dropout_rate=config.dropout_rate,
                          decoder_layers=0,
                          num_heads=config.num_heads,
                          parallel_config=parallel_config)
```

Thanks to MindSpore's rich parallel capabilities, MindSpore Transformer implements in roughly 7,000 lines of code what takes Megatron tens of thousands, and offers clearer, easier-to-use customization in terms of code flexibility and generality.

In addition, the MindSpore Transformer library provides several further parallel techniques: pipeline parallelism, optimizer parallelism, MicroBatch Interleaved, and expert parallelism (see the configuration sketch below).

To address the idle compute caused by the communication that operator-level model parallelism introduces, we propose MicroBatch Interleaved to accelerate model training. Follow the repository for the latest progress and try these features out.
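As a rough sketch of how these additional dimensions are typically expressed through the same parallel configuration object (parameter names such as `pipeline_stage`, `micro_batch_num`, and `expert_parallel` follow the mindspore.nn.transformer interface of that era and are assumptions here, not an excerpt from the library):

```python
# Sketch only: combining several parallel dimensions in a single config.
# pipeline_stage / micro_batch_num / expert_parallel are assumed parameter names;
# check the repository for the exact interface of your MindSpore version.
from mindspore.nn.transformer import TransformerOpParallelConfig

parallel_config = TransformerOpParallelConfig(
    data_parallel=2,       # data-parallel size
    model_parallel=8,      # operator-level model-parallel size
    pipeline_stage=2,      # number of pipeline stages (assumed name)
    micro_batch_num=8,     # micro-batches fed through the pipeline (assumed name)
    expert_parallel=1,     # expert parallelism for MoE layers (assumed name)
    optimizer_shard=True,  # shard optimizer states across data-parallel ranks
    recompute=True)        # recompute activations to save memory
```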
## 3. Experimental Results

We benchmarked a 10-billion-parameter-scale GPT (hidden_size=5120, num_layers=35, num_heads=40) on 8-, 16-, and 32-GPU A100 clusters, with the model-parallel size set to 8, data-parallel sizes of 1, 2, and 4 respectively, and a global batch size of 1024.

Megatron was configured with a micro batch size of 2 (its upper limit) and MindSpore with a micro batch size of 8 (its upper limit): compared with Megatron, MindSpore uses memory more efficiently and can train with a larger batch size.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/24/6c3a7e48c24349f9b84a4e6c8802158c.png)

Figure: throughput comparison

As shown in the figure above:

- On 8 GPUs, Megatron's peak throughput is 7.4k samples/s, while MindSpore reaches 9.3k samples/s, about 25% higher;
- On 16 GPUs, Megatron's peak throughput is 13.6k samples/s, while MindSpore reaches 16.9k samples/s, about 24% higher;
- On 32 GPUs, Megatron's peak throughput is 20.1k samples/s, while MindSpore reaches 23.8k samples/s, about 18% higher.

## 4. Outlook

Large-model training remains one of the industry's hottest topics, and new large models keep appearing both at home and abroad. The MindSpore Transformer library will continue to be updated and to evolve. Going forward, we plan to add more pre-trained models, such as MoE and multimodal large models. You are welcome to follow and use the library.

Open-source repository:

https://gitee.com/mindspore/transformer
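The relative gains quoted in the results section can be re-derived from the reported peak throughputs; the short snippet below is purely illustrative and uses only the numbers listed above (small differences come from rounding):

```python
# Recompute the relative throughput gains from the peak numbers reported above
# (k samples/s): {GPUs: (Megatron, MindSpore)}.
results = {8: (7.4, 9.3), 16: (13.6, 16.9), 32: (20.1, 23.8)}
for gpus, (megatron, mindspore) in results.items():
    gain = (mindspore / megatron - 1) * 100
    print(f"{gpus} GPUs: +{gain:.1f}%")  # ~25.7%, ~24.3%, ~18.4%
```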