[{"data":1,"prerenderedAt":380},["ShallowReactive",2],{"content-query-WvzKdTmo77":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":374,"_id":375,"_source":376,"_file":377,"_stem":378,"_extension":379},"/version-updates/zh/3811","zh",false,"","昇思MindSpore 2.7版本正式发布，支持ZeroBubbleV流水线并行调度提升训练效率，升级适配vLLM V1架构，采用组合优化提升DeepSeek-V3推理性能","经过昇思MindSpore开源社区开发者们几个月的开发与贡献，现正式发布昇思MindSpore2.7版本。","2025-08-13","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/6800ef8c1b844b85813d48c67ec25046.png","version-updates",{"type":14,"children":15,"toc":361},"root",[16,24,29,34,39,44,49,54,64,74,79,87,100,109,114,119,124,131,140,149,154,159,164,169,174,179,186,196,205,214,219,226,235,240,247,256,261,266,273,282,287,297,306,315,320,325,330,340,349,354],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"昇思mindspore-27版本正式发布支持zerobubblev流水线并行调度提升训练效率升级适配vllm-v1架构采用组合优化提升deepseek-v3推理性能",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":9},{"type":17,"tag":25,"props":30,"children":31},{},[32],{"type":23,"value":33},"在大模型训练性能提升方面，新增支持ZeroBubbleV流水线并行调度进一步降低bubble耗时，并创新性实现重计算通信掩盖技术，提升大模型重计算部分训练效率；",{"type":17,"tag":25,"props":35,"children":36},{},[37],{"type":23,"value":38},"在生态兼容扩展方面，升级适配vLLM 
v0.8.3版本和V1架构，并采用多项组合优化显著提升DeepSeek-V3推理性能；",{"type":17,"tag":25,"props":40,"children":41},{},[42],{"type":23,"value":43},"在强化学习训推性能提升方面，支持推理均匀采样、动态packing训练，提升吞吐效率，同时支持6D并行权重重排技术，实现任意模型并行策略下的权重重排，以及支持强化学习断点续训，实现灵活的训练调试、部署；",{"type":17,"tag":25,"props":45,"children":46},{},[47],{"type":23,"value":48},"在工具效率提升方面，提供了在线监控平台（msMonitor），能够实现快速性能诊断，同时msprobe工具支持静态图模块级Dump与自动比对，提升问题定位效率；",{"type":17,"tag":25,"props":50,"children":51},{},[52],{"type":23,"value":53},"下面就为大家详细解读昇思2.7版本的关键特性。",{"type":17,"tag":18,"props":55,"children":57},{"id":56},"大模型训练性能提升",[58],{"type":17,"tag":59,"props":60,"children":61},"strong",{},[62],{"type":23,"value":63},"——大模型训练性能提升——",{"type":17,"tag":65,"props":66,"children":68},"h2",{"id":67},"_1-支持zerobubblev流水线并行调度进一步降低bubble耗时实现更高比例的计算通信掩盖",[69],{"type":17,"tag":59,"props":70,"children":71},{},[72],{"type":23,"value":73},"1 支持ZeroBubbleV流水线并行调度，进一步降低bubble耗时，实现更高比例的计算通信掩盖",{"type":17,"tag":25,"props":75,"children":76},{},[77],{"type":23,"value":78},"流水线并行是大规模分布式训练的常用并行方式，但是流水线并行会不可避免的引入bubble，从而降低设备利用率。昇思MindSpore2.7版本新增支持ZeroBubbleV流水线调度方案，如下图所示，将dx和dw的计算分离，并把dw填充到bubble中进行计算，进一步缩小了机器的空闲时间，并在正反向交替执行阶段支持1B1F融合掩盖，提升训练效率。",{"type":17,"tag":25,"props":80,"children":81},{},[82],{"type":17,"tag":83,"props":84,"children":86},"img",{"alt":7,"src":85},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/9e03635a7f3043d09624f2264d4d1bf9.png",[],{"type":17,"tag":25,"props":88,"children":89},{},[90,92],{"type":23,"value":91},"参考链接：",{"type":17,"tag":93,"props":94,"children":98},"a",{"href":95,"rel":96},"https://www.mindspore.cn/docs/zh-CN/master/features/parallel/pipeline_parallel.html",[97],"nofollow",[99],{"type":23,"value":95},{"type":17,"tag":65,"props":101,"children":103},{"id":102},"_2-实现重计算通信掩盖技术提升大模型重计算部分训练效率",[104],{"type":17,"tag":59,"props":105,"children":106},{},[107],{"type":23,"value":108},"2 
实现重计算通信掩盖技术，提升大模型重计算部分训练效率",{"type":17,"tag":25,"props":110,"children":111},{},[112],{"type":23,"value":113},"随着Transformer模型层数突破千级，传统训练模式面临两大核心挑战：1) 显存爆炸：反向传播需存储中间激活值，百层网络缓存占用超400GB; 2) 通信阻塞：分布式训练中通信占比超50%，计算利用率不足60%。",{"type":17,"tag":25,"props":115,"children":116},{},[117],{"type":23,"value":118},"为此，针对显存开销，业界往往选用重计算方案来大幅消减激活值开销，选用完全重计算可以最大化的降低激活值显存开销，但是此时重计算本身开销也较大。",{"type":17,"tag":25,"props":120,"children":121},{},[122],{"type":23,"value":123},"昇思MindSpore2.7版本创新性实现了重计算通信掩盖架构，通过分组流水线机制提升重计算部分的效率，如下图所示，将重计算层动态划分为两两一组（如Layer1、Layer2为A组，Layer3、Layer4为B组），每组内实现两个重计算Layer之间的计算与通信的掩盖，实现重计算模块性能提升15%。",{"type":17,"tag":25,"props":125,"children":126},{},[127],{"type":17,"tag":83,"props":128,"children":130},{"alt":7,"src":129},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/f82b323c7b684a72beff729d7a8deaee.png",[],{"type":17,"tag":18,"props":132,"children":134},{"id":133},"生态兼容扩展",[135],{"type":17,"tag":59,"props":136,"children":137},{},[138],{"type":23,"value":139},"——生态兼容扩展——",{"type":17,"tag":65,"props":141,"children":143},{"id":142},"_3-升级适配vllm-v083版本和v1架构组合优化显著提升deepseek-v3推理性能",[144],{"type":17,"tag":59,"props":145,"children":146},{},[147],{"type":23,"value":148},"3 升级适配vLLM v0.8.3版本和V1架构，组合优化显著提升DeepSeek-V3推理性能",{"type":17,"tag":25,"props":150,"children":151},{},[152],{"type":23,"value":153},"昇思MindSpore 2.7版本配套的vLLM-MindSpore插件，升级适配了vLLM v0.8.3版本，支持V0和V1架构，新增支持了Prefix Caching、Chunked Prefill、Multi-step Scheduling、MTP、Multi-LoRA等服务化特性。同时，改用动态图（PyNative）和JIT（Just-In-Time）编译实现接入vLLM后端的昇思MindSpore大模型，显著提升了推理后处理性能。",{"type":17,"tag":25,"props":155,"children":156},{},[157],{"type":23,"value":158},"同时，昇思MindSpore采用多项优化组合，显著提升了DeepSeek-V3/R1为代表的稀疏MoE大模型推理性能：",{"type":17,"tag":25,"props":160,"children":161},{},[162],{"type":23,"value":163},"1. 混合并行：支持面向Attention和MoE单元，分别部署张量并行（TP）、数据并行（DP）、专家并行（EP）组合方案，可提升DeepSeek-V3/R1多请求吞吐性能35%+。",{"type":17,"tag":25,"props":165,"children":166},{},[167],{"type":23,"value":168},"2. 
推理融合算子：新增接入MoeInitRoutingQuant、MultiLatentAttention等面向稀疏MoE计算的融合算子，及Combine/Dispatch等通信优化算子，降低算子下发和等待时延。",{"type":17,"tag":25,"props":170,"children":171},{},[172],{"type":23,"value":173},"3. 模型量化：改进了W8A8静态量化，降低量化推理的运行时开销，并提升Function Call等场景的精度。新增支持W4A16量化推理，支持单台Atlas 800I A2 (64GB)服务器部署DeepSeek-V3/R1模型。",{"type":17,"tag":25,"props":175,"children":176},{},[177],{"type":23,"value":178},"通过叠加上述性能优化技术，2台Atlas 800I A2（64GB）部署DeepSeek-R1/V3 W8A8量化推理，在Decode时延≤100ms约束条件下，256序列输入/输出非首Token吞吐可达2600token/s以上，进入开源推理方案第一梯队。",{"type":17,"tag":25,"props":180,"children":181},{},[182],{"type":17,"tag":83,"props":183,"children":185},{"alt":7,"src":184},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/cfb1d7d1254649938f650edaf7c94d24.png",[],{"type":17,"tag":25,"props":187,"children":188},{},[189,190],{"type":23,"value":91},{"type":17,"tag":93,"props":191,"children":194},{"href":192,"rel":193},"https://www.mindspore.cn/vllm_mindspore/docs/zh-CN/master/index.html",[97],[195],{"type":23,"value":192},{"type":17,"tag":18,"props":197,"children":199},{"id":198},"强化学习训推性能提升",[200],{"type":17,"tag":59,"props":201,"children":202},{},[203],{"type":23,"value":204},"——强化学习训推性能提升——",{"type":17,"tag":65,"props":206,"children":208},{"id":207},"_4-支持推理均匀采样提高数据生成端到端吞吐",[209],{"type":17,"tag":59,"props":210,"children":211},{},[212],{"type":23,"value":213},"4 支持推理均匀采样，提高数据生成端到端吞吐",{"type":17,"tag":25,"props":215,"children":216},{},[217],{"type":23,"value":218},"传统的强化学习经验收集过程中，不同的dp处理自己对应的问题，但是由于不同问题之间的回答长度是严重不均衡的，所以会引起较大的“等待空泡”，从而导致端到端性能差，甚至引起进程因等待时间过长而中断的问题。MindSpore 
RLHF最新版本中采用了推理均匀采样技术，如下图所示，我们将不同问题均匀分配在各个dp上，从分布上尽量让各个推理实例的负载尽量均衡，从而可以大量减少不同dp之间的等待时间，使得系统能更加稳定运行，在推理性能上约有5-10%的收益。",{"type":17,"tag":25,"props":220,"children":221},{},[222],{"type":17,"tag":83,"props":223,"children":225},{"alt":7,"src":224},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/30840b537a704abf8ff224574416dc7d.png",[],{"type":17,"tag":65,"props":227,"children":229},{"id":228},"_5-支持强化学习动态packing训练实现训练吞吐翻倍",[230],{"type":17,"tag":59,"props":231,"children":232},{},[233],{"type":23,"value":234},"5 支持强化学习动态packing训练，实现训练吞吐翻倍",{"type":17,"tag":25,"props":236,"children":237},{},[238],{"type":23,"value":239},"在预训练和微调阶段，packing训练已经被证明可以较快地提升端到端训练速度，并且对训练的效果的影响甚微。在MindSpore RLHF最新版本中提出了算法等价的动态packing训练方案，如下图所示，其主要功能是在loss函数等价的前提下，会根据预先设置的“最大pack长度”和“最大pack数量”，动态地将样本拼接到一起，从而可以最大化缩减样本的数量，且不对样本做截断（可能会影响样本质量）。在实际测试中，动态packing可以减少40%左右的训练时间，且训练效果没有下降。",{"type":17,"tag":25,"props":241,"children":242},{},[243],{"type":17,"tag":83,"props":244,"children":246},{"alt":7,"src":245},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/d9b75c59dfd848d4816b511aadc76065.png",[],{"type":17,"tag":65,"props":248,"children":250},{"id":249},"_6-支持6d并行权重重排技术实现任意模型并行策略下的权重重排",[251],{"type":17,"tag":59,"props":252,"children":253},{},[254],{"type":23,"value":255},"6 支持6D并行权重重排技术，实现任意模型并行策略下的权重重排",{"type":17,"tag":25,"props":257,"children":258},{},[259],{"type":23,"value":260},"MindSpore RLHF权重重排通过昇思MindSpore训练网络的Layout信息和推理网络的Layout信息来进行重排布算子列表的推导。Layout信息是指通过昇思MindSpore内部一种表达Tensor排布的方式，通过设备矩阵Device Matrix以及分片的映射Tensor 
Map来描述当前权重在哪些维度被切分，各个切片分布到哪些Rank上。Layout信息可以表达现有的通用并行策略，如DP、TP、CP、PP、优化器并行、EP等6D并行。",{"type":17,"tag":25,"props":262,"children":263},{},[264],{"type":23,"value":265},"如下图所示，当前的6D并行的权重重排方案，相比于业界case-by-case手写的重排，在泛化性上有较大优势，只要能通过昇思MindSpore自带的Layout来表达的切分策略，都可以通过一段统一代码进行重排列表的推导并执行，完成在线重排布。",{"type":17,"tag":25,"props":267,"children":268},{},[269],{"type":17,"tag":83,"props":270,"children":272},{"alt":7,"src":271},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/69232a518a734b56ab91b75c3c7a4fa3.png",[],{"type":17,"tag":65,"props":274,"children":276},{"id":275},"_7-支持强化学习断点续训实现灵活的训练调试部署",[277],{"type":17,"tag":59,"props":278,"children":279},{},[280],{"type":23,"value":281},"7 支持强化学习断点续训，实现灵活的训练调试、部署",{"type":17,"tag":25,"props":283,"children":284},{},[285],{"type":23,"value":286},"在强化学习中，随着模型规模和集群规模增大，训练也会像微调和预训练那样可能出现训练中断情况，因此，构建针对强化学习的断点续训就非常重要了。在MindSpore RLHF最新版本中实现了针对强化学习场景的断点续训，在推理、训练、reference模型的加载上，我们采用的策略是，首先加载训练的权重和优化器权重，保证其训练状态一致；然后利用训练权重和6D权重重排技术，将权重排布到推理和ref模型上，从而极大减少模型加载的IO时间。当前实现的断点续训功能可以完全对接中断前的loss和训练状态，实现真正意义上的“续训”。",{"type":17,"tag":25,"props":288,"children":289},{},[290,291],{"type":23,"value":91},{"type":17,"tag":93,"props":292,"children":295},{"href":293,"rel":294},"https://gitee.com/mindspore/mindrlhf",[97],[296],{"type":23,"value":293},{"type":17,"tag":18,"props":298,"children":300},{"id":299},"工具效率提升",[301],{"type":17,"tag":59,"props":302,"children":303},{},[304],{"type":23,"value":305},"——工具效率提升——",{"type":17,"tag":65,"props":307,"children":309},{"id":308},"_8-提供在线监控平台msmonitor实现快速性能诊断",[310],{"type":17,"tag":59,"props":311,"children":312},{},[313],{"type":23,"value":314},"8 
提供在线监控平台（msMonitor），实现快速性能诊断",{"type":17,"tag":25,"props":316,"children":317},{},[318],{"type":23,"value":319},"在AI计算领域，随着模型规模不断扩大，训练性能优化已成为开发者面临的关键挑战。特别是在大规模分布式训练场景下，传统性能监测方案存在明显不足：其一，采用被动式监测策略，往往在性能抖动发生后才能触发数据采集，导致问题定位存在显著延迟；其二，面对训练过程中产生的海量性能数据（通常达数百GB量级），传统方案的解析和转储效率低下，进一步延长了问题诊断周期。这些缺陷不仅影响排障效率，更会造成计算资源浪费。",{"type":17,"tag":25,"props":321,"children":322},{},[323],{"type":23,"value":324},"昇思MindSpore2.7版本新增了MindSpore Profiler接入在线监控平台功能，用户在使用MindSpore Profiler框架集群训练场景下能够通过在线监控平台的monitor功能（常态监测）实时观察到训练的性能劣化点，实现性能问题的初步定位，后续可以通过在线监控平台的Profiler trace dump功能（精准采集）采集完整的性能数据，分析、定位性能瓶颈点，从而帮助开发者实现更高效的模型性能优化。",{"type":17,"tag":25,"props":326,"children":327},{},[328],{"type":23,"value":329},"通过 \"常态监测+精准采集\" 的组合策略，msMonitor既能满足集群长稳训练时的实时监测需求，又能针对性能瓶颈进行定向分析，显著提升模型训练效率。",{"type":17,"tag":25,"props":331,"children":332},{},[333,334],{"type":23,"value":91},{"type":17,"tag":93,"props":335,"children":338},{"href":336,"rel":337},"https://gitee.com/ascend/mstt/tree/master/msmonitor",[97],[339],{"type":23,"value":336},{"type":17,"tag":65,"props":341,"children":343},{"id":342},"_9-msprobe工具支持静态图模块级dump与自动比对提升问题定位效率",[344],{"type":17,"tag":59,"props":345,"children":346},{},[347],{"type":23,"value":348},"9 msprobe工具支持静态图模块级Dump与自动比对，提升问题定位效率",{"type":17,"tag":25,"props":350,"children":351},{},[352],{"type":23,"value":353},"针对大模型场景下静态图精度定位困难问题，msprobe工具新增支持静态图模块级Dump与自动比对功能。如下图所示，首先通过msprobe工具用户可对网络模块的正反向输入输出进行真实数据Dump或max、min、mean、l2norm统计量Dump；进而根据mapping文件建立MindSpore 
Transformers与Megatron网络模块名称的对应关系；最后，借助可视化工具可将网络层级关系直观展现出来并计算出与标杆数据的相关精度指标，方便快速找到产生精度问题的模块，有效提升昇思MindSpore静态图下同框架与跨框架精度问题定位效率。",{"type":17,"tag":25,"props":355,"children":356},{},[357],{"type":17,"tag":83,"props":358,"children":360},{"alt":7,"src":359},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/08/13/f93f571d008f4ea89e17960b24886a33.png",[],{"title":7,"searchDepth":362,"depth":362,"links":363},4,[364,366,367,368,369,370,371,372,373],{"id":67,"depth":365,"text":73},2,{"id":102,"depth":365,"text":108},{"id":142,"depth":365,"text":148},{"id":207,"depth":365,"text":213},{"id":228,"depth":365,"text":234},{"id":249,"depth":365,"text":255},{"id":275,"depth":365,"text":281},{"id":308,"depth":365,"text":314},{"id":342,"depth":365,"text":348},"markdown","content:version-updates:zh:3811.md","content","version-updates/zh/3811.md","version-updates/zh/3811","md",1776506145535]