[{"data":1,"prerenderedAt":579},["ShallowReactive",2],{"content-query-0Poyo2MnG1":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":573,"_id":574,"_source":575,"_file":576,"_stem":577,"_extension":578},"/version-updates/zh/3601","zh",false,"","昇思MindSpore 2.5版本正式发布，提升动态图执行性能，增强静态图泛化可用性，并降低大模型推理成本与时延","经过昇思MindSpore开源社区开发者们几个月的开发与贡献，现正式发布昇思MindSpore2.5版本。","2025-02-12","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/3ca71968112240329508415a2e90a0ee.png","version-updates",{"type":14,"children":15,"toc":536},"root",[16,24,29,34,39,44,49,60,71,76,81,89,100,105,116,121,126,133,138,151,162,167,178,183,190,195,204,215,225,230,235,242,247,254,264,273,278,283,288,295,300,307,318,323,332,337,342,347,356,361,366,375,380,391,400,405,410,417,422,431,436,441,451,460,471,476,483,488,498,509,514,521,526],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"昇思mindspore-25版本正式发布提升动态图执行性能增强静态图泛化可用性并降低大模型推理成本与时延",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":9},{"type":17,"tag":25,"props":30,"children":31},{},[32],{"type":23,"value":33},"其中动态图补齐view和in-place功能并优化动态shape能力，提升动态图执行性能；通过完善图算融合，增强静态图O1模式的泛化可用性；新增支持不占资源的仿真模拟集群执行流程，提高调优效率，同时Atlas A2上的已有功能平滑迁移到超节点，并发挥互联优势，甜点场景性能可达2.9x Atlas 
A2，不断提升框架易用性。",{"type":17,"tag":25,"props":35,"children":36},{},[37],{"type":23,"value":38},"在大模型推理方面，金箍棒提供低比特权重量化和动态量化算法降低推理成本，并结合图算融合优化技术，降低推理整网时延提升吞吐量，同时支持DiT文生图模型以存代算及Gate算法降低端到端时延，实现大模型推理性能提升。",{"type":17,"tag":25,"props":40,"children":41},{},[42],{"type":23,"value":43},"在工具效率提升方面，msprobe工具新增分级可视化构图比对，实现快速分析精度问题，同时Profiler实现轻量化打点，支持集群场景问题快速定界。",{"type":17,"tag":25,"props":45,"children":46},{},[47],{"type":23,"value":48},"下面就为大家详细解读昇思2.5版本的关键特性。",{"type":17,"tag":50,"props":51,"children":53},"h3",{"id":52},"框架易用性提升",[54],{"type":17,"tag":55,"props":56,"children":57},"strong",{},[58],{"type":23,"value":59},"——框架易用性提升——",{"type":17,"tag":50,"props":61,"children":63},{"id":62},"_1-动态图补齐view和in-place功能提升tensor索引性能平均34倍",[64,66],{"type":23,"value":65},"1 ",{"type":17,"tag":55,"props":67,"children":68},{},[69],{"type":23,"value":70},"动态图补齐view和in-place功能，提升Tensor索引性能平均3.4倍",{"type":17,"tag":25,"props":72,"children":73},{},[74],{"type":23,"value":75},"在AI框架中，对Tensor（张量）的操作在普通计算类操作的基础上，存在两种特殊的操作：view（视图）操作 和 in-place（原地）操作。view操作是指创建一个新的张量，它与原始张量共享相同的数据存储，但具有不同的形状或排列方式，换句话说view操作不会复制数据，而是通过不同的视角来解释现有的数据，这使得view操作非常高效，因为它们避免了不必要的内存分配和数据复制；in-place操作是指直接修改输入张量的内容，而不创建新的张量，这种操作通常会在函数名后加上下划线_来表示它是in-place操作，例如，add_()是add()的in-place版本。",{"type":17,"tag":25,"props":77,"children":78},{},[79],{"type":23,"value":80},"Tensor的索引操作是建立在view和in-place操作之上的一个复杂操作，昇思MindSpore 2.5版本动态图已经补齐view操作和in-place操作能力，提升了Tensor的索引操作的性能，如下图所示，不同场景下Tensor索引性能平均提升3.4倍。",{"type":17,"tag":25,"props":82,"children":83},{},[84],{"type":17,"tag":85,"props":86,"children":88},"img",{"alt":7,"src":87},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/27b29f6e951547998b56ad11efc65047.png",[],{"type":17,"tag":50,"props":90,"children":92},{"id":91},"_2-反向完善动态shape能力提升动态图执行性能30",[93,95],{"type":23,"value":94},"2 
",{"type":17,"tag":55,"props":96,"children":97},{},[98],{"type":23,"value":99},"反向完善动态shape能力，提升动态图执行性能30%",{"type":17,"tag":25,"props":101,"children":102},{},[103],{"type":23,"value":104},"昇思MindSpore动态图在动态shape场景下，反向执行先构建完整的IR图，进而拆分成单算子进行执行，该流程下需要先构建整图再进行拆分，存在冗余的操作。针对该场景，昇思MindSpore2.5版本反向执行流程优化成建立逻辑连接关系，直接通过连接关系进行执行，从而优化冗余操作，提升动态shape场景下动态图执行性能，SDXL网络和OpenSora网络端到端提升30%。",{"type":17,"tag":50,"props":106,"children":108},{"id":107},"_3-完善图算融合增强静态图o1模式的泛化可用性",[109,111],{"type":23,"value":110},"3 ",{"type":17,"tag":55,"props":112,"children":113},{},[114],{"type":23,"value":115},"完善图算融合，增强静态图O1模式的泛化可用性",{"type":17,"tag":25,"props":117,"children":118},{},[119],{"type":23,"value":120},"昇思MindSpore2.3版本首次对外发布支持静态图O(n)多级编译，其中O1模式主要是在O0基础上增加了图算融合优化支持，用于对训练性能要求更高的模型场景。",{"type":17,"tag":25,"props":122,"children":123},{},[124],{"type":23,"value":125},"经过持续优化完善以及大范围测试验证，昇思MindSpore2.5版本中O1模式在泛化可用性上已经可以满足大部分场景需求。基于Atlas A2进行典型网络测试，如下图所示，使能O1模式可实现平均约10%的整网性能加速效果，具体收益跟网络结构、算子使用、张量Shape等相关。",{"type":17,"tag":25,"props":127,"children":128},{},[129],{"type":17,"tag":85,"props":130,"children":132},{"alt":7,"src":131},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/eccdcae523e246cb81dbf944df73c829.png",[],{"type":17,"tag":25,"props":134,"children":135},{},[136],{"type":23,"value":137},"当O1模式使能之后，图算融合可以在静态图编译过程中自动识别可融合子图并进行融合替换。相比手工融合，图算融合具有简单易用、泛化性好等优势。",{"type":17,"tag":25,"props":139,"children":140},{},[141,143],{"type":23,"value":142},"参考链接：",{"type":17,"tag":144,"props":145,"children":149},"a",{"href":146,"rel":147},"https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.JitConfig.html",[148],"nofollow",[150],{"type":23,"value":146},{"type":17,"tag":50,"props":152,"children":154},{"id":153},"_4-超节点功能平滑迁移全面发挥互联优势",[155,157],{"type":23,"value":156},"4 
",{"type":17,"tag":55,"props":158,"children":159},{},[160],{"type":23,"value":161},"超节点功能平滑迁移，全面发挥互联优势",{"type":17,"tag":25,"props":163,"children":164},{},[165],{"type":23,"value":166},"昇思MindSpore在Atlas A2上的已有功能无需修改即可平滑迁移到Atlas A3，充分发挥Atlas A3的硬件能力，全面支持模型的训练和推理流程。在超节点甜点场景上，昇思MindSpore使能高维张量并行、RingAttention等亲和特性，其中高维张量并行在通信节省基础上进一步实现超节点硬件亲和的Matmul计算shape切分策略优化、充分释放超节点硬件性能，支持千亿稠密Llama模型性能提升10%~20%。充分发挥互联优势，典型千亿稀疏模型长序列性能Atlas A3可达2.9x Atlas A2，支持M级tokens的序列长度训练。",{"type":17,"tag":50,"props":168,"children":170},{"id":169},"_5-新增支持不占资源的仿真模拟集群执行流程提高调优效率",[171,173],{"type":23,"value":172},"5 ",{"type":17,"tag":55,"props":174,"children":175},{},[176],{"type":23,"value":177},"新增支持不占资源的仿真模拟集群执行流程，提高调优效率",{"type":17,"tag":25,"props":179,"children":180},{},[181],{"type":23,"value":182},"通常在训练时为了提升设备的算力或显存利用率，往往需要反复调试并行策略、重计算、负载均衡等相关超参，对于动辄千卡万卡的大集群来说，这种反复调试成本是非常昂贵的。而昇思MindSpore2.5版本新增支持仿真模拟执行的功能，可以在不占用卡执行的情况下，直接模拟出任意卡的图编译结果以及显存占用情况，如下图所示，用户可以根据模拟执行的显存结果来调整上述超参，调整完后再复用到生产集群上，一键拉起大集群训练，从而减少调试过程中的资源占用，大幅提升调试调优效率。",{"type":17,"tag":25,"props":184,"children":185},{},[186],{"type":17,"tag":85,"props":187,"children":189},{"alt":7,"src":188},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/b8707730e21e45dd9081e1662529177c.png",[],{"type":17,"tag":25,"props":191,"children":192},{},[193],{"type":23,"value":194},"用户可以根据自己的需求，通过环境变量export MS_SIMULATION_LEVEL=0/1/2/3设置模拟执行的级别。0代表仅模型编译，用户可以关注模型编译时间；1在0的基础上记录了算子的输入、输出显存信息，用户可以结合显存统计或者memory_tracker进行显存分析；2在1的基础上增加了算子workspace显存信息，需要占用期望模拟的卡数；3在2的基础上增加了当前卡的计算算子执行过程，用户可以进行性能调优。",{"type":17,"tag":50,"props":196,"children":198},{"id":197},"大模型推理性能提升",[199],{"type":17,"tag":55,"props":200,"children":201},{},[202],{"type":23,"value":203},"——大模型推理性能提升——",{"type":17,"tag":50,"props":205,"children":207},{"id":206},"_6-金箍棒支持低比特权重量化和动态量化算法降低推理成本",[208,210],{"type":23,"value":209},"6 
",{"type":17,"tag":55,"props":211,"children":212},{},[213],{"type":23,"value":214},"金箍棒支持低比特权重量化和动态量化算法，降低推理成本",{"type":17,"tag":216,"props":217,"children":219},"h4",{"id":218},"_61-金箍棒新增awq和gptq算法并提供4bit权重量化推理能力缩减40推理时延和60参数量",[220],{"type":17,"tag":55,"props":221,"children":222},{},[223],{"type":23,"value":224},"6.1 金箍棒新增AWQ和GPTQ算法，并提供4bit权重量化推理能力，缩减40%推理时延和60%参数量",{"type":17,"tag":25,"props":226,"children":227},{},[228],{"type":23,"value":229},"AWQ（Activation-Aware Weight Quantization）是一种低比特权重量化算法，基于激活值分布挑选显著权重，并且考虑到硬件效率，通过缩放的方式来保护显著权重，实现了硬件友好的高精度权重量化算法；与AWQ算法功能相似，GPTQ（Gradient-based Post-training Quantization）算法也是一种低比特权重量化算法，其核心思想是对某个block内的所有参数逐个量化，每个参数量化后，需要适当调整这个block内其他未量化的参数，以弥补量化造成的精度损失。",{"type":17,"tag":25,"props":231,"children":232},{},[233],{"type":23,"value":234},"在昇思MindSpore2.5版本中，金箍棒复现了这两个权重量化算法，并优化了4bit权重量化的推理性能。我们在昇腾Atlas 800I A2硬件上使用8卡Tensor并行推理进行性能和精度测试，结果如下表所示，AWQ和GPTQ在BoolQ，SQuAD1.1和WikiText2数据集上实现了精度几乎无损的A16W4量化。",{"type":17,"tag":25,"props":236,"children":237},{},[238],{"type":17,"tag":85,"props":239,"children":241},{"alt":7,"src":240},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/0f91c1b8f4514121bb6482c477da470f.png",[],{"type":17,"tag":25,"props":243,"children":244},{},[245],{"type":23,"value":246},"针对Llama2 
70B网络，如下图所示，4bit权重量化在batch_size小于8的情况下最多可以获得48.7%的时延收益；但当batch_size大于等于16时，受限于昇腾硬件架构，4bit权重量化在时延上会产生负收益。",{"type":17,"tag":25,"props":248,"children":249},{},[250],{"type":17,"tag":85,"props":251,"children":253},{"alt":7,"src":252},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/8f8c6c32d98648b99bebd90c0fbd5cad.png",[],{"type":17,"tag":25,"props":255,"children":256},{},[257,258],{"type":23,"value":142},{"type":17,"tag":144,"props":259,"children":262},{"href":260,"rel":261},"https://gitee.com/mindspore/golden-stick/tree/master/mindspore_gs/ptq/ptq#gptq%E7%AE%97%E6%B3%95",[148],[263],{"type":23,"value":260},{"type":17,"tag":216,"props":265,"children":267},{"id":266},"_62-金箍棒新增激活动态量化算法提升8bit量化的精度",[268],{"type":17,"tag":55,"props":269,"children":270},{},[271],{"type":23,"value":272},"6.2 金箍棒新增激活动态量化算法，提升8bit量化的精度",{"type":17,"tag":25,"props":274,"children":275},{},[276],{"type":23,"value":277},"一些对精度十分敏感的模型和任务，即使通过SmoothQuant等异常值抑制技术仍然难以满足精度要求，此时可以通过牺牲一些8bit量化的性能收益，通过动态量化来进一步降低量化的精度损失。",{"type":17,"tag":25,"props":279,"children":280},{},[281],{"type":23,"value":282},"与权重量化不同，在离线量化阶段算法不能获取真实的激活值，只能通过校准集近似统计激活的分布，这会带来额外的量化饱和误差。激活动态量化是指在推理过程中实时统计激活的分布来进行量化推理，实现更小的量化精度损失。",{"type":17,"tag":25,"props":284,"children":285},{},[286],{"type":23,"value":287},"在昇思MindSpore2.5版本中，金箍棒提供了激活动态量化算法，结合权重静态量化和SmoothQuant异常值抑制技术，可以提供精度几乎无损的8bit量化能力。我们在昇腾Atlas 800I A2硬件上使用8卡Tensor并行进行测试，结果如下表所示，激活动态量化算法在C-Eval和SQuAD1.1数据集上实现精度无损的A8W8量化。",{"type":17,"tag":25,"props":289,"children":290},{},[291],{"type":17,"tag":85,"props":292,"children":294},{"alt":7,"src":293},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/38e434ef770542ccbdc29a3afd405dc8.png",[],{"type":17,"tag":25,"props":296,"children":297},{},[298],{"type":23,"value":299},"我们针对RMSNorm和DynamicQuant算子做了融合优化，在Llama2 57B网络，如下图所示，输入batch_size范围[1, 16]，seq_length范围[512, 
2048]，获得端到端耗时缩减3.8%~8.1%。",{"type":17,"tag":25,"props":301,"children":302},{},[303],{"type":17,"tag":85,"props":304,"children":306},{"alt":7,"src":305},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/8c05198943b64ea29c32c3b5439008c9.png",[],{"type":17,"tag":50,"props":308,"children":310},{"id":309},"_7-结合图算融合优化技术降低推理整网时延提升吞吐量",[311,313],{"type":23,"value":312},"7 ",{"type":17,"tag":55,"props":314,"children":315},{},[316],{"type":23,"value":317},"结合图算融合优化技术，降低推理整网时延，提升吞吐量",{"type":17,"tag":25,"props":319,"children":320},{},[321],{"type":23,"value":322},"昇思MindSpore2.5版本使用图算融合技术针对性处理多类量化场景，减少额外量化操作带来的下发耗时和内存访问，并通过昇腾亲和的融合算子实现提升计算效率，与量化压缩、PrefillFlatten技术协同，配合Atlas A2及Atlas推理系列产品达成降低整网时延、提升吞吐的优化效果。",{"type":17,"tag":216,"props":324,"children":326},{"id":325},"_71-融合隐藏量化计算开销整网加速3-8",[327],{"type":17,"tag":55,"props":328,"children":329},{},[330],{"type":23,"value":331},"7.1 融合隐藏量化计算开销，整网加速3-8%",{"type":17,"tag":25,"props":333,"children":334},{},[335],{"type":23,"value":336},"PagedAttention支持使用量化的KVCache作为输入，并在算子内融合反量化操作，利用昇腾硬件Cube/Vector特性实现多计算流水并行，使Cube/Vector计算时延互相掩盖，明显提升算子运行性能，相较浮点输入性能提升10%以上。",{"type":17,"tag":25,"props":338,"children":339},{},[340],{"type":23,"value":341},"同时我们提供以RmsNorm为主体的融合pattern，将前向Add和后向量化计算融合成更大的Vector算子，利用多路融合并行、UB Bank冲突优化、Inplace内存复用等关键技术，融合前后算子性能提升10-30%。上述融合同时支持Quant算子（Per Channel量化）和DynamicQuant算子（Per Token激活动态量化）。",{"type":17,"tag":25,"props":343,"children":344},{},[345],{"type":23,"value":346},"另外，此前版本已支持的MatMul并行融合及后向融合，在昇思MindSpore2.5版本中亦支持了量化数据类型的输入。",{"type":17,"tag":216,"props":348,"children":350},{"id":349},"_72-prefillflatten负载均衡吞吐量提升5",[351],{"type":17,"tag":55,"props":352,"children":353},{},[354],{"type":23,"value":355},"7.2 
PrefillFlatten负载均衡，吞吐量提升5%",{"type":17,"tag":25,"props":357,"children":358},{},[359],{"type":23,"value":360},"大模型推理场景处理序列数据时，通常会使用填充（padding）来使序列长度一致以便于批处理，无法避免地会增加冗余计算量。",{"type":17,"tag":25,"props":362,"children":363},{},[364],{"type":23,"value":365},"昇思MindSpore2.5版本通过PrefillFlatten方法，将输入序列以真实长度进行拼接，无需填充到统一长度；针对这一优化，Attention计算模块（FlashAttention、PagedAttention、ApplyRotaryPosEmb算子）结合昇腾芯片特性，在算子内对输入序列真实长度进行排序，根据每个 Batch 的计算量动态分配不同的核数，确保较长的序列可以分摊到不同计算单元上，可在减少计算量的同时确保每个计算单元的负载均衡，同时优化Vector计算流水并行任务，提升UB利用率，进一步提高整体计算效率10%以上。",{"type":17,"tag":216,"props":367,"children":369},{"id":368},"_73-flexformat融合实现矩阵乘优化整网加速5-7",[370],{"type":17,"tag":55,"props":371,"children":372},{},[373],{"type":23,"value":374},"7.3 FlexFormat融合实现矩阵乘优化，整网加速5-7%",{"type":17,"tag":25,"props":376,"children":377},{},[378],{"type":23,"value":379},"Atlas推理硬件上Cube计算所需特殊Format与原生Format间的频繁转换会引入大量开销，且同一算子使用不同Format实现的性能亦有较大差异。针对此问题，昇思MindSpore2.5版本实现了更为灵活的Format选择及算子融合优化方案，结合高效的UB调度、Buffer复用等技术，使Cube计算可基本掩盖Format转换耗时，从而大幅提升性能。该方案优化支持浮点类型、量化类型及稀疏量化等多类MatMul算子，单算子融合前后性能提升3-40%，端到端性能提升5-7%。",{"type":17,"tag":50,"props":381,"children":383},{"id":382},"_8-支持dit文生图模型以存代算及gate算法降低端到端时延32",[384,386],{"type":23,"value":385},"8 ",{"type":17,"tag":55,"props":387,"children":388},{},[389],{"type":23,"value":390},"支持DiT文生图模型以存代算及Gate算法，降低端到端时延32%",{"type":17,"tag":216,"props":392,"children":394},{"id":393},"_81-以存代算算法降低注意力机制计算量端到端加速24",[395],{"type":17,"tag":55,"props":396,"children":397},{},[398],{"type":23,"value":399},"8.1 以存代算算法降低注意力机制计算量，端到端加速24%",{"type":17,"tag":25,"props":401,"children":402},{},[403],{"type":23,"value":404},"主流的文本生成图像模型采用了基于注意力机制的多步迭代扩散去噪方法。在多步迭代过程中，相邻的时间步骤间通常存在冗余计算，这些计算具有高度相似性。通过识别和复用这些冗余计算的结果，可以在保持精度的同时减少计算量。",{"type":17,"tag":25,"props":406,"children":407},{},[408],{"type":23,"value":409},"基于该思想并借鉴Delta-Cache的设计理念，昇思MindSpore 
2.5版本引入了针对DiT文生图模型的以存代算（Cache）算法。如下图所示，该方法对前一个时间步（xt）中特定两个位置特征值（⑦-⑥）的偏移量进行缓存，并在下一个时间步（xt-1）中将这个偏移量直接应用到B1层的输入上，从而跳过了从B1到B3的计算过程。",{"type":17,"tag":25,"props":411,"children":412},{},[413],{"type":17,"tag":85,"props":414,"children":416},{"alt":7,"src":415},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/82dd7ac32f3d46a6b46ea2e8472f719e.png",[],{"type":17,"tag":25,"props":418,"children":419},{},[420],{"type":23,"value":421},"不同于Delta-Cache所采用的部署策略，我们对参数进行了重新调整，以更好地适应不同的模型结构。在SD3模型上的测试结果显示，当生成分辨率为1024*1024像素的图片时，该算法能够在精度几乎无损的前提下减少约24%的端到端推理延迟。",{"type":17,"tag":216,"props":423,"children":425},{"id":424},"_82-叠加gate算法推理额外加速10",[426],{"type":17,"tag":55,"props":427,"children":428},{},[429],{"type":23,"value":430},"8.2 叠加Gate算法，推理额外加速10%",{"type":17,"tag":25,"props":432,"children":433},{},[434],{"type":23,"value":435},"主流的文生图模型大多采用了无分类器引导（Classifier-Free Guidance, CFG）技术，其特点是在每次迭代过程中执行两次生成过程：一次是无条件引导，另一次是有条件的文本引导。实验发现，在迭代后期专注于提升图像质量的过程中，无条件引导对生成结果的影响相对较小。",{"type":17,"tag":25,"props":437,"children":438},{},[439],{"type":23,"value":440},"基于这一观察，昇思MindSpore 2.5版本引入了Gate算法，允许在采用CFG技术的文本生成图像模型中，在特定的时间步停用无条件引导计算。此外，Gate算法可以与前述的以存代算（Cache）算法协同使用，进一步优化性能。在SD3模型上应用Gate算法后，相较于仅使用以存代算（Cache）算法的情况，推理时延降低了额外的10%，达成总计约32%的推理加速。",{"type":17,"tag":25,"props":442,"children":443},{},[444,445],{"type":23,"value":142},{"type":17,"tag":144,"props":446,"children":449},{"href":447,"rel":448},"https://github.com/mindspore-lab/mindone/tree/master/examples/dit_infer_acceleration",[148],[450],{"type":23,"value":447},{"type":17,"tag":50,"props":452,"children":454},{"id":453},"工具效率提升",[455],{"type":17,"tag":55,"props":456,"children":457},{},[458],{"type":23,"value":459},"——工具效率提升——",{"type":17,"tag":50,"props":461,"children":463},{"id":462},"_9-msprobe工具新增分级可视化构图比对实现快速分析精度问题",[464,466],{"type":23,"value":465},"9 
",{"type":17,"tag":55,"props":467,"children":468},{},[469],{"type":23,"value":470},"msprobe工具新增分级可视化构图比对，实现快速分析精度问题",{"type":17,"tag":25,"props":472,"children":473},{},[474],{"type":23,"value":475},"针对大模型场景精度问题定位效率低，精度数据呈现不直观的问题，msprobe工具新增支持昇思MindSpore场景分级可视化构图比对，实现模型各个层级的精度数据比对，方便用户理解模型结构，快速分析精度问题。如下图所示，用户可以选择单图构建查看模型结构，也可以选择双图比对，实现昇思MindSpore与PyTorch的跨框架比对。可视化构图比对展示了模型的层级结构，并且每个节点都展示了输入输出数据信息、堆栈信息等，支持按节点名称搜索和按节点颜色进行精度筛选。",{"type":17,"tag":25,"props":477,"children":478},{},[479],{"type":17,"tag":85,"props":480,"children":482},{"alt":7,"src":481},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/cf641ef4e5a54e1796117149ce83d065.png",[],{"type":17,"tag":25,"props":484,"children":485},{},[486],{"type":23,"value":487},"msprobe工具支持基于模型结构的精度可视化对齐和分析，一键找到模型实现差异、精度异常节点，大大提升精度对比分析效率。",{"type":17,"tag":25,"props":489,"children":490},{},[491,492],{"type":23,"value":142},{"type":17,"tag":144,"props":493,"children":496},{"href":494,"rel":495},"https://gitee.com/ascend/mstt/tree/master/debug/accuracy_tools/msprobe#/ascend/mstt/blob/master/debug/accuracy_tools/msprobe/./docs/22.visualization_MindSpore.md",[148],[497],{"type":23,"value":494},{"type":17,"tag":50,"props":499,"children":501},{"id":500},"_10-profiler实现轻量化打点支持集群场景问题快速定界",[502,504],{"type":23,"value":503},"10 
",{"type":17,"tag":55,"props":505,"children":506},{},[507],{"type":23,"value":508},"Profiler实现轻量化打点，支持集群场景问题快速定界",{"type":17,"tag":25,"props":510,"children":511},{},[512],{"type":23,"value":513},"针对大集群场景传统Profiler流程重、数据量大的问题，昇思MindSpore2.5版本提供轻量化Profiler能力，帮助大集群场景轻量化获取模型关键指标性能数据。如下图所示，用户可通过mstx.mark、mstx.range_start、mstx.range_end接口自定义打点，同时支持通信算子的内置打点，用户开启轻量化打点功能，通讯算子前后将自动实现打点。所有的打点任务由runtime下发至device侧，可呈现打点任务在host侧和device侧的时间点或时间片。",{"type":17,"tag":25,"props":515,"children":516},{},[517],{"type":17,"tag":85,"props":518,"children":520},{"alt":7,"src":519},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/12/e7586ec9c1e04ec99cac8577d3154be8.png",[],{"type":17,"tag":25,"props":522,"children":523},{},[524],{"type":23,"value":525},"轻量化打点可支撑昇思MindSpore2.5大集群训练业务场景，提供大集群场景少量数据即可定位问题边界能力。",{"type":17,"tag":25,"props":527,"children":528},{},[529,530],{"type":23,"value":142},{"type":17,"tag":144,"props":531,"children":534},{"href":532,"rel":533},"https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.profiler.mstx.html?highlight=mstx#mindspore.profiler.mstx",[148],[535],{"type":23,"value":532},{"title":7,"searchDepth":537,"depth":537,"links":538},4,[539,541,543,545,547,549,551,552,557,563,568,569,571],{"id":52,"depth":540,"text":59},3,{"id":62,"depth":540,"text":542},"1 动态图补齐view和in-place功能，提升Tensor索引性能平均3.4倍",{"id":91,"depth":540,"text":544},"2 反向完善动态shape能力，提升动态图执行性能30%",{"id":107,"depth":540,"text":546},"3 完善图算融合，增强静态图O1模式的泛化可用性",{"id":153,"depth":540,"text":548},"4 超节点功能平滑迁移，全面发挥互联优势",{"id":169,"depth":540,"text":550},"5 新增支持不占资源的仿真模拟集群执行流程，提高调优效率",{"id":197,"depth":540,"text":203},{"id":206,"depth":540,"text":553,"children":554},"6 金箍棒支持低比特权重量化和动态量化算法，降低推理成本",[555,556],{"id":218,"depth":537,"text":224},{"id":266,"depth":537,"text":272},{"id":309,"depth":540,"text":558,"children":559},"7 
结合图算融合优化技术，降低推理整网时延，提升吞吐量",[560,561,562],{"id":325,"depth":537,"text":331},{"id":349,"depth":537,"text":355},{"id":368,"depth":537,"text":374},{"id":382,"depth":540,"text":564,"children":565},"8 支持DiT文生图模型以存代算及Gate算法，降低端到端时延32%",[566,567],{"id":393,"depth":537,"text":399},{"id":424,"depth":537,"text":430},{"id":453,"depth":540,"text":459},{"id":462,"depth":540,"text":570},"9 msprobe工具新增分级可视化构图比对，实现快速分析精度问题",{"id":500,"depth":540,"text":572},"10 Profiler实现轻量化打点，支持集群场景问题快速定界","markdown","content:version-updates:zh:3601.md","content","version-updates/zh/3601.md","version-updates/zh/3601","md",1776506145416]