[{"data":1,"prerenderedAt":369},["ShallowReactive",2],{"content-query-3Em2q2dydD":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":363,"_id":364,"_source":365,"_file":366,"_stem":367,"_extension":368},"/technology-blogs/zh/2026-2-9","zh",false,"","MindSpore Shard：深度解读算子级工具","深入理解 MindSpore 的算子级任务","2026-2-9","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/11/28/8e0e0150508a4c5ba4287fa3bec8ea3f.png","technology-blogs","技术解读",{"type":15,"children":16,"toc":347},"root",[17,25,31,36,43,48,63,68,73,79,84,95,100,105,123,128,133,140,145,158,164,169,182,187,192,205,211,216,221,226,239,249,254,259,272,280,285,290,295,300,308,314,320,325,331,336,342],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore-shard深度解读算子级工具",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"如果把训练大模型比作指挥去庞大的军队，那么MindSpore的shard接口就是你手中的指挥棒。它让你跳过繁琐的粗俗，直接告诉系统：“这个关键阵地（算子），我用这种细节的特定队形（切分策略）去攻克！”",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":35},"本指南将带你深入理解 MindSpore 的算子级任务（Operator-level Parallelism），不仅知其然，更知其所以然。",{"type":18,"tag":37,"props":38,"children":40},"h2",{"id":39},"_01-为什么需要shard",[41],{"type":24,"value":42},"01 为什么需要Shard？",{"type":18,"tag":26,"props":44,"children":45},{},[46],{"type":24,"value":47},"在工具训练的世界里，通常有两种补充：",{"type":18,"tag":49,"props":50,"children":51},"ul",{},[52,58],{"type":18,"tag":53,"props":54,"children":55},"li",{},[56],{"type":24,"value":57},"辅助驾驶（自动并行）：你什么都不用管，系统决定帮你怎么切分模型。虽然方便，但成本模型（代价模型）并不总是完美的，有时它选择的“路”虽然理论代价低，但并不符合你的专家直觉。",{"type":18,"tag":53,"props":59,"children":60},{},[61],{"type":24,"value":62},"纯手动挡（数据并行）：你可以简单地把数据分给各张卡，模型本身不动。这对于超大模型来说，显存根本不够用。",{"type":18,"tag":26,"props":64,"children":65},{},[66],{"type":24,"value":67},"碎片就像是半自动驾驶。你拥有“专家介入”的权力——对于那些你最了解、最关键的算子（比如巨大的矩阵乘法），你可以手动指定切分方式；而对于剩下的琐碎算子，则相当于MindSpore自动推导。",{"type":18,"tag":26,"props":69,"children":70},{},[71],{"type":24,"value":72},"一句话总结：shard让你在“操控力”和“便利性”之间找到了完美的平衡。",{"type":18,"tag":37,"props":74,"children":76},{"id":75},"_02-深度揭密从策略到执行的旅程",[77],{"type":24,"value":78},"02 深度揭密：从策略到执行的旅程",{"type":18,"tag":26,"props":80,"children":81},{},[82],{"type":24,"value":83},"shard不是简单地给算子打个标签，它触发了MindSpore内部一套复杂的策略传播与图编译机制。让我们拆解一下这个过程。",{"type":18,"tag":85,"props":86,"children":88},"div",{"style":87},"text-align: center;",[89],{"type":18,"tag":90,"props":91,"children":94},"img",{"src":92,"style":93,"alt":7},"/category/information/technology-blogs/banner/2026-2-9/1.jpg","display: block;margin: 0 auto;max-width:70%",[],{"type":18,"tag":26,"props":96,"children":97},{},[98],{"type":24,"value":99},"2.1第一阶段：锚点入户（Anchor Injection）",{"type":18,"tag":26,"props":101,"children":102},{},[103],{"type":24,"value":104},"当你调用shard(fn, in_strategy)时，你实际上是在计算图上打出的几个坚不可摧的节点。",{"type":18,"tag":49,"props":106,"children":107},{},[108,113,118],{"type":18,"tag":53,"props":109,"children":110},{},[111],{"type":24,"value":112},"绝对权威：这些被shard标记的子，其输入和输出的张量布局（Layout）被永久锁定。在C++的图优化阶段，系统会识别这些原算语（prim::kPrimShard），并通过SetInputLayout并将SetOutputLayout策略绑定到对应的图节点上。",{"type":18,"tag":53,"props":114,"children":115},{},[116],{"type":24,"value":117},"硬约束：在后续的所有优化过程中，无论系统觉得其他策略如何诱导（比如通信开销更小），它都绝不敢修改你指定的策略。这就相当于你在地图上钉钉下的子，路怎么修都，但有几点必须经过可以。",{"type":18,"tag":53,"props":119,"children":120},{},[121],{"type":24,"value":122},"种子池（Seed Pool）：这些成功设置了策略的算子会被存入一个特殊的集合（configured_ops），它们将成为下一阶段算法的根本驱动源。",{"type":18,"tag":26,"props":124,"children":125},{},[126],{"type":24,"value":127},"2.2 第二阶段：切分传播（Sharding Propagation）",{"type":18,"tag":26,"props":129,"children":130},{},[131],{"type":24,"value":132},"这是shard机制的核心魔法。系统不仅要满足你的要求，还要让整个图“跑通”。MindSpore使用了一种基于BFS（广度优先搜索）的传播算法来实现这一目标。",{"type":18,"tag":134,"props":135,"children":137},"h3",{"id":136},"循环效应",[138],{"type":24,"value":139},"循环效应：",{"type":18,"tag":26,"props":141,"children":142},{},[143],{"type":24,"value":144},"算法从种子池中的算子开始，向图的完成扩散（BFS）：",{"type":18,"tag":49,"props":146,"children":147},{},[148,153],{"type":18,"tag":53,"props":149,"children":150},{},[151],{"type":24,"value":152},"顺流传播（Forward）：遍历当前算子的输出边。如果上游算子B未配置策略，系统会根据上游算子A的输出布局，为B选择一个最匹配的输入策略。",{"type":18,"tag":53,"props":154,"children":155},{},[156],{"type":24,"value":157},"逆流传播（Backward）：遍历当前算子C的输入边。如果上游算子C未配置策略，系统会根据下游算子D的输入需求，反向推导C的策略输出。",{"type":18,"tag":134,"props":159,"children":161},{"id":160},"最小代价决策greedy-cost-minimization",[162],{"type":24,"value":163},"最小代价决策（Greedy Cost Minimization）：",{"type":18,"tag":26,"props":165,"children":166},{},[167],{"type":24,"value":168},"在传播过程中，当系统需要为相邻的未配置算子选择策略时，它遵循一个核心原则：最小化重排布代价。",{"type":18,"tag":49,"props":170,"children":171},{},[172,177],{"type":18,"tag":53,"props":173,"children":174},{},[175],{"type":24,"value":176},"零通信优先：如果存在一种策略，使得数据不需要在卡间传输能够直接被下游使用（布局完全匹配），那么就不必犹豫地选择它。",{"type":18,"tag":53,"props":178,"children":179},{},[180],{"type":24,"value":181},"最小通信次之：如果必须传输，则计算所有备选策略的重排布代价（Redistribution Cost），选择通信量最小的那个。",{"type":18,"tag":26,"props":183,"children":184},{},[185],{"type":24,"value":186},"2.3 第三阶段：与桥梁建设的冲突解决（Conflict Resolution）",{"type":18,"tag":26,"props":188,"children":189},{},[190],{"type":24,"value":191},"现实往往不完美。如果你的策略和模型的自然结构发生了冲突，或者你手动指定了两个相邻算子使用完全不同的策略，会发生什么？",{"type":18,"tag":49,"props":193,"children":194},{},[195,200],{"type":18,"tag":53,"props":196,"children":197},{},[198],{"type":24,"value":199},"自动插入转换器（Redistribution）：\nMindSpore不会报错，而是会充当“和事佬”。\n假设上游算子A是“行切分”数据，而你强转换行下游规定算子B必须接收“负责列切分”数据。\n系统会自动在A和B之间插入一组通信算子（如AllToAll ）。这组算子在运行时把AllGather数据Permute从卡A搬运到卡B，完成布局的。",{"type":18,"tag":53,"props":201,"children":202},{},[203],{"type":24,"value":204},"代价权衡：\n虽然系统能解决冲突，但转换是有代价的（时间、带宽）。shard的艺术就在于：不仅要指定策略，还要尽量减少这种不必要的转换。",{"type":18,"tag":37,"props":206,"children":208},{"id":207},"_03-实战心法如何用好shard",[209],{"type":24,"value":210},"03 实战心法：如何用好Shard？",{"type":18,"tag":26,"props":212,"children":213},{},[214],{"type":24,"value":215},"理解了原理，我们来看看怎么用。这里不罗列代码，而是讲“心法”。",{"type":18,"tag":26,"props":217,"children":218},{},[219],{"type":24,"value":220},"3.1 心法一：抓住“大鱼”，放过“虾米”",{"type":18,"tag":26,"props":222,"children":223},{},[224],{"type":24,"value":225},"不要尝试再次给每个算子都分片。",{"type":18,"tag":49,"props":227,"children":228},{},[229,234],{"type":18,"tag":53,"props":230,"children":231},{},[232],{"type":24,"value":233},"大鱼：计算量大、参数多的算子（如MatMul，Conv2D）。这些是性能极限，值得你手动优化（比如做模型玩具）。",{"type":18,"tag":53,"props":235,"children":236},{},[237],{"type":24,"value":238},"虾米：激活函数（ReLU）、逐元素操作（Add）。这些算子计算极快，通常紧随上游策略即可（数据工具），不需要你操心。",{"type":18,"tag":240,"props":241,"children":243},"pre",{"code":242},"# [实战示例] 抓大放小\nclass Net(nn.Cell):\n    def __init__(self):\n        super().__init__()\n        self.dense = nn.Dense(64, 64) # 大鱼：矩阵乘法\n        self.relu = nn.ReLU()         # 虾米：激活函数\n    \n    def construct(self, x):\n        # 1. 只有 Dense 这种重计算算子值得我们手动切分\n        # 我们给它配置模型并行策略（假设4卡，参数切4份）\n        x = shard(self.dense, in_strategy=((4, 1),), parameter_plan={\"self.dense.weight\": (1, 4)})(x)\n        \n        # 2. ReLU 很轻，不要管它。\n        # MindSpore 会自动推导：既然上游 Dense 输出了切分后的数据，ReLU 就直接复用这个策略，零通信代价！\n        x = self.relu(x) \n        return x\n",[244],{"type":18,"tag":245,"props":246,"children":247},"code",{"__ignoreMap":7},[248],{"type":24,"value":242},{"type":18,"tag":26,"props":250,"children":251},{},[252],{"type":24,"value":253},"3.2 心法二：顺势而为，减少“搬运”",{"type":18,"tag":26,"props":255,"children":256},{},[257],{"type":24,"value":258},"设计策略时，要顺应数据的流动方向。",{"type":18,"tag":49,"props":260,"children":261},{},[262,267],{"type":18,"tag":53,"props":263,"children":264},{},[265],{"type":24,"value":266},"坏情况：第一层用模型任务（切参数），第二层突然强行切回数据任务（切Batch），第三层又切回模型任务。这会导致每层之间都处于疯狂通信（AllToAll），训练极慢。",{"type":18,"tag":53,"props":268,"children":269},{},[270],{"type":24,"value":271},"好案例：连续的几个层都保持模型工具，直到必须聚合时（比如损失计算前）再统一转回数据工具。",{"type":18,"tag":240,"props":273,"children":275},{"code":274},"# [实战示例] 顺势而为\n# 假设我们构建一个多层感知机 (MLP)\nclass MLP(nn.Cell):\n    def __init__(self):\n        super().__init__()\n        self.fc1 = nn.Dense(128, 128)\n        self.fc2 = nn.Dense(128, 128)\n    \n    def construct(self, x):\n        # Good: 连续使用模型并行，中间不需要转来转去\n        # 第一层：输入(4,1) -> 权重(1,4) -> 输出(4,1) [注意：MatMul会自动推导输出策略]\n        x = shard(self.fc1, in_strategy=((4, 1),), parameter_plan={\"self.fc1.weight\": (1, 4)})(x)\n       \n        # 第二层：继续接收(4,1)的输入。因为上游输出是(4,1)，这里不需要任何通信！\n        x = shard(self.fc2, in_strategy=((4, 1),), parameter_plan={\"self.fc2.weight\": (1, 4)})(x)\n        return x\n",[276],{"type":18,"tag":245,"props":277,"children":278},{"__ignoreMap":7},[279],{"type":24,"value":274},{"type":18,"tag":26,"props":281,"children":282},{},[283],{"type":24,"value":284},"3.3 心法三：利用布局提升优势",{"type":18,"tag":26,"props":286,"children":287},{},[288],{"type":24,"value":289},"不要在代码里写满(4, 1), (8, 1)这种数字天书。使用Layout给维度起名字。",{"type":18,"tag":26,"props":291,"children":292},{},[293],{"type":24,"value":294},"把设备维度命名为dp(Data Parallel) 和mp(Model Parallel)。",{"type":18,"tag":26,"props":296,"children":297},{},[298],{"type":24,"value":299},"代码里写的layout(\"dp\", \"mp\")，一眼就能看出是“数据维走dp，模型维走mp”。",{"type":18,"tag":240,"props":301,"children":303},{"code":302},"# [实战示例] 提升可读性\nfrom mindspore.parallel import Layout\n\n# 定义布局：8卡，4x2\nlayout = Layout((4, 2), (\"dp\", \"mp\"))\n\ndef attention_score(q, k):\n    return ops.matmul(q, k)\n\n# 不用 Layout: \n# shard(attention_score, in_strategy=((4, 1, 2), (4, 2, 1)))  # 谁知道哪个维度对应什么？\n\n# 使用 Layout: \n# 假设 q: [Batch, Seq, Head], k: [Batch, Head, Seq]\n# dp=Batch维度, mp=Head维度\nin_strategy = (\n    layout(\"dp\", \"None\", \"mp\"),  # q: Batch切dp, Head切mp\n    layout(\"dp\", \"mp\", \"None\")   # k: Batch切dp, Head切mp\n)\nshard(attention_score, in_strategy=in_strategy) # 清晰明了！\n",[304],{"type":18,"tag":245,"props":305,"children":306},{"__ignoreMap":7},[307],{"type":24,"value":302},{"type":18,"tag":37,"props":309,"children":311},{"id":310},"_04-常见误区与避坑",[312],{"type":24,"value":313},"04 常见误区与避坑",{"type":18,"tag":134,"props":315,"children":317},{"id":316},"自动不是全能",[318],{"type":24,"value":319},"“自动”不是“全能”：",{"type":18,"tag":26,"props":321,"children":322},{},[323],{"type":24,"value":324},"虽然叫AUTO_PARALLEL，但如果你分片得不合理（比如切分份数不能整除卡数），系统也没法帮“圆”回来，会直接报错。",{"type":18,"tag":134,"props":326,"children":328},{"id":327},"pynative-的遗憾",[329],{"type":24,"value":330},"PyNative 的遗憾：",{"type":18,"tag":26,"props":332,"children":333},{},[334],{"type":24,"value":335},"目前shard强依赖于静态图编译技术（因为要分析全图做传播），所以在 PyNative 模式（动态图）下暂时无法使用。",{"type":18,"tag":134,"props":337,"children":339},{"id":338},"牵一发而动全身",[340],{"type":24,"value":341},"牵一发而动全身：",{"type":18,"tag":26,"props":343,"children":344},{},[345],{"type":24,"value":346},"你在网络中间修改了一个算子的策略，可能会导致整个网络的策略发生“蝴蝶效应”般的剧变。如果不确定，建议先在小规模子网中验证。",{"title":7,"searchDepth":348,"depth":348,"links":349},4,[350,352,357,358],{"id":39,"depth":351,"text":42},2,{"id":75,"depth":351,"text":78,"children":353},[354,356],{"id":136,"depth":355,"text":139},3,{"id":160,"depth":355,"text":163},{"id":207,"depth":351,"text":210},{"id":310,"depth":351,"text":313,"children":359},[360,361,362],{"id":316,"depth":355,"text":319},{"id":327,"depth":355,"text":330},{"id":338,"depth":355,"text":341},"markdown","content:technology-blogs:zh:2026-2-9.md","content","technology-blogs/zh/2026-2-9.md","technology-blogs/zh/2026-2-9","md",1776506119728]