[{"data":1,"prerenderedAt":357},["ShallowReactive",2],{"content-query-OyPMtz4wyo":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":351,"_id":352,"_source":353,"_file":354,"_stem":355,"_extension":356},"/news/zh/3782","zh",false,"","开源首发，昇思MindSpore支持盘古Pro一键部署，内附手把手教程","近日，华为于2025年5月28日发布的大语言模型盘古Pro 正式开源。","2025-06-30","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/04/33afcecee99e49e1ab3ebe89e1fd18f7.png","news",{"type":14,"children":15,"toc":348},"root",[16,24,30,35,47,52,57,69,80,88,93,101,106,114,126,131,139,147,152,157,165,170,178,189,194,202,207,217,222,230,235,250,258,263,271,279,284,292,297,305,310,318,323,331,343],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"开源首发昇思mindspore支持盘古pro一键部署内附手把手教程",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":29},"近日，华为于2025年5月28日发布的大语言模型盘古Pro 正式开源。依托对MoE的完备支持，昇思MindSpore支持盘古Pro MoE的开源首发，并已将MindSpore版的盘古Pro MoE推理代码上传至开源社区，实现分钟级、一键式的vLLM服务化部署。",{"type":17,"tag":25,"props":31,"children":32},{},[33],{"type":23,"value":34},"模型卡片：",{"type":17,"tag":25,"props":36,"children":37},{},[38],{"type":17,"tag":39,"props":40,"children":44},"a",{"href":41,"rel":42},"https://gitee.com/mindspore/vllm-mindspore/blob/pangu-pro-moe/docs/model%5C_cards/pangu/pangu%5C_pro%5C_moe.md",[43],"nofollow",[45],{"type":23,"value":46},"https://gitee.com/mindspore/vllm-mindspore/blob/pangu-pro-moe/docs/model\\_cards/pangu/pangu\\_pro\\_moe.md",{"type":17,"tag":25,"props":48,"children":49},{},[50],{"type":23,"value":51},"混合专家模型（Mixture of Expert）能够以较低的计算成本支持更大的参数规模，已成为大语言模型的重要发展方向。2025年5月28日，华为发布了总参数量 720 亿、激活参数量160亿的盘古Pro MoE模型。盘古Pro MoE模型使用了创新的分组混合专家模型（Mixture of Grouped Experts, 
MoGE）架构，在专家选择阶段对专家进行分组，并约束各组分别激活等量专家，从而实现专家负载均衡和提升昇腾平台计算效率。",{"type":17,"tag":25,"props":53,"children":54},{},[55],{"type":23,"value":56},"昇思MindSpore此前已支持DeepSeek-V3/R1、Qwen3-235B、GLM-Z1、MiniCPM4等40余款主流大语言模型，具备对混合专家模型的完备支持能力，并支持通过vLLM进行服务化部署，有效提升系统吞吐率。通过算子融合、模型并行等优化，以及对盘古Pro MoE模型的权重、激活和KVCache的量化支持，当前使用vLLM+MindSpore在昇腾Atlas 800I A2已完成部署验证，同时支持昇腾Atlas 300I Duo。",{"type":17,"tag":25,"props":58,"children":59},{},[60],{"type":17,"tag":61,"props":62,"children":63},"strong",{},[64],{"type":17,"tag":61,"props":65,"children":66},{},[67],{"type":23,"value":68},"# 01",{"type":17,"tag":25,"props":70,"children":71},{},[72],{"type":17,"tag":61,"props":73,"children":74},{},[75],{"type":17,"tag":61,"props":76,"children":77},{},[78],{"type":23,"value":79},"技术特性",{"type":17,"tag":25,"props":81,"children":82},{},[83],{"type":17,"tag":61,"props":84,"children":85},{},[86],{"type":23,"value":87},"1、支持vLLM 0.8.3 V0/V1版本的核心特性",{"type":17,"tag":25,"props":89,"children":90},{},[91],{"type":23,"value":92},"昇思MindSpore社区开发了vLLM-MindSpore开源插件，支持使用vLLM部署MindSpore推理模型。该方案在vLLM连续性批调度、KVCache分页管理、Chunked Prefill、Prefix Cache等服务特性的基础上，叠加MindSpore即时编译、图算融合、混合量化等推理加速能力，实现了昇腾AI处理器上大模型推理的极致性能与快速部署。",{"type":17,"tag":25,"props":94,"children":95},{},[96],{"type":17,"tag":97,"props":98,"children":100},"img",{"alt":7,"src":99},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/04/b500f0e8b7694336ba91df0b3be1671b.png",[],{"type":17,"tag":25,"props":102,"children":103},{},[104],{"type":23,"value":105},"当前vLLM-MindSpore主干分支已适配vLLM 
v0.8.3版本，支持V0/V1架构。",{"type":17,"tag":25,"props":107,"children":108},{},[109],{"type":17,"tag":61,"props":110,"children":111},{},[112],{"type":23,"value":113},"2、量化推理",{"type":17,"tag":25,"props":115,"children":116},{},[117,119],{"type":23,"value":118},"昇思MindSpore社区与华为诺亚实验室、泰勒实验室联合打造了模型量化压缩算法套件金箍棒（",{"type":17,"tag":39,"props":120,"children":123},{"href":121,"rel":122},"https://gitee.com/mindspore/golden-stick%EF%BC%89%EF%BC%8C%E6%94%AF%E6%8C%81SmoothQuant%E3%80%81AWQ%E3%80%81GPTQ%E7%AD%89%E4%B8%9A%E7%95%8C%E4%B8%BB%E6%B5%81%E5%90%8E%E9%87%8F%E5%8C%96%E7%AE%97%E6%B3%95%E4%BB%A5%E5%8F%8A%E5%A4%9A%E7%A7%8D%E8%87%AA%E7%A0%94%E9%87%8F%E5%8C%96%E7%AE%97%E6%B3%95%E3%80%82%E4%B8%BA%E4%BA%86%E6%8F%90%E5%8D%87%E9%87%8F%E5%8C%96%E6%8E%A8%E7%90%86%E6%95%88%E7%8E%87%E5%92%8C%E9%99%8D%E4%BD%8E%E7%B2%BE%E5%BA%A6%E6%8D%9F%E5%A4%B1%EF%BC%8C%E4%BD%BF%E7%94%A8%E8%87%AA%E7%A0%94%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E9%87%8F%E5%8C%96%E7%AE%97%E6%B3%95%EF%BC%8C%E8%87%AA%E5%8A%A8%E6%90%9C%E7%B4%A2%E6%9C%80%E4%BC%98%E9%87%8F%E5%8C%96%E7%AD%96%E7%95%A5%EF%BC%8C%E5%8F%AF%E5%AE%9E%E7%8E%B0%E7%9B%98%E5%8F%A4%E7%BD%91%E7%BB%9C%E5%87%A0%E4%B9%8E%E7%B2%BE%E5%BA%A6%E6%97%A0%E6%8D%9F%E7%9A%848bit%E9%87%8F%E5%8C%96%EF%BC%88A8W8%EF%BC%89%EF%BC%8C%E4%B9%9F%E5%8F%AF%E8%BF%9B%E4%B8%80%E6%AD%A5%E5%AF%B9%E9%87%8F%E5%8C%96%E5%90%8E%E7%9A%84%E6%9D%83%E9%87%8D%E8%BF%9B%E8%A1%8C%E7%B2%BE%E5%BA%A6%E8%A1%A5%E5%81%BF%EF%BC%8C%E5%AE%9E%E7%8E%B04bit%E9%87%8F%E5%8C%96%E3%80%82",[43],[124],{"type":23,"value":125},"https://gitee.com/mindspore/golden-stick），支持SmoothQuant、AWQ、GPTQ等业界主流后量化算法以及多种自研量化算法。为了提升量化推理效率和降低精度损失，使用自研混合精度量化算法，自动搜索最优量化策略，可实现盘古网络几乎精度无损的8bit量化（A8W8），也可进一步对量化后的权重进行精度补偿，实现4bit量化。",{"type":17,"tag":25,"props":127,"children":128},{},[129],{"type":23,"value":130},"除了对模型参数和激活值量化以外，还可对KVCache进行量化，以节省显存和提升Batch Size。金箍棒套件支持静态量化与动态量化两种KVCache模式。由于vLLM暂未支持KVCache动态量化参数的管理，因此在使用vLLM部署盘古Pro 
MoE时，仅可使用KVCache静态量化。",{"type":17,"tag":25,"props":132,"children":133},{},[134],{"type":17,"tag":97,"props":135,"children":138},{"alt":136,"src":137},"https://modelbest.feishu.cn/space/api/box/stream/download/asynccode/?code=YjkwYjgyYTkxZGVkZTczNGQyZWYxYjRlMTViNjNjZThfOFlhOGoyekFRSTA2c3paY0w0OVFTa1Z4bzJ1RVJ3RDlfVG9rZW46UHc5ZGJBSnNLb0dyVTV4dkE4SWNtazdJbjFmXzE3NDkxODAyNjQ6MTc0OTE4Mzg2NF9WNA","https://mmbiz.qpic.cn/sz_mmbiz_gif/FBnLNACvkWDlSZUicJULHShCKFTniaOghBdricbDGgErqfmXQuRCiaoHvibe9g97ojtlnQwTfhiaWf8IBgwticgp439Mw/640?wx_fmt=gif&from=appmsg&tp=webp&wxfrom=5&wx_lazy=1",[],{"type":17,"tag":25,"props":140,"children":141},{},[142],{"type":17,"tag":61,"props":143,"children":144},{},[145],{"type":23,"value":146},"3、多种自注意力算子及向量排布",{"type":17,"tag":25,"props":148,"children":149},{},[150],{"type":23,"value":151},"昇思MindSpore在盘古Pro MoE推理模型中支持了多种Attention算子，可在不同应用场景下获取最优计算性能：使能Prefix Cache、Chunked Prefill的场景，可选用Paged Attention算子；其它场景，Prefill阶段可选用PromptFlashAttention算子，Decode阶段可选用IncreFlashAttention算子。",{"type":17,"tag":25,"props":153,"children":154},{},[155],{"type":23,"value":156},"此外，在大Batch Size场景，昇思MindSpore支持自适应选择NZ格式的Matmul算子，实现单算子性能提升40%。在昇腾300I Duo上的自注意力计算部分也采用NZ格式，因此vLLM-MindSpore插件还支持了NZ格式的KVCache管理。",{"type":17,"tag":25,"props":158,"children":159},{},[160],{"type":17,"tag":61,"props":161,"children":162},{},[163],{"type":23,"value":164},"4、并行推理",{"type":17,"tag":25,"props":166,"children":167},{},[168],{"type":23,"value":169},"昇思MindSpore支持TP、EP、DP、SP等多种并行策略，此次使用TP（Attention） + EP（MoE）加速盘古Pro MoE模型推理，后续还将引入DP、SP等并行加速策略。",{"type":17,"tag":25,"props":171,"children":172},{},[173],{"type":17,"tag":61,"props":174,"children":175},{},[176],{"type":23,"value":177},"# 02",{"type":17,"tag":25,"props":179,"children":180},{},[181],{"type":17,"tag":61,"props":182,"children":183},{},[184],{"type":17,"tag":61,"props":185,"children":186},{},[187],{"type":23,"value":188},"部署指南",{"type":17,"tag":25,"props":190,"children":191},{},[192],{"type":23,"value":193},"盘古Pro 
MoE模型推理建议使用1台（8卡） Atlas 800I A2（64G）服务器（基于BF16权重）。昇思MindSpore提供了盘古Pro MoE推理专用的Docker容器镜像，供开发者快速体验。",{"type":17,"tag":25,"props":195,"children":196},{},[197],{"type":17,"tag":61,"props":198,"children":199},{},[200],{"type":23,"value":201},"1、下载模型权重",{"type":17,"tag":25,"props":203,"children":204},{},[205],{"type":23,"value":206},"执行以下命令将模型权重的自定义下载路径 /home/work/PanguProMoE 添加到白名单：",{"type":17,"tag":208,"props":209,"children":211},"pre",{"code":210},"\nexport HUB_WHITE_LIST_PATHS=/home/work/PanguProMoE\n",[212],{"type":17,"tag":213,"props":214,"children":215},"code",{"__ignoreMap":7},[216],{"type":23,"value":210},{"type":17,"tag":25,"props":218,"children":219},{},[220],{"type":23,"value":221},"执行以下 Python 脚本，从魔乐社区下载昇思 MindSpore 版本的 盘古Pro MoE模型权重文件至指定路径 /home/work/PanguProMoE 。下载的文件包含模型配置、模型权重和分词模型，占用约 150GB 的磁盘空间：",{"type":17,"tag":208,"props":223,"children":225},{"code":224},"\nfrom openmind_hub import snapshot_download\n \nsnapshot_download(\n    repo_id=\"MindSpore-Lab/Pangu-Pro-MoE\",\n    local_dir=\"/home/work/PanguProMoE\",\n    local_dir_use_symlinks=False\n) \n",[226],{"type":17,"tag":213,"props":227,"children":228},{"__ignoreMap":7},[229],{"type":23,"value":224},{"type":17,"tag":25,"props":231,"children":232},{},[233],{"type":23,"value":234},"注意事项：",{"type":17,"tag":236,"props":237,"children":238},"ul",{},[239,245],{"type":17,"tag":240,"props":241,"children":242},"li",{},[243],{"type":23,"value":244},"/home/work/PanguProMoE 可修改为自定义路径，需要确保该路径有足够的磁盘空间（约 150GB）。",{"type":17,"tag":240,"props":246,"children":247},{},[248],{"type":23,"value":249},"下载时间可能因网络环境而异，建议在稳定的高速网络环境下下载，10MBps网速下，预计下载时间4小时。",{"type":17,"tag":25,"props":251,"children":252},{},[253],{"type":17,"tag":61,"props":254,"children":255},{},[256],{"type":23,"value":257},"2、下载昇思 MindSpore盘古Pro MoE推理容器镜像",{"type":17,"tag":25,"props":259,"children":260},{},[261],{"type":23,"value":262},"执行以下 Shell 命令，拉取昇思 MindSpore 盘古Pro MoE推理容器镜像：",{"type":17,"tag":208,"props":264,"children":266},{"code":265},"\ndocker 
pull swr.cn-central-221.ovaijisuan.com/mindsporelab/pangu_pro_moe_mindspore-infer:800-A2-20250623\n",[267],{"type":17,"tag":213,"props":268,"children":269},{"__ignoreMap":7},[270],{"type":23,"value":265},{"type":17,"tag":25,"props":272,"children":273},{},[274],{"type":17,"tag":61,"props":275,"children":276},{},[277],{"type":23,"value":278},"3、启动容器",{"type":17,"tag":25,"props":280,"children":281},{},[282],{"type":23,"value":283},"执行以下命令，创建并启动容器：",{"type":17,"tag":208,"props":285,"children":287},{"code":286},"docker run -it --privileged --name=pangu_pro_moe --net=host \\\n   --shm-size 500g \\\n   --device=/dev/davinci0 \\\n   --device=/dev/davinci1 \\\n   --device=/dev/davinci2 \\\n   --device=/dev/davinci3 \\\n   --device=/dev/davinci4 \\\n   --device=/dev/davinci5 \\\n   --device=/dev/davinci6 \\\n   --device=/dev/davinci7 \\\n   --device=/dev/davinci_manager \\\n   --device=/dev/hisi_hdc \\\n   --device /dev/devmm_svm \\\n   -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \\\n   -v /usr/local/Ascend/add-ons:/usr/local/Ascend/add-ons \\\n   -v /usr/local/sbin:/usr/local/sbin \\\n   -v /usr/local/sbin/npu-smi:/usr/local/sbin/npu-smi \\\n   -v /etc/hccn.conf:/etc/hccn.conf \\\n   -v /home:/home \\\n   swr.cn-central-221.ovaijisuan.com/mindsporelab/pangu_pro_moe_mindspore-infer:800-A2-20250623 \\\n   /bin/bash\n",[288],{"type":17,"tag":213,"props":289,"children":290},{"__ignoreMap":7},[291],{"type":23,"value":286},{"type":17,"tag":25,"props":293,"children":294},{},[295],{"type":23,"value":296},"后续所有操作均在容器内进行。",{"type":17,"tag":25,"props":298,"children":299},{},[300],{"type":17,"tag":61,"props":301,"children":302},{},[303],{"type":23,"value":304},"4、启动推理服务",{"type":17,"tag":25,"props":306,"children":307},{},[308],{"type":23,"value":309},"执行以下shell命令启动推理服务：",{"type":17,"tag":208,"props":311,"children":313},{"code":312},"\nvllm-mindspore serve \"/home/work/PanguProMoE\" --trust-remote-code --tensor-parallel-size=8 --gpu-memory-utilization=0.9 
--max-num-batched-tokens=2048 --max-num-seqs=512 --block-size=128 --max-model-len=32768 \n",[314],{"type":17,"tag":213,"props":315,"children":316},{"__ignoreMap":7},[317],{"type":23,"value":312},{"type":17,"tag":25,"props":319,"children":320},{},[321],{"type":23,"value":322},"执行以下命令，发送推理请求进行测试：",{"type":17,"tag":208,"props":324,"children":326},{"code":325},"curl http://localhost:8000/v1/completions -H \"Content-type: application/json\" -d '{\"model\": \"/home/work/PanguProMoE\", \"prompt\": \"[unused9]系统：[unused10][unused9]用户：请简单介绍一个北京的景点[unused10][unused9]助手：\", \"max_tokens\": 1024, \"temperature\": 0.0}' &\n",[327],{"type":17,"tag":213,"props":328,"children":329},{"__ignoreMap":7},[330],{"type":23,"value":325},{"type":17,"tag":25,"props":332,"children":333},{},[334,336],{"type":23,"value":335},"本文档提供的模型代码和镜像，当前仅限用于测试和体验昇思MindSpore盘古Pro MoE模型的推理服务化部署，不建议用于生产环境。如遇使用问题，欢迎反馈至Issue（",{"type":17,"tag":39,"props":337,"children":340},{"href":338,"rel":339},"https://gitee.com/mindspore/mindformers/issues/new%EF%BC%89%E3%80%82",[43],[341],{"type":23,"value":342},"https://gitee.com/mindspore/mindformers/issues/new）。",{"type":17,"tag":25,"props":344,"children":345},{},[346],{"type":23,"value":347},"基于昇思MindSpore对MoE架构的深度优化与vLLM的极致融合，盘古Pro MoE的开源标志着大模型在高效部署领域取得重大突破。本次开源不仅验证了昇腾AI处理器对千亿级稀疏大模型的全面支撑能力，更通过容器化封装、量化压缩与并行策略优化，实现分钟级一键开箱部署，为开发者提供开箱即用的MoE模型服务化体验。未来，昇思将持续支持主流大模型演进，并根据开源情况面向全体开发者提供镜像与支持。",{"title":7,"searchDepth":349,"depth":349,"links":350},4,[],"markdown","content:news:zh:3782.md","content","news/zh/3782.md","news/zh/3782","md",1776506089254]