[{"data":1,"prerenderedAt":647},["ShallowReactive",2],{"content-query-G4oimn6gnx":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":641,"_id":642,"_source":643,"_file":644,"_stem":645,"_extension":646},"/technology-blogs/zh/2025-12-11","zh",false,"","昇思人工智能框架峰会 | MindSpore Transformers套件架构升级，实现LLM模型天级迁移，工作量降低85%+","对MindSpore Transformers套件的架构升级进行了深入解读","2025-12-11","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/06/13/ca51743e8e31470a9d8d989f4b463985.png","technology-blogs","实践",{"type":15,"children":16,"toc":638},"root",[17,25,31,58,66,74,79,105,116,121,128,133,140,145,152,162,170,175,180,190,207,214,222,237,244,252,260,268,276,281,286,291,296,301,309,314,319,327,335,340,345,353,361,366,371,379,391,398,406,419,424,433,449,461,470,475,483,491,504,512,519,531,539,544,552,557,565,578,586,594,633],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"昇思人工智能框架峰会-mindspore-transformers套件架构升级实现llm模型天级迁移工作量降低85",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"据悉，昇思MindSpore开源社区将于 2025 年 12 月 25日在杭州举办昇思人工智能框架峰会。本次峰会的昇思人工智能框架技术发展与行业实践论坛将讨论到昇思MindSpore大模型套件技术进展与实践，MindSpore Transformers SIG的核心贡献者将在昇思开发者动手实践workshop设立开发者动手实践体验，带领开发者体验使用昇思MindSpore Transformers大模型套件实现高效训推。本文对MindSpore Transformers套件的架构升级进行了深入解读，揭示其如何实现迁移效率的提升。",{"type":18,"tag":26,"props":32,"children":33},{},[34,36,42,44,49,51,56],{"type":24,"value":35},"为应对大模型开发中生态割裂、迁移复杂与并行编程难度高的挑战，",{"type":18,"tag":37,"props":38,"children":39},"strong",{},[40],{"type":24,"value":41},"MindSpore Transformers套件完成Mcore架构重大升级",{"type":24,"value":43},"。本次升级通过",{"type":18,"tag":37,"props":45,"children":46},{},[47],{"type":24,"value":48},"极简化的模型迁移开发范式",{"type":24,"value":50},"与",{"type":18,"tag":37,"props":52,"children":53},{},[54],{"type":24,"value":55},"原生支持Hugging 
Face生态的零修改复用",{"type":24,"value":57},"两大核心技术革新，显著降低大模型在昇腾平台上的开发与部署门槛，实现从开源模型到高效训练、推理的端到端敏捷链路。",{"type":18,"tag":26,"props":59,"children":60},{},[61],{"type":18,"tag":37,"props":62,"children":63},{},[64],{"type":24,"value":65},"# 01",{"type":18,"tag":26,"props":67,"children":68},{},[69],{"type":18,"tag":37,"props":70,"children":71},{},[72],{"type":24,"value":73},"模型迁移开发范式革新——从“重复造轮子”到“配置化搭建复用”",{"type":18,"tag":26,"props":75,"children":76},{},[77],{"type":24,"value":78},"传统模型迁移是一项繁重且低效的工作：开发者不仅需要花费数周时间从零开始重写模型的配置、核心代码和分词器，还要在不同但结构相似的模型间进行大量的重复实现，导致“重复造轮子”。此外，训练与推理接口的高度耦合使得代码维护困难，针对某个主力模型的深度优化也难以被其他模型复用，整体迭代和共享效率低下。",{"type":18,"tag":26,"props":80,"children":81},{},[82,84,89,91,96,98,103],{"type":24,"value":83},"如今，全新的MCore架构通过深度拥抱开源生态与模块化设计，彻底重构了这一流程（如图1）。我们建立了",{"type":18,"tag":37,"props":85,"children":86},{},[87],{"type":24,"value":88},"模板化、声明式的模型开发范式",{"type":24,"value":90},"，将开发重心从编写大量代码转向灵活的配置定义；并对Transformer核心组件进行了",{"type":18,"tag":37,"props":92,"children":93},{},[94],{"type":24,"value":95},"标准化接口抽象",{"type":24,"value":97},"，确保各项高性能优化能力能在所有模型中沉淀和共享；其次，实现了对Hugging Face生态的",{"type":18,"tag":37,"props":99,"children":100},{},[101],{"type":24,"value":102},"零修改复用",{"type":24,"value":104},"，可以直接使用其模型配置与分词器。最终，用户的主要工作量被简化为轻量的配置适配，从而将模型迁移的整体工作量降低了90%，成功实现了从“周级”到“天级”的敏捷迁移。",{"type":18,"tag":106,"props":107,"children":109},"div",{"style":108},"text-align: center;",[110],{"type":18,"tag":111,"props":112,"children":115},"img",{"src":113,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-1.jpg","display: block;margin: 0 auto;max-width:70%",[],{"type":18,"tag":26,"props":117,"children":118},{},[119],{"type":24,"value":120}," \n具体而言，基于代理模式的设计思想，我们底层定义了GPTModel通用预训练模型基类，封装了绝大多数同构模型（如Qwen、DeepSeek、Llama）的共性结构。外层以代理方式实现Hugging Face式的表层实现（见图2），使得使用方式和Hugging 
Face社区一致，但降低了维护成本。",{"type":18,"tag":106,"props":122,"children":123},{"style":108},[124],{"type":18,"tag":111,"props":125,"children":127},{"src":126,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-2.jpg",[],{"type":18,"tag":26,"props":129,"children":130},{},[131],{"type":24,"value":132}," \n如图2所示，针对训练和推理两大场景，对于每个模型我们实现了训练、推理两个模型接口，并在实际任务中根据训练或推理场景，通过工厂类来获取训练或推理的模型实例。同时我们提供了训练和推理两套高阶transformer接口，针对于训练和推理所需的并行优化与底层算子的不同。提供的接口包含Attention、MLP、Embedding等等的Transformer典型结构（如图3），封装了高阶的并行能力，用户只需配置每种并行模式的切分数量，而无需关注接口中每个算子的切分逻辑。并统一使用了Mint高精度算子，做到了接口级的精度对齐。",{"type":18,"tag":106,"props":134,"children":135},{"style":108},[136],{"type":18,"tag":111,"props":137,"children":139},{"src":138,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-3.jpg",[],{"type":18,"tag":26,"props":141,"children":142},{},[143],{"type":24,"value":144}," \n基于此，我们引入了ModuleSpec声明式配置机制。开发者无需再深入底层，硬编码式地逐行编写或修改模型的前向传播代码。而是可以像搭积木一样，通过简洁的ModuleSpec接口，声明式地指定：“在这里使用Multi-Head Attention算法”、“在那里采用SwiGLU激活的MLP”。系统便会自动将这些标准化、模块化的组件，按照基类定义的模板组装成完整的模型。这种机制不仅让模型搭建变得灵活高效，便于快速实验不同组件组合，其清晰的模块边界更使得对Attention、Norm等单一模块进行独立的精度对齐与性能调优成为可能，大幅提升了开发调试的效率。",{"type":18,"tag":106,"props":146,"children":147},{"style":108},[148],{"type":18,"tag":111,"props":149,"children":151},{"src":150,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-4.jpg",[],{"type":18,"tag":26,"props":153,"children":154},{},[155,157],{"type":24,"value":156}," \n",{"type":18,"tag":37,"props":158,"children":159},{},[160],{"type":24,"value":161},"# 02",{"type":18,"tag":26,"props":163,"children":164},{},[165],{"type":18,"tag":37,"props":166,"children":167},{},[168],{"type":24,"value":169},"无缝对接Hugging Face生态——零修改复用模型配置与权重",{"type":18,"tag":26,"props":171,"children":172},{},[173],{"type":24,"value":174}," \nMcore架构设计的原则之一是生态友好，核心目标是与主流开源社区“说同一种语言”，实现Hugging Face生态的零修改、开箱即用，将开发者的适配成本降至最低。",{"type":18,"tag":26,"props":176,"children":177},{},[178],{"type":24,"value":179}," 
\nMcore架构实现了对Hugging Face模型仓库（Model Hub）的本地读取，包括对模型配置、模型权重的读取与自动转换，以及Tokenizer分词器的复用，用户仅需在Yaml配置文件中配置模型仓库的本地地址，即可加载模型配置、权重和Tokenizer分词器。",{"type":18,"tag":181,"props":182,"children":184},"pre",{"code":183},"# 示例：加载Hugging Face模型\npretrained_model_dir:   \"/path/to/Qwen3-32B\"\n",[185],{"type":18,"tag":186,"props":187,"children":188},"code",{"__ignoreMap":7},[189],{"type":24,"value":183},{"type":18,"tag":191,"props":192,"children":193},"ul",{},[194],{"type":18,"tag":195,"props":196,"children":197},"li",{},[198,200,205],{"type":24,"value":199},"  配置自动转换：通过复用Hugging Face模型的配置类configuration_model.py，用户可直接使用来自Hugging Face的config.json配置文件。Mcore通过",{"type":18,"tag":37,"props":201,"children":202},{},[203],{"type":24,"value":204},"配置装饰器机制",{"type":24,"value":206},"，在运行时补全MindSpore Transformers的特有参数，并删除无关配置参数，最后自动转换成统一的TransformerConfig，以实例化模型结构（如图5）。\n         \n   ",{"type":18,"tag":106,"props":208,"children":209},{"style":108},[210],{"type":18,"tag":111,"props":211,"children":213},{"src":212,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-5.jpg",[],{"type":18,"tag":181,"props":215,"children":217},{"code":216},"@register_mf_model_parameter(\n      mf_model_kwargs=MFModelConfig(\n          pad_token_id=151643,\n          block_size=32,\n          num_blocks=1024,\n          normalization='RMSNorm',\n          add_bias_linear=False,\n          gated_linear_unit=True,\n          use_contiguous_weight_layout_attention=False\n    ))\n@ignore_and_delete_parameter(extra_ignore_param=[\n      ('max_window_layers', NotSupportedInfo.useless),\n      ('sliding_window', NotSupportedInfo.useless),\n      ('use_sliding_window', NotSupportedInfo.useless),\n      ('layer_types', 
NotSupportedInfo.useless),\n])\n",[218],{"type":18,"tag":186,"props":219,"children":220},{"__ignoreMap":7},[221],{"type":24,"value":216},{"type":18,"tag":191,"props":223,"children":224},{},[225],{"type":18,"tag":195,"props":226,"children":227},{},[228,230,235],{"type":24,"value":229},"  **权重自动转换：",{"type":18,"tag":37,"props":231,"children":232},{},[233],{"type":24,"value":234},"通过架构内置的",{"type":24,"value":236},"自动化权重名称映射系统，**直接加载Hugging Face标准的safetensors权重文件。对于新增模型，仅需实现权重参数名转换的映射表，即可在加载权重时自动将Hugging Face社区模型的参数名称映射至Mcore的内部结构。用户无需关心权重的分布式切分，训练和推理场景下权重均可进行自动切分并加载。\n   ",{"type":18,"tag":106,"props":238,"children":239},{"style":108},[240],{"type":18,"tag":111,"props":241,"children":243},{"src":242,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-6.jpg",[],{"type":18,"tag":181,"props":245,"children":247},{"code":246},"def   generate_mapping(self):\n          mapping_rules = {\n              '.linear_q_down_proj.': ('.linear_qkv_down_proj.',   '.linear_q_down_proj.', 'q_down'),\n              '.linear_kv_down_proj.': ('.linear_qkv_down_proj.',   '.linear_kv_down_proj.', 'kv_down'),\n              '.linear_q_up_proj.': ('.linear_q_up_proj.', '.linear_q_up_proj.',   'q_up'),\n              '.linear_kv_up_proj.': ('.linear_kv_up_proj.', '.linear_kv_up_proj.',   'kv_up'),\n              '.linear_q.': ('.linear_qkv.', '.linear_q.', 'q'),\n              '.linear_k.': ('.linear_qkv.', '.linear_k.', 'k'),\n              '.linear_v.': ('.linear_qkv.', '.linear_v.', 'v'),\n              '.linear_kv': ('.linear_qkv.', '.linear_kv.', 'kv'),\n              '.mlp.gating.': ('.mlp.linear_fc1.', '.mlp.gating.', 'gating'),\n              '.mlp.hidden.': ('.mlp.linear_fc1.', '.mlp.hidden.', 'hidden'),\n              '.mlp.shared_experts.gating.': ('.mlp.shared_experts.linear_fc1.',   '.mlp.shared_experts.gating.', 'gating'),\n              '.mlp.shared_experts.hidden.': ('.mlp.shared_experts.linear_fc1.',   '.mlp.shared_experts.hidden.', 'hidden')\n        
}\n        \n          stacked_params_mapping = []\n        for   _, mcore_name in self.weight_mapping:\n              for pattern, stacked_param in mapping_rules.items():\n                  if pattern in mcore_name:\n                      stacked_params_mapping.append(stacked_param)\n                      break\n          return stacked_params_mapping\n",[248],{"type":18,"tag":186,"props":249,"children":250},{"__ignoreMap":7},[251],{"type":24,"value":246},{"type":18,"tag":191,"props":253,"children":254},{},[255],{"type":18,"tag":195,"props":256,"children":257},{},[258],{"type":24,"value":259},"  **Tokenizer分词器复用：**MindSpore Transformers现已接入Hugging Face Tokenizer，通过读取模型仓库中的词表文件和分词器配置，进行实例化并用于数据编解码。",{"type":18,"tag":26,"props":261,"children":262},{},[263],{"type":18,"tag":37,"props":264,"children":265},{},[266],{"type":24,"value":267},"# 03",{"type":18,"tag":26,"props":269,"children":270},{},[271],{"type":18,"tag":37,"props":272,"children":273},{},[274],{"type":24,"value":275},"Mcore架构模型迁移流程解读——模型三步标准化迁移",{"type":18,"tag":26,"props":277,"children":278},{},[279],{"type":24,"value":280},"基于Mcore架构迁移一个全新的模型，开发者通常只需准备三类核心文件：",{"type":18,"tag":26,"props":282,"children":283},{},[284],{"type":24,"value":285},"1、模型配置类文件：继承Hugging Face原配置，通过装饰器补全所需配置，忽略无关配置。",{"type":18,"tag":26,"props":287,"children":288},{},[289],{"type":24,"value":290},"2、模型类文件：继承自GPTModel等基类，通常仅需百行代码定义特殊结构。",{"type":18,"tag":26,"props":292,"children":293},{},[294],{"type":24,"value":295},"3、权重参数映射文件：声明Hugging Face与Mcore间的参数名对应关系。",{"type":18,"tag":26,"props":297,"children":298},{},[299],{"type":24,"value":300}," \n以Qwen3为例，主要包含以下几类核心文件：\n ",{"type":18,"tag":191,"props":302,"children":303},{},[304],{"type":18,"tag":195,"props":305,"children":306},{},[307],{"type":24,"value":308},"  **模型配置类文件：**configuration_qwen3.py\n     ",{"type":18,"tag":26,"props":310,"children":311},{},[312],{"type":24,"value":313},"定义了Qwen3的模型配置。直接复用了Hugging 
Face的配置定义，并加入了装饰器声明需要补全和忽略的配置项。",{"type":18,"tag":26,"props":315,"children":316},{},[317],{"type":24,"value":318}," \n以下代码片段展示了装饰器部分：",{"type":18,"tag":181,"props":320,"children":322},{"code":321},"@MindFormerRegister.register(MindFormerModuleType.CONFIG, legacy=False, search_names='qwen3')\nclass Qwen3Config(PretrainedConfig):\n    # ...\n    @register_mf_model_parameter(\n        mf_model_kwargs=MFModelConfig(\n            pad_token_id=151643,\n            block_size=32,\n            num_blocks=1024,\n            normalization='RMSNorm',\n            add_bias_linear=False,\n            gated_linear_unit=True,\n            use_contiguous_weight_layout_attention=False\n        ))\n    @ignore_and_delete_parameter(extra_ignore_param=[\n        ('max_window_layers', NotSupportedInfo.useless),\n        ('sliding_window', NotSupportedInfo.useless),\n        ('use_sliding_window', NotSupportedInfo.useless),\n        ('layer_types', NotSupportedInfo.useless),\n    ])\n    def __init__(\n    # ...\n",[323],{"type":18,"tag":186,"props":324,"children":325},{"__ignoreMap":7},[326],{"type":24,"value":321},{"type":18,"tag":191,"props":328,"children":329},{},[330],{"type":18,"tag":195,"props":331,"children":332},{},[333],{"type":24,"value":334},"  **模型类文件：**modeling_qwen3.py、modeling_qwen3_infer.py、model_qwen3_train.py\n   ",{"type":18,"tag":26,"props":336,"children":337},{},[338],{"type":24,"value":339},"分别定义了Qwen3的模型工厂类、推理模型和训练模型。推理和训练模型使用GPTModel抽象接口和ModuleSpec机制搭建模型结构。\n ",{"type":18,"tag":26,"props":341,"children":342},{},[343],{"type":24,"value":344},"以下代码片段展示了Qwen3训练模型的构造声明部分：",{"type":18,"tag":181,"props":346,"children":348},{"code":347},"class TrainingQwen3ForCausalLM(TrainModelMixin, Qwen3PreTrainedModel):\n    def __init__(self, config: Qwen3Config):\n        super().__init__(config, auto_prefix=False)\n        config: TransformerConfig = self.convert_to_transformer_config(self.config)\n        self.model = GPTModel(\n            config=config,\n            
transformer_layer_spec=get_gpt_layer_local_spec(\n                qk_layernorm=True,\n                use_contiguous_weight_layout_attention=config.use_contiguous_weight_layout_attention,\n                use_interleaved_weight_layout_mlp=config.use_interleaved_weight_layout_mlp\n            ),\n            vocab_size=config.vocab_size,\n            max_sequence_length=config.max_position_embeddings,\n            position_embedding_type=config.position_embedding_type,\n            rotary_base=self.config.rope_theta,\n            share_embeddings_and_output_weights=self.config.tie_word_embeddings,\n            post_process=self.config.post_process\n        )\n    # ...\n",[349],{"type":18,"tag":186,"props":350,"children":351},{"__ignoreMap":7},[352],{"type":24,"value":347},{"type":18,"tag":191,"props":354,"children":355},{},[356],{"type":18,"tag":195,"props":357,"children":358},{},[359],{"type":24,"value":360},"  **权重参数映射文件：**utils.py\n   ",{"type":18,"tag":26,"props":362,"children":363},{},[364],{"type":24,"value":365},"定义了Qwen3权重参数的映射表，映射Hugging Face模型参数和MindSpore Transformers模型参数。\n ",{"type":18,"tag":26,"props":367,"children":368},{},[369],{"type":24,"value":370},"以下代码片段展示了Qwen3权重参数的映射表：",{"type":18,"tag":181,"props":372,"children":374},{"code":373},"weight_mapping = [\n        ('model.embed_tokens.', 'embedding.word_embeddings.'),\n        ('.self_attn.q_proj.', '.self_attention.linear_q.'),\n        ('.self_attn.k_proj.', '.self_attention.linear_k.'),\n        ('.self_attn.v_proj.', '.self_attention.linear_v.'),\n        ('.self_attn.o_proj.', '.self_attention.linear_proj.'),\n        ('.self_attn.q_norm.', '.self_attention.q_layernorm.'),\n        ('.self_attn.k_norm.', '.self_attention.k_layernorm.'),\n        ('.mlp.gate_proj.', '.mlp.gating.'),\n        ('.mlp.down_proj.', '.mlp.linear_fc2.'),\n        ('.mlp.up_proj.', '.mlp.hidden.'),\n        ('.post_attention_layernorm.', '.pre_mlp_layernorm.'),\n        ('model.norm.', 'decoder.final_layernorm.'),\n    
    ('lm_head.', 'output_layer.'),\n        ('model.layers.', 'decoder.layers.')\n    ]\n",[375],{"type":18,"tag":186,"props":376,"children":377},{"__ignoreMap":7},[378],{"type":24,"value":373},{"type":18,"tag":26,"props":380,"children":381},{},[382,384,389],{"type":24,"value":383}," \nMcore架构的新迁移模式将模型迁移的开发工作量降低了",{"type":18,"tag":37,"props":385,"children":386},{},[387],{"type":24,"value":388},"一个数量级",{"type":24,"value":390},"。以迁移DeepSeek-V3为例，与原有架构的代码量对比如下（单位：代码行数loc）：",{"type":18,"tag":106,"props":392,"children":393},{"style":108},[394],{"type":18,"tag":111,"props":395,"children":397},{"src":396,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-7.jpg",[],{"type":18,"tag":26,"props":399,"children":400},{},[401],{"type":18,"tag":37,"props":402,"children":403},{},[404],{"type":24,"value":405},"# 04",{"type":18,"tag":26,"props":407,"children":408},{},[409,417],{"type":18,"tag":37,"props":410,"children":411},{},[412],{"type":18,"tag":37,"props":413,"children":414},{},[415],{"type":24,"value":416},"“一键”启动Hugging Face模型微调与推理",{"type":24,"value":418},"\n ",{"type":18,"tag":26,"props":420,"children":421},{},[422],{"type":24,"value":423},"开发完成上面章节介绍的三类文件后，可以通过MindSpore Transformers的通用流程，读取Hugging Face下载的模型仓库，“一键”快速拉起微调和推理任务。下面以Qwen3-0.6B为例，展示了拉起微调和推理的具体步骤：\n ",{"type":18,"tag":26,"props":425,"children":426},{},[427,432],{"type":18,"tag":37,"props":428,"children":429},{},[430],{"type":24,"value":431},"1、前置准备",{"type":24,"value":418},{"type":18,"tag":26,"props":434,"children":435},{},[436,438,447],{"type":24,"value":437},"请参考安装指南（",{"type":18,"tag":439,"props":440,"children":444},"a",{"href":441,"rel":442},"https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/installation.html%EF%BC%89%E5%87%86%E5%A4%87MindSpore",[443],"nofollow",[445],{"type":24,"value":446},"https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/installation.html）准备MindSpore",{"type":24,"value":448}," Transformers的运行环境，选择1.7.0版本的MindSpore 
Transformers，安装配套版本的依赖软件。",{"type":18,"tag":26,"props":450,"children":451},{},[452,454],{"type":24,"value":453},"从Hugging Face下载Qwen3-0.6B（",{"type":18,"tag":439,"props":455,"children":458},{"href":456,"rel":457},"https://huggingface.co/Qwen/Qwen3-0.6B/tree/main%EF%BC%89%E4%BB%93%E5%BA%93%E8%87%B3%E6%9C%AC%E5%9C%B0%E3%80%82",[443],[459],{"type":24,"value":460},"https://huggingface.co/Qwen/Qwen3-0.6B/tree/main）仓库至本地。",{"type":18,"tag":26,"props":462,"children":463},{},[464,469],{"type":18,"tag":37,"props":465,"children":466},{},[467],{"type":24,"value":468},"2、启动微调任务",{"type":24,"value":418},{"type":18,"tag":26,"props":471,"children":472},{},[473],{"type":24,"value":474},"执行以下命令启动微调任务：",{"type":18,"tag":181,"props":476,"children":478},{"code":477},"cd mindformers\n# 单卡训练（通过命令直接修改yaml，关闭权重自动切分和并行模式）\npython run_mindformer.py \\\n--config configs/qwen3/finetune_qwen3.yaml \\\n--auto_trans_ckpt False \\\n--use_parallel False\n",[479],{"type":18,"tag":186,"props":480,"children":481},{"__ignoreMap":7},[482],{"type":24,"value":477},{"type":18,"tag":181,"props":484,"children":486},{"code":485},"cd mindformers\n# 8卡训练\nbash scripts/msrun_launcher.sh \"run_mindformer.py \\\n--config configs/qwen3/finetune_qwen3.yaml\" 8\n",[487],{"type":18,"tag":186,"props":488,"children":489},{"__ignoreMap":7},[490],{"type":24,"value":485},{"type":18,"tag":26,"props":492,"children":493},{},[494,496,502],{"type":24,"value":495},"上述命令执行完毕后，多卡训练任务将在后台执行，过程日志保存在",{"type":18,"tag":186,"props":497,"children":499},{"className":498},[],[500],{"type":24,"value":501},"./output/msrun_log",{"type":24,"value":503},"下，使用以下命令可实时查看训练状态：",{"type":18,"tag":181,"props":505,"children":507},{"code":506},"tail -f 
./output/msrun_log/worker_0.log\n",[508],{"type":18,"tag":186,"props":509,"children":510},{"__ignoreMap":7},[511],{"type":24,"value":506},{"type":18,"tag":106,"props":513,"children":514},{"style":108},[515],{"type":18,"tag":111,"props":516,"children":518},{"src":517,"style":114,"alt":7},"/category/information/technology-blogs/banner/2025-12-11-8.jpg",[],{"type":18,"tag":26,"props":520,"children":521},{},[522,524],{"type":24,"value":523},"更多训练的相关说明请参考训练指南（",{"type":18,"tag":439,"props":525,"children":528},{"href":526,"rel":527},"https://www.mindspore.cn/mindformers/docs/zh-CN/master/guide/llm_training.html%EF%BC%89%E3%80%82",[443],[529],{"type":24,"value":530},"https://www.mindspore.cn/mindformers/docs/zh-CN/master/guide/llm_training.html）。",{"type":18,"tag":26,"props":532,"children":533},{},[534],{"type":18,"tag":37,"props":535,"children":536},{},[537],{"type":24,"value":538},"3、启动推理任务",{"type":18,"tag":26,"props":540,"children":541},{},[542],{"type":24,"value":543},"准备推理任务的配置文件predict_qwen3.yaml。执行以下命令启动单卡推理，支持在命令参数中直接修改yaml文件中的配置。其中设置pretrained_model_dir为步骤1中下载的Qwen3-0.6B仓库地址。",{"type":18,"tag":181,"props":545,"children":547},{"code":546},"python run_mindformer.py \\\n--config configs/qwen3/predict_qwen3.yaml \\\n--pretrained_model_dir /path/to/Qwen3-0.6B \\\n--predict_data '帮助我制定一份去上海的旅游攻略'\n",[548],{"type":18,"tag":186,"props":549,"children":550},{"__ignoreMap":7},[551],{"type":24,"value":546},{"type":18,"tag":26,"props":553,"children":554},{},[555],{"type":24,"value":556},"上述命令执行完毕后，日志会打印在控制台。出现如下结果，证明推理成功。",{"type":18,"tag":181,"props":558,"children":560},{"code":559},"- INFO - output result is: [{'text_generation_text': 
['帮助我制定一份去上海的旅游攻略，包括景点、交通、住宿、美食、活动']}]\n",[561],{"type":18,"tag":186,"props":562,"children":563},{"__ignoreMap":7},[564],{"type":24,"value":559},{"type":18,"tag":26,"props":566,"children":567},{},[568,570,577],{"type":24,"value":569},"更多推理的相关说明请参考推理指南（",{"type":18,"tag":439,"props":571,"children":574},{"href":572,"rel":573},"https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/guide/inference.html%EF%BC%89%E3%80%82%E4%BA%A6%E5%8F%AF%E5%8F%82%E8%80%83%E6%9C%8D%E5%8A%A1%E5%8C%96%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97%EF%BC%88https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/guide/deployment.html%EF%BC%89%E8%BF%9B%E8%A1%8C%E6%A8%A1%E5%9E%8B%E9%83%A8%E7%BD%B2%EF%BC%88%EF%BC%89%E3%80%82",[443],[575],{"type":24,"value":576},"https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/guide/inference.html）。亦可参考服务化部署指南（https://www.mindspore.cn/mindformers/docs/zh-CN/r1.7.0/guide/deployment.html）进行模型部署。",{"type":24,"value":418},{"type":18,"tag":26,"props":579,"children":580},{},[581],{"type":18,"tag":37,"props":582,"children":583},{},[584],{"type":24,"value":585},"# 05",{"type":18,"tag":26,"props":587,"children":588},{},[589],{"type":18,"tag":37,"props":590,"children":591},{},[592],{"type":24,"value":593},"总结",{"type":18,"tag":26,"props":595,"children":596},{},[597,599,604,606,611,613,617,619,624,626,631],{"type":24,"value":598}," \nMindSpore Transformers套件的Mcore架构升级，是一次以",{"type":18,"tag":37,"props":600,"children":601},{},[602],{"type":24,"value":603},"开发者效率",{"type":24,"value":605},"和",{"type":18,"tag":37,"props":607,"children":608},{},[609],{"type":24,"value":610},"生态兼容性",{"type":24,"value":612},"为核心的系统性工程。通过实现与Hugging 
Face的",{"type":18,"tag":37,"props":614,"children":615},{},[616],{"type":24,"value":102},{"type":24,"value":618},"，它消除了框架迁移的主要障碍；通过提供",{"type":18,"tag":37,"props":620,"children":621},{},[622],{"type":24,"value":623},"极简的ModuleSpec模型搭建机制",{"type":24,"value":625},"，它将开发重心从重复编码转向配置化复用搭建，减少85%+迁移工作量；通过提供",{"type":18,"tag":37,"props":627,"children":628},{},[629],{"type":24,"value":630},"高阶并行接口",{"type":24,"value":632},"，它让大模型训推实现配置化并行和接口级精度对齐。\n ",{"type":18,"tag":26,"props":634,"children":635},{},[636],{"type":24,"value":637},"这套组合方案为企业和研究机构在昇思生态上快速落地、迭代大模型提供了坚实的技术底座，使其能更敏捷地响应技术变化，将资源聚焦于模型创新与应用本身。",{"title":7,"searchDepth":639,"depth":639,"links":640},4,[],"markdown","content:technology-blogs:zh:2025-12-11.md","content","technology-blogs/zh/2025-12-11.md","technology-blogs/zh/2025-12-11","md",1776506118134]