[{"data":1,"prerenderedAt":497},["ShallowReactive",2],{"content-query-vTotdlchLw":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":491,"_id":492,"_source":493,"_file":494,"_stem":495,"_extension":496},"/technology-blogs/zh/3672","zh",false,"","支持GRPO强化学习训练全流程，揭秘MindSpore RLHF套件技术细节","近年来强化学习与大模型技术的融合日益深化，在此背景下，如何高效管理训练过程中的优化器激活值与权重释放，以及实现训练权重向推理阶段的转换，已成为强化学习中的必备技术。","2025-03-31","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/bd46dc39e8954df4b4b04ad74530a187.png","technology-blogs","实践",{"type":15,"children":16,"toc":479},"root",[17,25,31,36,41,66,84,92,97,102,110,115,120,125,130,135,140,148,153,158,163,168,173,178,183,188,193,198,208,213,225,240,245,252,257,262,269,274,289,294,299,306,311,316,321,328,333,338,343,350,355,360,365,372,377,382,387,402,421,428,433,438,451,456,461,466,471],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"支持grpo强化学习训练全流程揭秘mindspore-rlhf套件技术细节",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"近年来强化学习与大模型技术的融合日益深化，在此背景下，如何高效管理训练过程中的优化器激活值与权重释放，以及实现训练权重向推理阶段的转换，已成为强化学习中的必备技术。此外，利用SPMD架构的vLLM加速技术进行推理优化，也已成为提升系统整体性能的重要策略。",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":35},"不久之前，MindSpore携手鹏城实验室发布并开源了基于Qwen2.5（7B，32B）模型的GRPO强化学习训练全流程和代码，实现了组件化解耦训练流程与模型定义，通过训推共部署、训练和推理权重在线快速自动重排、异构内存Swap等技术，成功构建从硬件算力、算法优化到集群调度的完整技术链条。",{"type":18,"tag":26,"props":37,"children":38},{},[39],{"type":24,"value":40},"本次将围绕MindSpore RLHF套件中的关键技术进行进一步的分析，重点解读以下特性：",{"type":18,"tag":42,"props":43,"children":44},"ul",{},[45,51,56,61],{"type":18,"tag":46,"props":47,"children":48},"li",{},[49],{"type":24,"value":50},"训推共部署",{"type":18,"tag":46,"props":52,"children":53},{},[54],{"type":24,"value":55},"多维混合并行在线快速自动重排",{"type":18,"tag":46,"props":57,"children":58},{},[59],{"type":24,"value":60},"异构内存Swap",{"type":18,"tag":46,"props":62,"children":63},{},[64],{"type":24,"value":65},"支持vLLM",{"type":18,"tag":67,"props":68,"children":70},"h3",{"id":69},"_01-grpo的算法流程实现",[71,77,79],{"type":18,"tag":72,"props":73,"children":74},"strong",{},[75],{"type":24,"value":76},"# 01",{"type":24,"value":78}," ",{"type":18,"tag":72,"props":80,"children":81},{},[82],{"type":24,"value":83},"GRPO的算法流程实现",{"type":18,"tag":26,"props":85,"children":86},{},[87],{"type":18,"tag":88,"props":89,"children":91},"img",{"alt":7,"src":90},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/179455ab85c140fdb7bc37d14038f80f.png",[],{"type":18,"tag":26,"props":93,"children":94},{},[95],{"type":24,"value":96},"图 1：GRPO算法示意图",{"type":18,"tag":26,"props":98,"children":99},{},[100],{"type":24,"value":101},"GRPO（Group Relative Policy Optimization，组相对策略优化）是DeepSeek针对数学等逻辑推理任务提出的强化学习优化算法。通过GRPO算法的大规模后训练得到的DeepSeek R1-Zero和DeepSeek R1模型在逻辑推理能力上得到了显著提升，涌现出了长思维链和反思等深度思考能力，其在数学和编程任务上的表现已超越或媲美OpenAI o1系列模型。",{"type":18,"tag":26,"props":103,"children":104},{},[105],{"type":18,"tag":72,"props":106,"children":107},{},[108],{"type":24,"value":109},"从监督学习到强化学习",{"type":18,"tag":26,"props":111,"children":112},{},[113],{"type":24,"value":114},"在监督学习（Supervised Learning）中，模型通过“模仿”海量文本数据中的统计规律，学习预测下一个token。这种模式下，模型的目标是尽可能“复现”已有数据的分布，但缺乏主动优化的方向性。而强化学习（Reinforcement Learning, RL）则不同，它让模型（称为“智能体”）在动态环境中通过“试错”学习，根据反馈信号（奖励）自主探索最佳策略，这类似于人类通过实践反馈调整行为的过程。",{"type":18,"tag":26,"props":116,"children":117},{},[118],{"type":24,"value":119},"在RL框架中，三个核心要素构成闭环：",{"type":18,"tag":26,"props":121,"children":122},{},[123],{"type":24,"value":124},"* 环境（Environment）：模型交互的对象（例如用户提问的输入、代码执行结果反馈）。",{"type":18,"tag":26,"props":126,"children":127},{},[128],{"type":24,"value":129},"* 动作（Action）：模型对环境做出的反应（例如生成一段回答或代码）。",{"type":18,"tag":26,"props":131,"children":132},{},[133],{"type":24,"value":134},"* 奖励（Reward）：一个标量函数，量化动作的质量（例如答案正确性评分或人工偏好打分）。",{"type":18,"tag":26,"props":136,"children":137},{},[138],{"type":24,"value":139},"模型的目标是学习一个策略网络（Policy Network）——以LLM为例，其输出的token概率被视为一种“动作选择策略”。通过不断与环境交互，策略网络会逐渐倾向于选择能获得更高奖励的动作（例如生成更符合人类价值观或者与标准答案一致的回答）。这一过程的关键在于策略梯度（Policy Gradient） ：用奖励信号作为权重，反向传播调整策略网络的参数，类似于传统LLM训练中的反向传播，但梯度来源于环境反馈而非固定标签。",{"type":18,"tag":26,"props":141,"children":142},{},[143],{"type":18,"tag":72,"props":144,"children":145},{},[146],{"type":24,"value":147},"GRPO算法的突破：用“相对比较”替代“价值模型”，用“规则奖励”替代“奖励模型”",{"type":18,"tag":26,"props":149,"children":150},{},[151],{"type":24,"value":152},"传统RLHF算法（如PPO）需要同时训练策略网络，一个独立的价值模型（Value Network）来估计奖励期望值，和一个独立的奖励模型（Reward model）来给予实际奖励值，这类似于在LLM旁挂载一个“评分器”模型和一个用来模拟真实环境的“奖励模拟器”模型。但这种方法存在三个问题：",{"type":18,"tag":26,"props":154,"children":155},{},[156],{"type":24,"value":157},"* 训练复杂度高，需训练三个模型，平衡其训练目标。",{"type":18,"tag":26,"props":159,"children":160},{},[161],{"type":24,"value":162},"* 价值模型对奖励的绝对数值敏感，容易因奖励尺度变化导致训练不稳定。",{"type":18,"tag":26,"props":164,"children":165},{},[166],{"type":24,"value":167},"* 作为“奖励模拟器”的奖励模型并不能完全反映真实环境奖励（如人类价值观或数学问题的回答是否正确），存在被“欺骗”（reward hacking）的风险。",{"type":18,"tag":26,"props":169,"children":170},{},[171],{"type":24,"value":172},"DeepSeek提出的GRPO创新性地引入“组内相对比较”机制，并采取基于规则的奖励：",{"type":18,"tag":26,"props":174,"children":175},{},[176],{"type":24,"value":177},"* 组（Group） ：将同一输入生成的多条候选输出（例如4-8条回答）视为一组。",{"type":18,"tag":26,"props":179,"children":180},{},[181],{"type":24,"value":182},"* 相对价值：不再依赖价值模型，而是计算组内样本间的相对优劣关系（例如A比B好，B比C差）。",{"type":18,"tag":26,"props":184,"children":185},{},[186],{"type":24,"value":187},"* 基于规则的奖励：采用基于规则的显式奖励函数（如输出回答与标准答案的最终答案是否一致，输出代码是否通过所有测试用例）而不是一个奖励模型。",{"type":18,"tag":26,"props":189,"children":190},{},[191],{"type":24,"value":192},"这样的设计消除了对独立价值模型的依赖，直接利用同一批次样本中不同策略输出的相对优势评估，既降低了显存占用，又避免了因价值模型预测偏差引发的训练波动，显著提升了训练稳定性。同时，GRPO采用基于规则的显式奖励函数，而非奖励模型。这种方法减少了奖励函数被对抗性样本“欺骗”（reward hacking）的风险，使奖励信号更透明、可解释且与任务目标强对齐。",{"type":18,"tag":26,"props":194,"children":195},{},[196],{"type":24,"value":197},"MindSpore RLHF内部通过实现GRPOTrainer、GRPOConfig进行GRPO算法的控制和流程的实现。具体的运行脚本如下：",{"type":18,"tag":199,"props":200,"children":202},"pre",{"code":201},"\nmsrun --worker_num=8 --local_worker_num=8 --master_addr=127.0.0.1 \\\n    --master_port=9190 --join=False --log_dir=./qwen2_5_one_log \\\n    examples/grpo/qwen_grpo_tutorial/grpo_one_stage.py \\\n    --\n    sft_path_infer ./model_configs/qwen_grpo/predict_qwen2_5_7b_instruct.yaml \n    \\\n    --sft_path_train ./model_configs/qwen_grpo/finetune_qwen2_5_7b.yaml \\\n    --vocab_path /{path}/vocab.json \\\n    --merges_file_path /{path}/merges.txt \\\n    --mind_dataset_dir /{path}/gsm8k_train.mindrecord \\\n    --save_data_file /{path}/grpo.mindrecord \\\n    --save_ckpt_dir /{path}/save_ckpt \\\n    --use_parallel True \\\n    --load_sft_checkpoint_infer /{path}/infer_ckpt \\\n    --load_sft_checkpoint_train /{path}/train_ckpt \\\n    --load_ref_checkpoint /{path}/ref_ckpt \\\n    --enable_compile_cache False\n",[203],{"type":18,"tag":204,"props":205,"children":206},"code",{"__ignoreMap":7},[207],{"type":24,"value":201},{"type":18,"tag":26,"props":209,"children":210},{},[211],{"type":24,"value":212},"具体的配置和参数说明可以参考：",{"type":18,"tag":26,"props":214,"children":215},{},[216],{"type":18,"tag":217,"props":218,"children":222},"a",{"href":219,"rel":220},"https://gitee.com/mindspore/mindrlhf/blob/master/examples/grpo/qwen%5C_grpo%5C_tutorial/README.md",[221],"nofollow",[223],{"type":24,"value":224},"https://gitee.com/mindspore/mindrlhf/blob/master/examples/grpo/qwen\\_grpo\\_tutorial/README.md",{"type":18,"tag":67,"props":226,"children":228},{"id":227},"_02-细粒度的模型参数offloadload机制",[229,234,235],{"type":18,"tag":72,"props":230,"children":231},{},[232],{"type":24,"value":233},"# 02",{"type":24,"value":78},{"type":18,"tag":72,"props":236,"children":237},{},[238],{"type":24,"value":239},"细粒度的模型参数offload/load机制",{"type":18,"tag":26,"props":241,"children":242},{},[243],{"type":24,"value":244},"offload-load特性通过动态调度模型参数的存储位置，优化NPU显存使用：在生成阶段，仅保留推理模型（Infer Model）和参考模型（Reference Model）的参数在NPU上以加速推理计算，而将训练模型（Train Model）的参数卸载到CPU内存中；在训练阶段，则反向操作——将推理和参考模型的参数移至CPU，仅保留训练模型的参数在NPU上以支持梯度计算和参数更新。这种按需分配策略利用NPU的计算效率同时规避其显存限制，通过“热切换”模型参数的位置（NPU/CPU），显著降低峰值显存占用（例如从同时驻留三个模型降至仅一个），从而支持更大规模的模型训练或更高批次的并行处理，尤其适用于显存资源紧张的硬件环境。",{"type":18,"tag":26,"props":246,"children":247},{},[248],{"type":18,"tag":88,"props":249,"children":251},{"alt":7,"src":250},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/c120af7cf5ff4631aab3cf97becce619.png",[],{"type":18,"tag":26,"props":253,"children":254},{},[255],{"type":24,"value":256},"图 2权重卸载加载示意图",{"type":18,"tag":26,"props":258,"children":259},{},[260],{"type":24,"value":261},"在MindSpore RLHF提供的细粒度参数offload/load机制使能后，整个GRPO的显存占用几乎和微调、预训练的显存占用相同，如下图所示。通过MindSpore RLHF的细粒度的模型参数offload/load机制，假如单模型在单机8卡可以完成训练流程，那么同样的配置的情况下，MindSpore RLHF中整个GRPO流程的训练(包含训练、推理、权重在线重排等部分)也只需要单机8卡即可。",{"type":18,"tag":26,"props":263,"children":264},{},[265],{"type":18,"tag":88,"props":266,"children":268},{"alt":7,"src":267},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/a167c3dc60224f298cfbabea08583c9d.png",[],{"type":18,"tag":26,"props":270,"children":271},{},[272],{"type":24,"value":273},"图3 通过细粒度的权重重排优化前后的内存变化",{"type":18,"tag":67,"props":275,"children":277},{"id":276},"_03-多维混合并行的权重在线重排布",[278,283,284],{"type":18,"tag":72,"props":279,"children":280},{},[281],{"type":24,"value":282},"# 03",{"type":24,"value":78},{"type":18,"tag":72,"props":285,"children":286},{},[287],{"type":24,"value":288},"多维混合并行的权重在线重排布",{"type":18,"tag":26,"props":290,"children":291},{},[292],{"type":24,"value":293},"训练和推理在执行时的并行策略一般不同，导致它们的权重切分策略也会不相同，因此没有办法直接将训练的权重赋给推理。传统方法中，会将训练权重先落盘，然后离线的将训练的权重排布倒换成推理的排布，推理加载倒换后的ckpt再进行推理。这会导致训练和推理被分割成了两个进程，性能无法达到最优。",{"type":18,"tag":26,"props":295,"children":296},{},[297],{"type":24,"value":298},"因此MindSpore RLHF套件实现了训练权重排布到推理排布的在线转换，并能够实时更新至推理网络。该套件支持多种主流并行策略间的灵活转换，包括数据并行（DP）、模型并行（MP）、零冗余优化（ZeRO）、专家并行（EP）、长序列并行（CP）以及流水线并行（PP）等。下文将详细阐述几种典型的转换场景，并分析转换过程中涉及的通信。",{"type":18,"tag":26,"props":300,"children":301},{},[302],{"type":18,"tag":88,"props":303,"children":305},{"alt":7,"src":304},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/48fd15cac0f34323a33b46cb85cb2ce3.png",[],{"type":18,"tag":26,"props":307,"children":308},{},[309],{"type":24,"value":310},"图 4流水线并行的倒换",{"type":18,"tag":26,"props":312,"children":313},{},[314],{"type":24,"value":315},"图4为流水线并行场景下的权重倒换，训练策略为dp1mp2pp2，推理策略为dp2mp2pp1.由于mp的切分策略一致，因此不需要进行mp间的倒换，只需要进行pp间的聚合。",{"type":18,"tag":26,"props":317,"children":318},{},[319],{"type":24,"value":320},"在MindSpore RLHF套件中，rank2上的参数param10通过AllReduce算子与rank0上的zero tensor进行聚合，从而将param10的权重数据聚合到rank0上，完成rank0上的pp间权重聚合。类似地，其他rank也通过AllReduce算子将对应位置的权重数据进行聚合，确保各rank上的权重数据在推理阶段能够正确分布和加载。",{"type":18,"tag":26,"props":322,"children":323},{},[324],{"type":18,"tag":88,"props":325,"children":327},{"alt":7,"src":326},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/7092d7ae14f0403cb6a9755e77e7c314.png",[],{"type":18,"tag":26,"props":329,"children":330},{},[331],{"type":24,"value":332},"图 5模型并行的倒换",{"type":18,"tag":26,"props":334,"children":335},{},[336],{"type":24,"value":337},"图5为模型并行场景下的权重倒换，训练策略为dp1mp4pp1，推理策略为dp2mp2pp1.因为pp的切分策略一致，所以仅需要进行mp间的倒换，不需要pp间的聚合。",{"type":18,"tag":26,"props":339,"children":340},{},[341],{"type":24,"value":342},"mp4倒换到mp2会在rank0-rank3之间做两次AllGather通信让每张卡都获得一份完整的权重，然后每张卡通过StridedSlice获得自己需要的那一份权重。",{"type":18,"tag":26,"props":344,"children":345},{},[346],{"type":18,"tag":88,"props":347,"children":349},{"alt":7,"src":348},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/1c3eeacab1264b578ee4c243f324f0ce.png",[],{"type":18,"tag":26,"props":351,"children":352},{},[353],{"type":24,"value":354},"图 6长序列并行的倒换",{"type":18,"tag":26,"props":356,"children":357},{},[358],{"type":24,"value":359},"图6为长序列并行的倒换，训练策略为dp1mp2cp2pp1，推理策略为dp2mp2pp1.训练需要在cp维度做聚合，从而实现从训练到推理的倒换。",{"type":18,"tag":26,"props":361,"children":362},{},[363],{"type":24,"value":364},"mp2cp2倒换到mp2会首先对rank0-3的数据做AllConcat和AllGather，其中AllConcat的意思是在非0轴做一次聚合，比如将param00和param01在1轴做拼接，最终每张卡获得一份完整的权重。然后各自通过StridedSlice获得自己需要的那一份权重。",{"type":18,"tag":26,"props":366,"children":367},{},[368],{"type":18,"tag":88,"props":369,"children":371},{"alt":7,"src":370},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/9d409e1bb6f046608ce8c2e911192752.png",[],{"type":18,"tag":26,"props":373,"children":374},{},[375],{"type":24,"value":376},"图 7专家并行的倒换",{"type":18,"tag":26,"props":378,"children":379},{},[380],{"type":24,"value":381},"图7为专家并行的倒换，训练策略为dp1mp2ep2pp1，推理策略为dp2mp2pp1.训练需要在ep维度做聚合，从而实现从训练到推理的倒换。",{"type":18,"tag":26,"props":383,"children":384},{},[385],{"type":24,"value":386},"mp2ep2倒换到mp2仅性需要进行一次rank0和rank2或者rank1和rank3的AllGather即可。每个rank上，0轴的ep维度被汇聚后，训练权重倒换成了推理权重的排布。",{"type":18,"tag":67,"props":388,"children":390},{"id":389},"_04-推理和训练共进程",[391,396,397],{"type":18,"tag":72,"props":392,"children":393},{},[394],{"type":24,"value":395},"# 04",{"type":24,"value":78},{"type":18,"tag":72,"props":398,"children":399},{},[400],{"type":24,"value":401},"推理和训练共进程",{"type":18,"tag":26,"props":403,"children":404},{},[405,407,412,414,419],{"type":24,"value":406},"训练和推理模型需要做",{"type":18,"tag":72,"props":408,"children":409},{},[410],{"type":24,"value":411},"权重重排同步",{"type":24,"value":413},"（如图1流程），为了传输的代价最小，训练和推理得部署在同一个卡集合上。共进程可以更好的使能",{"type":18,"tag":72,"props":415,"children":416},{},[417],{"type":24,"value":418},"hccl的高速互联",{"type":24,"value":420},"能力。",{"type":18,"tag":26,"props":422,"children":423},{},[424],{"type":18,"tag":88,"props":425,"children":427},{"alt":7,"src":426},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/04/03/54d25e8e8dcb406f91f1c07c5af36161.png",[],{"type":18,"tag":26,"props":429,"children":430},{},[431],{"type":24,"value":432},"图 8 MindSpore RLHF训推共进程流程示意图",{"type":18,"tag":26,"props":434,"children":435},{},[436],{"type":24,"value":437},"MindSpore RLHF中为了进行推理部分的加速，使用了vLLM。vLLM 是一个专为LLM设计的高效推理和服务框架，通过优化内存管理和注意力机制，可以显著提升LLM的推理速度、吞吐量和资源利用率，其最重要的两个优化点为：",{"type":18,"tag":42,"props":439,"children":440},{},[441,446],{"type":18,"tag":46,"props":442,"children":443},{},[444],{"type":24,"value":445},"**PagedAttention：**传统LLM推理时，KV cache占用大量显存，且因序列长度可变导致内存碎片化。PA将KVcache分块为固定大小的“页”，按需动态分配和释放显存。显存利用率提升至99%以上，且能支持更长上下文。",{"type":18,"tag":46,"props":447,"children":448},{},[449],{"type":24,"value":450},"**Continuous Batching：**实时合并多个请求的输入，无需等待固定批次大小。可以提高GPU利用率，减少等待时间。",{"type":18,"tag":26,"props":452,"children":453},{},[454],{"type":24,"value":455},"在具体实现的过程中，MIndSpore RLHF结合了vLLM最新版本的主要特性，主要做了以下两个方面的结合：",{"type":18,"tag":26,"props":457,"children":458},{},[459],{"type":24,"value":460},"1、 由于vllm0.7版本以下的版本都是子进程启动推理服务，为了实现训推共进程，需要做一些定制化适配和封装。",{"type":18,"tag":26,"props":462,"children":463},{},[464],{"type":24,"value":465},"2、 主要是继承了原始的vllm，以支持所需要的一些功能，比如取出特定计算结果、sync/offload params，weight loader的兼容等；",{"type":18,"tag":26,"props":467,"children":468},{},[469],{"type":24,"value":470},"当前MindSpore RLHF结合vLLM的训练脚本和微调脚本类似，我们增加了use_vllm来控制vllm功能的开关。训练Qwen2.5 7B，单机8卡的指令如下：",{"type":18,"tag":199,"props":472,"children":474},{"code":473},"msrun --worker_num=8 --local_worker_num=8 \\\n  --master_addr=127.0.0.1 --master_port=9188 \\\n  --join=True --log_dir=./qwen2_vllm_log \\\n  examples/qwen_grpo_tutorial/grpo_one_stage.py \\\n  --sft_path_infer./model_configs/qwen_grpo/predict_qwen2_7b_instruct.yaml \\\n  --sft_path_train ./model_configs/qwen_grpo/finetune_qwen2_7b.yaml \\\n  --vocab_path /path/to/vocab.json \\\n  --merges_file_path /path/to/merges.txt \\\n  --mind_dataset_dir /path/to/cvalues_one_1024.mindrecord \\\n  --save_data_file /path/to/grpo_1024.mindrecord \\\n  --save_ckpt_dir /path/to/ckpt/train \\\n  --use_parallel True \\\n  --enable_compile_cache False \\\n  --load_sft_checkpoint_infer \"/path/to/ckpt/infer\" \\\n  --load_sft_checkpoint_train \"/path/to/ckpt/train\" \\\n  --use_vllm 1 \\\n  --hf_config_path \"/path/to/config.json\"\n",[475],{"type":18,"tag":204,"props":476,"children":477},{"__ignoreMap":7},[478],{"type":24,"value":473},{"title":7,"searchDepth":480,"depth":480,"links":481},4,[482,485,487,489],{"id":69,"depth":483,"text":484},3,"# 01 GRPO的算法流程实现",{"id":227,"depth":483,"text":486},"# 02 细粒度的模型参数offload/load机制",{"id":276,"depth":483,"text":488},"# 03 多维混合并行的权重在线重排布",{"id":389,"depth":483,"text":490},"# 04 推理和训练共进程","markdown","content:technology-blogs:zh:3672.md","content","technology-blogs/zh/3672.md","technology-blogs/zh/3672","md",1776506133072]