[{"data":1,"prerenderedAt":614},["ShallowReactive",2],{"content-query-eaXmG2JqB8":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":608,"_id":609,"_source":610,"_file":611,"_stem":612,"_extension":613},"/technology-blogs/zh/3621","zh",false,"","重生之大腿带我征服 MindSpore（二）： Tensor 失忆了，请别让他被坏设备拐走了！","开场白","2025-02-18","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/f395fcd2f7e44368aacf25582d6f75dc.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":590},"root",[17,25,85,90,131,143,148,155,184,190,207,244,249,278,286,303,310,315,338,343,353,365,370,375,380,391,398,402,418,423,430,435,442,447,454,459,466,471,476,483,488,495,500,507,512,516,532,539,544,549,554,559,564,568,572,576,581,585],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"重生之大腿带我征服-mindspore二-tensor-失忆了请别让他被坏设备拐走了",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"ol",{},[29,34,65,70,75,80],{"type":18,"tag":30,"props":31,"children":32},"li",{},[33],{"type":24,"value":9},{"type":18,"tag":30,"props":35,"children":36},{},[37,39],{"type":24,"value":38},"访谈环节Tensor 为什么和设备有关\n",{"type":18,"tag":26,"props":40,"children":41},{},[42,55,60],{"type":18,"tag":30,"props":43,"children":44},{},[45,47],{"type":24,"value":46},"Tensor 在设备上的分布怎么计算\n",{"type":18,"tag":26,"props":48,"children":49},{},[50],{"type":18,"tag":30,"props":51,"children":52},{},[53],{"type":24,"value":54},"形式化表达",{"type":18,"tag":30,"props":56,"children":57},{},[58],{"type":24,"value":59},"从具体分布倒推设备矩阵",{"type":18,"tag":30,"props":61,"children":62},{},[63],{"type":24,"value":64},"问题分析实战",{"type":18,"tag":30,"props":66,"children":67},{},[68],{"type":24,"value":69},"自由提问环节",{"type":18,"tag":30,"props":71,"children":72},{},[73],{"type":24,"value":74},"大腿带躺环节",{"type":18,"tag":30,"props":76,"children":77},{},[78],{"type":24,"value":79},"免责声明",{"type":18,"tag":30,"props":81,"children":82},{},[83],{"type":24,"value":84},"致谢",{"type":18,"tag":86,"props":87,"children":88},"h2",{"id":9},[89],{"type":24,"value":9},{"type":18,"tag":91,"props":92,"children":93},"p",{},[94,96,102,104,109,111,116,118,122,124,129],{"type":24,"value":95},"【",{"type":18,"tag":97,"props":98,"children":99},"strong",{},[100],{"type":24,"value":101},"主持人致辞",{"type":24,"value":103},"】: 大家好，欢迎大家莅临本期的《重生之大腿带我征服 MindSpore》栏目。今天，我们很荣幸地邀请到并行大神-->小章哥哥，带领我们探索在 mindspore 中tensor 的分布策略，欢迎小章哥哥！！ 【小章",{"type":18,"tag":97,"props":105,"children":106},{},[107],{"type":24,"value":108},"哥哥",{"type":24,"value":110},"】: 咳咳咳，谢谢大家。 【",{"type":18,"tag":97,"props":112,"children":113},{},[114],{"type":24,"value":115},"观众",{"type":24,"value":117},"】：鼓掌！ 【小章",{"type":18,"tag":97,"props":119,"children":120},{},[121],{"type":24,"value":108},{"type":24,"value":123},"】：你这个栏目的名字为什么这么奇怪，为什么叫 《 Tensor 失忆了，请别让他被坏设备拐走了！》，我大概理解你的意思，就是有点奇怪。 【",{"type":18,"tag":97,"props":125,"children":126},{},[127],{"type":24,"value":128},"主持人",{"type":24,"value":130},"】：嗯嗯，是的，我回去看看一下。 其实是因为我是《标题党》",{"type":18,"tag":91,"props":132,"children":133},{},[134,136,141],{"type":24,"value":135},"============【",{"type":18,"tag":97,"props":137,"children":138},{},[139],{"type":24,"value":140},"快速进入正题",{"type":24,"value":142},"】 ========== 【主持人】本期栏目聚焦于在多个设备的场景，需要对某一个数据进行切分时，计算该部分数据归属于哪一个设备。也就是说，假如大家是一个 Tensor，面临着切分的情况，即需要把自己分给若干个设备，如果去错了设备，就会导致计算错误。因此，请不要让某个设备把不属于自己的 Tensor 带回家！ 
(Forcibly tying back to the title.)

## Interview

### Why is a Tensor tied to devices?

【**Question 1**】: Brother Xiao Zhang, in MindSpore, what does a Tensor have to do with devices? Isn't it just allocated on some machine? Why would it be tied to a device?

【**Brother Xiao Zhang**】: Ahem (tactical throat-clearing). If you are running on a single card, then your understanding is correct. But when training large models, one card usually cannot hold all of the model's tensors. Think of a "card" as a device, such as a 910A or 910B. When the model is big enough, its tensors have to be split up: a single tensor may be spread across different cards, which is called model parallelism; or the tensors of several consecutive layers sit on one card while the next few layers sit on another, which is called pipeline parallelism. Either way, you need to know which tensors land on which devices, and then gather their computation results back together.

【**Host**】: I see. So one tensor split across multiple devices is model parallelism, and consecutive layers of tensors spread across multiple devices is pipeline parallelism.

【**Brother Xiao Zhang**】: More or less.

### How is a Tensor's placement across devices computed?

【**Question 2**】: Brother Xiao Zhang, since a model's tensors may be distributed over multiple devices, how do we work out which device a given tensor shard is assigned to?

【**Brother Xiao Zhang**】: Let me start with a simple example. Model parallelism means putting pieces of a weight onto different devices, so let's first work out the 2-card case. MatMul is one of the most heavily used operators in today's large models, so I'll explain with it. MatMul has two inputs, which we'll write as $X$ and $W$, so its output is $Y = XW$.

With 2 cards in total, we have

$$Y = [X_1, X_2] \begin{bmatrix} W_1 \\ W_2 \end{bmatrix} = X_1 W_1 + X_2 W_2.$$

Here $X$ is split by column: a tensor of shape `[512, 512]` is cut into two `[512, 256]` tensors. $W$ is split by row: a `[512, 512]` tensor is cut into two `[256, 512]` tensors.

So with our two cards, where do $X_1, X_2, W_1, W_2$ go? Notice that $X_1$ computes with $W_1$, and $X_2$ computes with $W_2$. Naturally, then, $X_1, W_1$ go on card 0 and $X_2, W_2$ go on card 1, because you don't want to fetch data from another card, i.e. do communication, before you can compute.
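To make this concrete, here is a minimal NumPy sketch (plain Python, not the MindSpore API; the shapes are the ones from the example above) checking that the two cards' partial products sum to the full result:

```python
import numpy as np

# Two-card model parallelism for MatMul, simulated on one machine.
X = np.random.rand(512, 512)
W = np.random.rand(512, 512)

X1, X2 = X[:, :256], X[:, 256:]   # column split: two [512, 256] shards
W1, W2 = W[:256, :], W[256:, :]   # row split:    two [256, 512] shards

# Card 0 computes X1 @ W1, card 1 computes X2 @ W2; an AllReduce
# (here just a plain sum) combines the two partial results.
Y_parallel = X1 @ W1 + X2 @ W2

assert np.allclose(X @ W, Y_parallel)  # matches the single-card product
```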
【**Host**】: Oh, got it. But that was computed by hand. Is there a general formula or algorithm?

【**Audience**】: (inner monologue: That's it? That's all?)

【**Brother Xiao Zhang**】: (inner monologue: heh heh, the show is just getting started) Good, everyone has that down, so let's try something harder. Still a MatMul, but input $X$ now has sharding strategy (2, 4): split into 2 along the rows and 4 along the columns. Input $W$ has strategy (4, 2): split into 4 along the rows and 2 along the columns. That means we need 16 cards, so let's draw the layout of $X$ and $W$ (**note: we will use this layout to compute coordinates later**):

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/23aec8ecbacc4c4ab14cb2804e5e7560.png)

【**An audience member**】: (what is this about?) X-0 -> W-0, X-1 -> W-1, ...

【**Brother Xiao Zhang**】: Now we need to find the device rank for each shard of $X$ and $W$. First, write out the ranks in order:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/e617637b04094eb3baf2d368d2300c59.png)

Notice that we gave $X$ and $W$ the strategies (2, 4) and (4, 2). Since the 4 is the shared dimension (the one the two matrices multiply along), the two strategies can be merged into (2, 4, 2). This is called the device matrix. We then expand it into the corresponding 3 columns: X_map covers the first 2 of the 3 columns, and W_map covers the last 2. Filling in X_map and W_map gives us what is called the tensor map, whose dimensionality matches the device matrix.

【**An audience member**】: (mind drifting off into the void~)

【**Another audience member**】: Whoa, this gentleman's attention span is remarkable~

【**===== Tapping the little blackboard =====**】

Alright, with this table in hand, we can compute the position of every shard of the Tensor.

**Take X-5 as an example.** $X$'s strategy is (2, 4), so X-5's coordinate is (1, 1). We look for the cells whose X_map is (1, 1) and fill in X-5 there. The high 2 dimensions at rank 10 and rank 11 are (1, 1), so the cells for X-5 are rank 10 and rank 11.

Next let's work out **W-3**. $W$'s layout is (4, 2), so W-3's coordinate is also (1, 1). The cells whose W_map is (1, 1) are rank 3 and rank 11, so W-3 is placed on rank 3 and rank 11.

#### A formal expression

So, is there a formal expression? Of course there is. Given a device matrix $[m_1, m_2, m_3]$ and tensor index $[k_1, k_2, k_3]$: for the tensor mapped to the high dimensions, whose shard index is $(k_1, k_2)$, the corresponding rank indices are $k_1 \cdot (m_2 m_3) + k_2 \cdot m_3 + [0, \dots, m_3 - 1]$.

Let's verify. For X-5, the coordinate is $[1, 1]$, so its ranks are $1 \cdot (4 \cdot 2) + 1 \cdot 2 + [0, 1] = [10, 11]$.

Analogously, for the tensor mapped to the low dimensions, whose shard index is $(k_2, k_3)$, the rank indices are $[0, \dots, m_1 - 1] \cdot (m_2 m_3) + k_2 \cdot m_3 + k_3$. Taking W-3 as an example, its coordinate is $[1, 1]$, so its rank indices are $[0, 1] \cdot (4 \cdot 2) + 1 \cdot 2 + 1 = [3, 11]$.

【**Brother Xiao Zhang**】: Of course, this is only a quick, shallow walkthrough; anyone interested in exploring further can consult the MindSpore source code themselves.
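For readers who want to poke at the formula, here is a small Python sketch (my own illustration, not the MindSpore source) that reproduces the two verifications above:

```python
# Device matrix [m1, m2, m3]: X is sharded over the high dims (m1, m2),
# W over the low dims (m2, m3); the remaining dim enumerates the replicas.
def x_shard_ranks(coord, device_matrix):
    m1, m2, m3 = device_matrix
    k1, k2 = coord
    return [k1 * (m2 * m3) + k2 * m3 + k3 for k3 in range(m3)]

def w_shard_ranks(coord, device_matrix):
    m1, m2, m3 = device_matrix
    k2, k3 = coord
    return [k1 * (m2 * m3) + k2 * m3 + k3 for k1 in range(m1)]

dm = [2, 4, 2]                      # the device matrix from the example
print(x_shard_ranks((1, 1), dm))    # X-5 -> [10, 11]
print(w_shard_ranks((1, 1), dm))    # W-3 -> [3, 11]
```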
With that, we can fill in the full table and see where every shard of $X$ and $W$ sits on the devices:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/8f8244b9512441eaa02e09f90ff27bb0.png)

### Deriving the device matrix from a concrete layout

【**Host**】: Brother Xiao Zhang! What is this tensor_map actually for? We haven't used it at all so far.

【**Brother Xiao Zhang**】: Patience, here it comes (heh heh).

The device matrices we have seen so far are usually laid out in order, for example like this:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/36285eaf56b14b35a59afee8944c000c.png)

Its corresponding device-matrix layout looks like this:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/877fb3914d3646e7b1326136d7eeca35.png)

Of course, it can also produce a layout like the following, by flipping the tensor_map from (1, 0) to (0, 1):

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/8f08c517835d49cbaf4dfbf35e6c96a8.png)

The corresponding table looks like this:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/b8c49adf5a724b77b2b1abe57b39e01a.png)

Because the tensor_map here is (0, 1), which differs from the device matrix's default numbering (1, 0), the X_map column needs a remapping: its values run in the opposite order to the rightmost column of the device matrix.
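As a sketch of that remapping (the row-major rank layout and the right-to-left numbering of device-matrix axes are my assumptions about the convention, chosen to match the examples above), here is how permuting the tensor map transposes the shard layout on a 4-card (2, 2) device matrix:

```python
# shard_coord: the shard's coordinate in the tensor's sharding grid.
# tensor_map[i] names the device-matrix axis (counted from the right,
# rightmost = 0) along which tensor dimension i is split.
def shard_rank(shard_coord, device_matrix, tensor_map):
    n = len(device_matrix)
    dev_coord = [0] * n
    for tdim, ddim in enumerate(tensor_map):
        dev_coord[n - 1 - ddim] = shard_coord[tdim]
    # Ranks are laid out row-major over the device matrix.
    rank, stride = 0, 1
    for size, c in zip(reversed(device_matrix), reversed(dev_coord)):
        rank += c * stride
        stride *= size
    return rank

dm = [2, 2]
grid = [(k1, k2) for k1 in range(2) for k2 in range(2)]
print([shard_rank(c, dm, (1, 0)) for c in grid])  # default: [0, 1, 2, 3]
print([shard_rank(c, dm, (0, 1)) for c in grid])  # flipped: [0, 2, 1, 3]
```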
Now suppose we want a completely arbitrary layout, such as the one below. How do we get that?

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/d240abb6cd4f4b0ba47743c78a79edc2.png)

Following the same recipe, we can write down the device-matrix layout below, but there is no tensor map that expresses it formally! For example, we expect X-4 to be on rank_1, yet no matter how we permute (0, 1) we cannot produce 4; we can only get the two results $0 \cdot 2 + 1 \cdot 1 = 1$ or $1 \cdot 2 + 1 \cdot 0 = 2$. 【Note】: the permutation here means swapping the high and low dimensions of the tensor map.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/114bc6a8e48f420088b5d4eb4f68427c.png)

Two dimensions can't do it, so shall we try three?

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/5ccc10ca44604ac681768daf8926b278.png)

Hey, that works, doesn't it? Look at how X_re_map is constructed: compared with our default tensor map, it simply swaps the order of the high and low dimensions. In other words, by changing the tensor_map from (2, 1, 0) to (0, 1, 2), we can find the device matrix and tensor map that realize this particular layout.

### Hands-on problem analysis

【**Host**】: Brother Xiao Zhang, everyone should have digested the material by now. Do you have a related problem they can practice on?

【**Brother Xiao Zhang**】: Yes, I think they're ready. Here is a sharding-strategy problem; see whether you can spot what's wrong.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/02/21/489cc4358c9240198d533b215245d847.png)

【**The room**】: Dead silent, except for the sound of pens scratching on paper...

【**Audience member 1**】: (muttering) The matmul strategy is (1,8),(8,1). Why is the rank list 0, 12, 24, ...?

【**Audience member 2**】: This isn't 8 cards, is it? That's beyond the syllabus~

【**Brother Xiao Zhang**】: It looks like everyone is about done, so let's go through it together. The error here is a failure to create a communication group, and the root cause is that the MatMul operator failed to initialize; its configured strategy is (1,8),(8,1). As expected behavior, when MatMul is configured with a column split, its AllReduce stays within a single machine; that is, its rank_list should be {0-7}.

Moreover, even when dp, pp, and other strategies are configured, tp still gets the highest priority, where priority means the assignment order within a machine. For example, when dp = tp = pp = 2, there are 4 tp communication groups, namely {0,1}, {2,3}, {4,5}, {6,7}, and the dp groups are {0,2}, {1,3}, {4,6}, {5,7} (a small sketch of this enumeration follows below).

Only under redundant computation does tp's priority drop. Redundant computation means you have one model but your devices are not fully used, so different devices end up doing the same work. The simplest case is 4 cards with a MatMul strategy of (1,2),(2,1): cards {0,2} and {1,3} then perform duplicate computation.

Back to our problem: the likely cause is that 96 cards were configured but the strategy was only (1,8),(8,1), so the AllReduce communication domain for tp spans machines. Changing the strategy to (12,8),(8,1) fixes it.
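As promised above, here is a toy enumeration (a hypothetical helper, not MindSpore's implementation) showing how those groups fall out when the tp axis varies fastest within a machine:

```python
# With rank = pp_idx * (dp * tp) + dp_idx * tp + tp_idx, the tp axis
# varies fastest, so tp groups are contiguous and dp groups are strided.
def comm_groups(dp, tp, pp):
    tp_groups, dp_groups = [], []
    for p in range(pp):
        base = p * dp * tp
        for d in range(dp):
            tp_groups.append([base + d * tp + t for t in range(tp)])
        for t in range(tp):
            dp_groups.append([base + d * tp + t for d in range(dp)])
    return tp_groups, dp_groups

tp_g, dp_g = comm_groups(dp=2, tp=2, pp=2)
print(tp_g)  # [[0, 1], [2, 3], [4, 5], [6, 7]]
print(dp_g)  # [[0, 2], [1, 3], [4, 6], [5, 7]]
```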
## Open Q&A

## Big-leg lounging segment

## Disclaimer

The dialogue between the characters in this article is 【purely fictional】. Any resemblance is purely coincidental; please 【don't take it personally】.

## Acknowledgements

Thanks to this episode's guest: Brother Xiao Zhang. Thanks to the show's resident guest: Boss Bin. Thanks to the crew: myself. And thanks to the audience...