[{"data":1,"prerenderedAt":325},["ShallowReactive",2],{"content-query-wIn3WYmG4Q":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":319,"_id":320,"_source":321,"_file":322,"_stem":323,"_extension":324},"/news/zh/450","zh",false,"","盘古α：华为联合鹏城实验室开源业界首个2000亿参数中文预训练模型","由华为诺亚方舟实验室、华为中央软件院、鹏城实验室以及北京大学相关技术团队组建的中文超大规模预训练语言「盘古α」联合攻关团队，首次基于“鹏城云脑Ⅱ”和国产MindSpore框架的自动混合并行模式实现在2048卡算力集群上的大规模分布式训练，训练出业界首个2000亿超大参数中文预训练模型“盘古α”。","2021-04-26","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/26/a9b3c4aee8c7427ba4b73b3729b49e29.png","news",{"type":14,"children":15,"toc":310},"root",[16,24,30,43,53,64,75,80,85,96,106,111,116,121,126,131,139,144,150,155,162,167,172,177,184,190,195,200,207,212,217,224,229,236,241,248,253,258,263,273,278,283,288,299],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"盘古α华为联合鹏城实验室开源业界首个2000亿参数中文预训练模型",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":29},"4月26日，华为诺亚方舟实验室与中央软件院Mindspore团队等多部门，同鹏城实验室、北大等联合发布「盘古α」，这是业界首个2000亿参数中文预训练模型！",{"type":17,"tag":25,"props":31,"children":32},{},[33,35],{"type":23,"value":34},"代码地址：",{"type":17,"tag":36,"props":37,"children":41},"a",{"href":38,"rel":39},"https://gitee.com/mindspore/mindspore/tree/r1.2/model_zoo/official/nlp/pangu_alpha",[40],"nofollow",[42],{"type":23,"value":38},{"type":17,"tag":25,"props":44,"children":45},{},[46],{"type":17,"tag":47,"props":48,"children":50},"span",{"style":49},"font-family: ;",[51],{"type":23,"value":52}," @font-face{ font-family:\"Times New Roman\"; } @font-face{ font-family:\"宋体\"; } @font-face{ font-family:\"Calibri\"; } p.MsoNormal{ mso-style-name:正文; mso-style-parent:\"\"; margin:0pt; margin-bottom:.0001pt; mso-pagination:none; text-align:justify; text-justify:inter-ideograph; font-family:Calibri; mso-fareast-font-family:宋体; mso-bidi-font-family:'Times New Roman'; font-size:10.5000pt; mso-font-kerning:1.0000pt; } span.10{ font-family:Calibri; } span.15{ font-family:Calibri; color:rgb(0,0,255); text-decoration:underline; text-underline:single; } span.msoIns{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:underline; text-underline:single; color:blue; } span.msoDel{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:line-through; color:red; } @page{mso-page-border-surround-header:no; mso-page-border-surround-footer:no;}@page Section0{ } div.Section0{page:Section0;}",{"type":17,"tag":25,"props":54,"children":55},{},[56,58],{"type":23,"value":57},"详细技术报告Arxiv：",{"type":17,"tag":36,"props":59,"children":62},{"href":60,"rel":61},"https://arxiv.org/",[40],[63],{"type":23,"value":60},{"type":17,"tag":25,"props":65,"children":66},{},[67,69],{"type":23,"value":68},"代码/模型OpenI启智开源地址：",{"type":17,"tag":36,"props":70,"children":73},{"href":71,"rel":72},"https://git.openi.org.cn/PCL-Platform.Intelligence/PanGu-AIpha",[40],[74],{"type":23,"value":71},{"type":17,"tag":25,"props":76,"children":77},{},[78],{"type":23,"value":79},"由华为诺亚方舟实验室、华为中央软件院、鹏城实验室以及北京大学相关技术团队组建的中文超大规模预训练语言「盘古α」联合攻关团队，首次基于“鹏城云脑Ⅱ”和国产MindSpore框架的自动混合并行模式实现在2048卡算力集群上的大规模分布式训练，训练出业界首个2000亿超大参数中文预训练模型“盘古α”。 盘古α引入随机词序生成，增加预训练难度，提升模型能力。引入预测模块（Predictor），预训练阶段通过位置向量诱导输出。同时支持理解和生成任务，相比于GPT，盘古α模型设计阶段就考虑了其持续学习演化的能力，一是为了节省计算资源，还支持从顺序自回归模型过渡到随机词序自回归模型的增量训练，不同阶段的持续学习能力让模型具备随机词序的生成，具备更强的NLU能力。 不仅如此，盘古α在模型设计上还引入硬件亲和概念，是算法设计协同华为全栈式软硬件生态（MindSpore+CANN+昇腾910+ModelArts）性能和实力的一次完美亮相，牵引了超大规模自动化并行训练技术走向成熟，是国产全栈式AI基础设施支持2000亿级超大规模语言模型训练的第1次，验证了国产E级智算平台在软硬件协同优化、大规模分布式并行训练等核心关键技术的可行性，形成了国产自主可控的通用超大规模分布式训练基座及相关核心技术。",{"type":17,"tag":25,"props":81,"children":82},{},[83],{"type":23,"value":84},"盘古α模型支持丰富的场景应用，在知识问答、知识检索、知识推理、阅读理解等文本生成领域表现突出，例如：",{"type":17,"tag":25,"props":86,"children":87},{},[88,94],{"type":17,"tag":89,"props":90,"children":91},"strong",{},[92],{"type":23,"value":93},"Input:",{"type":23,"value":95}," 中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里？",{"type":17,"tag":25,"props":97,"children":98},{},[99,104],{"type":17,"tag":89,"props":100,"children":101},{},[102],{"type":23,"value":103},"Generate:",{"type":23,"value":105}," 中国的首都是北京,美国的首都是华盛顿,日本的首都是东京,法国的首都是巴黎,澳大利亚的首都是堪培拉。",{"type":17,"tag":25,"props":107,"children":108},{},[109],{"type":23,"value":110},"亮点：",{"type":17,"tag":25,"props":112,"children":113},{},[114],{"type":23,"value":115},"l 业界首个2000亿参数中文自回归语言模型「盘古α」；",{"type":17,"tag":25,"props":117,"children":118},{},[119],{"type":23,"value":120},"l 代码、模型逐步全开源；",{"type":17,"tag":25,"props":122,"children":123},{},[124],{"type":23,"value":125},"l MindSpore超大规模自动并行技术；",{"type":17,"tag":25,"props":127,"children":128},{},[129],{"type":23,"value":130},"l 模型基于国产全栈式软硬件协同生态（MindSpore+CANN+昇腾910）;",{"type":17,"tag":25,"props":132,"children":133},{},[134],{"type":17,"tag":135,"props":136,"children":138},"img",{"alt":7,"src":137},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/f7f943b4e1bc4b80bcf2413cf30119db.jpg",[],{"type":17,"tag":25,"props":140,"children":141},{},[142],{"type":23,"value":143},"预训练模型如今已经成为深度学习研究中的一种主流范式，国内有学者认为「深度学习已经从『大炼模型』步入到『练大模型』的阶段」，通过设计先进的算法，整合尽可能多的数据，汇聚大量算力，集约化地训练大模型，供大量企业使用，这是必然趋势。",{"type":17,"tag":145,"props":146,"children":148},"h2",{"id":147},"数据集的准备",[149],{"type":23,"value":147},{"type":17,"tag":25,"props":151,"children":152},{},[153],{"type":23,"value":154},"海量语料是预训练模型研究的基础，联合团队从开源开放数据集、common crawl网页数据、电子书等收集了近80TB原始数据。",{"type":17,"tag":25,"props":156,"children":157},{},[158],{"type":17,"tag":135,"props":159,"children":161},{"alt":7,"src":160},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/b05f29e268e44fd0be09d6abcbf270e3.jpg",[],{"type":17,"tag":25,"props":163,"children":164},{},[165],{"type":23,"value":166},"搭建了面向大型语料库预处理的分布式集群，通过数据清洗过滤、去重、质量评估等处理流程，构建了一个约1.1TB的高质量中文语料数据集，经统计Token数量约为250B规模。通过对不同的开源数据集独立进行处理，完全清除了跟下游任务相关的标签信息，以保证源数据的无偏性。",{"type":17,"tag":145,"props":168,"children":170},{"id":169},"模型结构",[171],{"type":23,"value":169},{"type":17,"tag":25,"props":173,"children":174},{},[175],{"type":23,"value":176},"在transformer结构之上，增加query层。query层的基本结构与transformer层相似，通过引入了一个额外的Query layer，来预测生成下一个query Q的位置。",{"type":17,"tag":25,"props":178,"children":179},{},[180],{"type":17,"tag":135,"props":181,"children":183},{"alt":7,"src":182},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/51620cb6127e443aa80c1dc76a04041b.jpg",[],{"type":17,"tag":145,"props":185,"children":187},{"id":186},"mindspore超大规模自动并行",[188],{"type":23,"value":189},"MindSpore超大规模自动并行",{"type":17,"tag":25,"props":191,"children":192},{},[193],{"type":23,"value":194},"大集群下高效训练千亿至万亿参数模型，用户需要综合考虑参数量、计算量、计算类型、集群带宽拓扑和样本数量等才能设计出性能较优的并行切分策略，模型编码出来考虑算法以外，还需要编写大量并行切分和通信代码。",{"type":17,"tag":25,"props":196,"children":197},{},[198],{"type":23,"value":199},"MindSpore是业界首个支持全自动并行的框架，MindSpore多维度自动并行，通过数据并行、算子级模型并行、Pipeline模型并行、优化器模型并行、异构并行、重计算、高效内存复用，及拓扑感知调度，实现整体迭代时间最小（计算时间+通信时间）。编程接口高效易用，实现了算法逻辑和并行逻辑解耦，串行代码自动分布式并行。",{"type":17,"tag":25,"props":201,"children":202},{},[203],{"type":17,"tag":135,"props":204,"children":206},{"alt":7,"src":205},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/c28c16cfcd2947b89fbef3960b84a69e.jpg",[],{"type":17,"tag":145,"props":208,"children":210},{"id":209},"下游任务评估",[211],{"type":23,"value":209},{"type":17,"tag":25,"props":213,"children":214},{},[215],{"type":23,"value":216},"为了评估模型性能，团队收集了16个不同类型的中文下游任务，如下图所示：",{"type":17,"tag":25,"props":218,"children":219},{},[220],{"type":17,"tag":135,"props":221,"children":223},{"alt":7,"src":222},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/ee5cf90c3a764ccf9063f11324f6cfc8.jpg",[],{"type":17,"tag":25,"props":225,"children":226},{},[227],{"type":23,"value":228},"由于中文缺少在小样本学习领域的benchMark，研究对比了智源研究院联合发布的首个26亿参数的中文预训练语言模型「悟道·文源」CPM， 通过在1.1TB数据中策略抽样了100GB等量数据集训练了2.6B参数规模的「盘古α」模型，并在已收集的16个下游任务上进行了对比，结果如下表所示：",{"type":17,"tag":25,"props":230,"children":231},{},[232],{"type":17,"tag":135,"props":233,"children":235},{"alt":7,"src":234},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/c1792dfd26cb44478cbe3d879fabcdcb.jpg",[],{"type":17,"tag":25,"props":237,"children":238},{},[239],{"type":23,"value":240},"实验结果表明盘古α-2.6B比CPM-2.6B模型具有更强的语言学习能力，特别是在小样本学习和生成任务方面。在生成任务方面, 盘古α-2.6B比CPM-2.6B性能指标平均提升6.5个百分点。在PPL任务方面，盘古α-2.6B在OCNLI、TNEWS和 IFLYTEK任务上略弱于CPM。这一现象归因于模型使用了更大规模的词表，这使得模型在局部文本变化时对困惑度不敏感。",{"type":17,"tag":25,"props":242,"children":243},{},[244],{"type":17,"tag":135,"props":245,"children":247},{"alt":7,"src":246},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/04/27/7a957a33d7694d40b81795e246a03a01.jpg",[],{"type":17,"tag":25,"props":249,"children":250},{},[251],{"type":23,"value":252},"团队还对比了盘古α-13B和盘古α-2.6B模型在这些下游任务上的表现，在所有的生成任务和大部分的PPL任务上，13B的模型性能优于2.6B。在CMRC2018、DRCD和WebQA任务中，小样本学习比零样本学习指标高出10分以上，说明盘古α-13B模型具有较强的小样本学习能力。在NLI和文本分类任务上，盘古α-13B与盘古α-2.6B的性能相当，这些任务对于生成语言模型来说通常是困难的，而且模型改进的机会很大，这将是团队未来的工作。",{"type":17,"tag":25,"props":254,"children":255},{},[256],{"type":23,"value":257},"盘古α-200B的模型文件大小在TB级别，下游任务推理耗时耗力，还有更多优化加速的空间，团队正在共同努力完成推理和测评，尽快同步相关研究成果。",{"type":17,"tag":25,"props":259,"children":260},{},[261],{"type":23,"value":262},"部分生成举例：",{"type":17,"tag":264,"props":265,"children":267},"pre",{"code":266},"Input: 上联：欢天喜地度佳节下联：\nGenerate: 花好月圆庆团圆\n \nInput: 小红是小明的姐姐，小红的年纪比小明\nGenerate: 大\n \nInput: 四川的省会是哪里？\nGenerate: 成都,简称蓉,别称蓉城、锦城、天府之国,是四川省省会、副省级市、特大城市、中国西部地区重要的中心城市、国家重要的现代制造业基地、西南地区综合交通枢纽。\n \nInput：中国的四大发明有哪些？\nGenerate ：四大发明是：造纸术、活字印刷术、火药、指南针。\n\nInput：2+1=2，2+2=4，2+3=5，2+4=6，2+6=\nGenerate ：8,2+7=9,2+8=10\n \nInput：命题A是假命题，那么A的逆否命题是\nGenerate ：假命题。\n \nInput:阅读文章：玛丽亚·特蕾西亚广场（Maria-Theresien-Platz）是维也纳的一个大型广场，毗邻该市的环形中心大道内环路（Ringstraße），以及博物馆区（现代艺术博物馆，位于昔日皇家马厩）。\n......\n而历史较久的这两座博物馆更加令人愉快，更具文化内涵。艺术史博物馆拥有丰富的收藏，这里有北欧大师的著名作品，例如勃鲁盖尔的《巴别塔》，以及古代世界艺术丰富的收藏。在楼梯井的屋顶是奥地利著名艺术家古斯塔夫·克里姆特所作绝妙的壁画。自然史博物馆拥有丰富的蝴蝶和其他昆虫收藏，以及古代动物藏品 - 例如祁连山的马，爪哇犀牛和渡渡鸟残骸。这个博物馆的显微剧院很有名，显示微生物的幻灯片，它的两个蜘蛛蟹是日本天皇送给神圣罗马帝国皇帝弗朗茨·约瑟夫的礼物。在楼梯井可以看到描绘皇帝弗朗茨·约瑟夫、皇后玛丽亚·特蕾西亚和她的小型猎犬的绘画。\nQuestion：艺术史博物馆收藏勃鲁盖尔的哪部作品？\nGenerate：勃鲁盖尔的《巴别塔》，这是他的代表作。\n",[268],{"type":17,"tag":269,"props":270,"children":271},"code",{"__ignoreMap":7},[272],{"type":23,"value":266},{"type":17,"tag":145,"props":274,"children":276},{"id":275},"总结",[277],{"type":23,"value":275},{"type":17,"tag":25,"props":279,"children":280},{},[281],{"type":23,"value":282},"近年来，语言模型的规模越来越大，以大模型为基础探索通用智能的道路也远远没有到尽头，国内产业和学术界在对大模型的探索上也在加紧进行，超大规模模型对于AI学术研究已经产生了深刻影响，大规模的AI设备集群和通用性的软硬件生态协同越来越成为信息时代急需的基础设施，未来制约人工智能发展的不仅仅是对人才的竞争，大科学装置和对多场景应用的通用全栈式技术生态的不断发展进化，也越来越重要。",{"type":17,"tag":25,"props":284,"children":285},{},[286],{"type":23,"value":287},"借助了“鹏城云脑Ⅱ”E级智算平台，基于国产MindSpore框架和昇腾软硬件栈的2000亿参数盘古α模型是我们在超大规模AI模型上迈出的里程碑式的一步，团队第一次实现了基于华为软硬件全栈式的超大规模模型的分布式训练、在超大规模分布式训练技术，超大规模模型设计，超大规模模型优化，超大规模模型推理等方面积累了大量经验，为后续研究打下坚实基础，并指明了道路，相关成果也会逐步完全开源。团队欢迎相关领域的科研和工程人才加入，共同推进通用人工智能技术的发展和进步！ 转自 公众号：诺亚实验室",{"type":17,"tag":25,"props":289,"children":290},{},[291],{"type":17,"tag":47,"props":292,"children":293},{"style":49},[294],{"type":17,"tag":47,"props":295,"children":296},{"style":49},[297],{"type":23,"value":298}," @font-face{ font-family:\"Times New Roman\"; } @font-face{ font-family:\"宋体\"; } @font-face{ font-family:\"Calibri\"; } @font-face{ font-family:\" \"; } p.MsoNormal{ mso-style-name:正文; mso-style-parent:\"\"; margin:0pt; margin-bottom:.0001pt; mso-pagination:none; text-align:justify; text-justify:inter-ideograph; font-family:Calibri; mso-fareast-font-family:宋体; mso-bidi-font-family:'Times New Roman'; font-size:10.5000pt; mso-font-kerning:1.0000pt; } span.msoIns{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:underline; text-underline:single; color:blue; } span.msoDel{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:line-through; color:red; } @page{mso-page-border-surround-header:no; mso-page-border-surround-footer:no;}@page Section0{ } div.Section0{page:Section0;}\u003C/span",{"type":17,"tag":25,"props":300,"children":301},{},[302],{"type":17,"tag":47,"props":303,"children":304},{"style":49},[305],{"type":17,"tag":47,"props":306,"children":307},{"style":49},[308],{"type":23,"value":309}," @font-face{ font-family:\"Times New Roman\"; } @font-face{ font-family:\"宋体\"; } @font-face{ font-family:\"Calibri\"; } @font-face{ font-family:\" \"; } p.MsoNormal{ mso-style-name:正文; mso-style-parent:\"\"; margin:0pt; margin-bottom:.0001pt; mso-pagination:none; text-align:justify; text-justify:inter-ideograph; font-family:Calibri; mso-fareast-font-family:宋体; mso-bidi-font-family:'Times New Roman'; font-size:10.5000pt; mso-font-kerning:1.0000pt; } span.10{ font-family:Calibri; } span.15{ font-family:Calibri; color:rgb(0,0,255); text-decoration:underline; text-underline:single; } span.msoIns{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:underline; text-underline:single; color:blue; } span.msoDel{ mso-style-type:export-only; mso-style-name:\"\"; text-decoration:line-through; color:red; } @page{mso-page-border-surround-header:no; mso-page-border-surround-footer:no;}@page Section0{ } div.Section0{page:Section0;}",{"title":7,"searchDepth":311,"depth":311,"links":312},4,[313,315,316,317,318],{"id":147,"depth":314,"text":147},2,{"id":169,"depth":314,"text":169},{"id":186,"depth":314,"text":189},{"id":209,"depth":314,"text":209},{"id":275,"depth":314,"text":275},"markdown","content:news:zh:450.md","content","news/zh/450.md","news/zh/450","md",1776506092422]