[{"data":1,"prerenderedAt":235},["ShallowReactive",2],{"content-query-uM2prhLOjn":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":229,"_id":230,"_source":231,"_file":232,"_stem":233,"_extension":234},"/technology-blogs/zh/3076","zh",false,"","从ChatGPT到LWM再到AGI，我们处在一个黄金时代~（每月资讯更新系列）","单出一个系列文章给大家普及AI相关资讯~","2024-04-18","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/19/10c8e53e29c14047bbaaaa7d047960b5.png","technology-blogs","基础知识",{"type":15,"children":16,"toc":226},"root",[17,25,31,36,41,46,51,56,61,66,71,94,99,104,109,114,119,124,129,134,139,144,149,154,159,164,169,174,179,184,189,194,199,218],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"从chatgpt到lwm再到agi我们处在一个黄金时代每月资讯更新系列",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"A：Hello，小麦（小Mi），请播报本周AI类新闻",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":35},"B：1.即日起，ChatGPT3.5版本使用无需用户注册；2.MindSpore NLP紧跟Suno的步伐推出AI生成器——MusicGen；3.国产大模型Kimi启动200w字无损上下文内测......",{"type":18,"tag":26,"props":37,"children":38},{},[39],{"type":24,"value":40},"上述这样的新闻每天都在世界的边边角角疯狂上演，与其被动在小破站的视频广告里接受这样的信息，MindSpore论坛小坛主决定主动出击，单出一个系列文章给大家普及AI相关资讯~",{"type":18,"tag":26,"props":42,"children":43},{},[44],{"type":24,"value":45},"那么首先带大家了解几个常见名词缩写：",{"type":18,"tag":26,"props":47,"children":48},{},[49],{"type":24,"value":50},"1.ChatGPT：Chat Generative Pre-trained Transformer，生成式的预训练聊天变形器，通俗来说，就是在提前训练模型的基础上并结合上下文语境生成对应的回答。该聊天产品由OpenAI推出。",{"type":18,"tag":26,"props":52,"children":53},{},[54],{"type":24,"value":55},"2.LLM：Large Language Models，大语言模型。它们在海量的文本数据上进行训练，可以执行广泛的任务，比如文本总结、翻译、情感分析等等，从而更好地理解和生成人类语言。",{"type":18,"tag":26,"props":57,"children":58},{},[59],{"type":24,"value":60},"3.LWM：Large World Model，大世界模型。作为世界通用的大环境多模态自回归模型，通过在大型的多样化视频和图书数据集上的训练，实现了对语言、图像和视频的理解与生成能力。",{"type":18,"tag":26,"props":62,"children":63},{},[64],{"type":24,"value":65},"4.AGI：Artificial General Intelligence，人工通用智能。AGI可以理解为AI的2.0升级版本，能够完全模仿人类智能的行为执行复杂任务。",{"type":18,"tag":26,"props":67,"children":68},{},[69],{"type":24,"value":70},"由此，不难看出，我们真的处在一个黄金时代，多场景、多模态、多任务的世界通用模型感觉就在不远的将来，AGI再也不是梦想之谈。",{"type":18,"tag":26,"props":72,"children":73},{},[74,76,92],{"type":24,"value":75},"为了跟上这样的梦想步伐，昇思MindSpore将以",{"type":18,"tag":77,"props":78,"children":79},"strong",{},[80,82,90],{"type":24,"value":81},"每月更新",{"type":18,"tag":77,"props":83,"children":84},{},[85],{"type":18,"tag":77,"props":86,"children":87},{},[88],{"type":24,"value":89},"2",{"type":24,"value":91},"篇的频率",{"type":24,"value":93},"给大家普及多方面AI相关资讯（昇思MindSpore显然也包含在内咯~），帮助大家更好地了解这个世界！以下是本篇推出的主要资讯内容：",{"type":18,"tag":26,"props":95,"children":96},{},[97],{"type":24,"value":98},"1、革命新架构掀翻Transformer！无限上下文处理，2万亿token碾压Llama 2",{"type":18,"tag":26,"props":100,"children":101},{},[102],{"type":24,"value":103},"Transformer王座即将被取而代之！Meta、USC、CMU和UCSD联合提出了革命性新架构Megalodon，能够处理无限上下文，在2万亿token训练任务中，性能超越Llama2-7B，实现了非凡的效率。这是专为有效处理「无限上下文」长度的LLM预训练，以及推理而设计的架构。",{"type":18,"tag":26,"props":105,"children":106},{},[107],{"type":24,"value":108},"2、全国高等学校计算机类课程能力提升高级研修班（2024年第三期 武汉站）",{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"全国高校计算机类课程能力提升高级研修班第三期“MindSpore大模型训推微调实践”课程，将于4月20日-4月21日在华为武汉研究所开课。本期课程内容由浅入深全面讲解昇思MindSpore自动并行理论与实践，详解ChatGLM与MoE大模型并通过实际场景运用进行展示。",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":24,"value":118},"3、基于昇思MindSpore的大地电磁智能反演模型首开源，助力地球物理勘探加速智能化",{"type":18,"tag":26,"props":120,"children":121},{},[122],{"type":24,"value":123},"今日，华为AI4S Lab与清华大学李懋坤教授团队、华为先进计算与存储实验室合作，基于昇腾AI处理器与昇思MindSpore AI框架打造了大地电磁智能反演模型。该模型通过变分自编码器（VAE）灵活嵌入了多物理先验知识，达到了业界SOTA。该成果已被国际顶级勘探地球物理期刊《Geophysics》收录，相关代码已在昇思MindSpore Elec电磁仿真套件代码仓中开源，同时，该成果也在昇思人工智能框架峰会2024上发布亮相。",{"type":18,"tag":26,"props":125,"children":126},{},[127],{"type":24,"value":128},"4、比手动快13倍多，「机器人+AI」发现电池最佳电解质，加速材料研究",{"type":18,"tag":26,"props":130,"children":131},{},[132],{"type":24,"value":133},"美国西北太平洋国家实验室和阿贡国家实验室的研究团队，设计了一个高度自动化的工作流程，将高通量实验平台与最先进的主动学习算法相结合，可有效筛选对阳极电解质具有最佳溶解度的二元有机溶剂。除了设计用于开发高性能氧化还原液流电池的高效工作流程之外，该机器学习引导的高通量机器人平台为加速发现功能材料提供了一种强大而通用的方法。",{"type":18,"tag":26,"props":135,"children":136},{},[137],{"type":24,"value":138},"5、设计超高效疫苗，普林斯顿团队开发首个解码mRNA序列大模型",{"type":18,"tag":26,"props":140,"children":141},{},[142],{"type":24,"value":143},"普林斯顿王梦迪团队迎来了一项具有划时代意义的突破，该团队开发了世界首个解码mRNA非翻译区域序列的大模型，用于准确预测从mRNA到蛋白质的转录功能，及设计新序列用于mRNA疫苗。该研究论文目前已被《Nature Machine Intelligence》接收，意味着大语言模型可以用于预测和设计mRNA疫苗，其中新设计的序列经过实验证实远高于传统疫苗的转录效率。AI和语言模型正在颠覆生物学和制药研究中的传统方法。",{"type":18,"tag":26,"props":145,"children":146},{},[147],{"type":24,"value":148},"6、化学能力超GPT-4，首个化学领域百亿级大模型，思必驰、上交大、苏州实验室联合发布",{"type":18,"tag":26,"props":150,"children":151},{},[152],{"type":24,"value":153},"思必驰-上海交大智能人机交互联合实验室、苏州实验室共同发布了首个针对化学科学的百亿级专业化大模型ChemDFM。模型基于经典开源大模型LLaMa，引入了海量的化学基础与前沿知识，充分学习并掌握化学科学的专有语言与表达方式，最终以130亿的参数量在大多数化学相关的能力上超越了公认最强大的模型GPT-4。此外，在进一步的评测中ChemDFM显示出了其他类似模型几乎不具备的结合内部知识理解和分析陌生分子的能力，实现了在面对陌生分子或者陌生反应时，结合相关的化学知识进行推理与回答。",{"type":18,"tag":26,"props":155,"children":156},{},[157],{"type":24,"value":158},"7、论文解读：分割一切模型SAM泛化能力差？域适应策略给解决了",{"type":18,"tag":26,"props":160,"children":161},{},[162],{"type":24,"value":163},"Segment Anything Model（SAM）是最先进的图像分割基础模型，但其在多种下游任务中并非具有很强的鲁棒性与泛化性，例如在医学图像、伪装物体、添加干扰的自然图像等领域表现较差。为了应对该问题，提出了一种具有锚点正则化和低秩微调的弱监督自训练架构，以提高自适应的鲁棒性和计算效率。",{"type":18,"tag":26,"props":165,"children":166},{},[167],{"type":24,"value":168},"8、论文解读：联邦学习后门攻击的模型关键层",{"type":18,"tag":26,"props":170,"children":171},{},[172],{"type":24,"value":173},"联邦学习使多个参与方可以在数据隐私得到保护的情况下训练机器学习模型。但是由于服务器无法监控参与者在本地进行的训练过程，参与者可以篡改本地训练模型，从而对联邦学习的全局模型构成安全隐患，如后门攻击。该论文提出通过攻击后门关键层绕过防御算法检测，从而可以控制少量的参与者进行高效的后门攻击。",{"type":18,"tag":26,"props":175,"children":176},{},[177],{"type":24,"value":178},"9、波士顿动力Atlas，再见！退役视频引数十万观众泪目，液压退出历史舞台",{"type":18,"tag":26,"props":180,"children":181},{},[182],{"type":24,"value":183},"波士顿动力的Atlas，正式宣布退役！在最后一段告别视频里，它依然在奔跑、跳跃、后空翻、甚至摔倒在地，从膝盖喷出了液压油，让屏幕前的几十万观众跟着喊疼。或许液压系统真的过时了，Atlas的退役，标志着一个时代的结束。",{"type":18,"tag":26,"props":185,"children":186},{},[187],{"type":24,"value":188},"10、只要会说话，不写代码也能开发！百度又搞了一个大动作",{"type":18,"tag":26,"props":190,"children":191},{},[192],{"type":24,"value":193},"文心大模型4.0，半年时间又提升了52.5%，在智能体、代码、多模型上也有了新进展！像人一样思考的智能体，达到了一定程度的白盒；智能代码助手Comate，让开发者们动动嘴就能开发应用了。百度认为，未来大型的AI原生应用基本都是MoE架构。通过大小模型的混用，而非单一模型去解决问题。因此，针对场景匹配，什么时候调用大模型，什么时候调用小模型，都需要技术考量。",{"type":18,"tag":26,"props":195,"children":196},{},[197],{"type":24,"value":198},"......",{"type":18,"tag":26,"props":200,"children":201},{},[202,204,209,211,216],{"type":24,"value":203},"好啦，本期资讯介绍就先到这儿啦！大模型层出不穷，精彩资讯源源不断，后续请多多关注本坛主发布的新闻动态，如有特别想关注的领域我们也可以做个",{"type":18,"tag":77,"props":205,"children":206},{},[207],{"type":24,"value":208},"问卷调查",{"type":24,"value":210},"或者",{"type":18,"tag":77,"props":212,"children":213},{},[214],{"type":24,"value":215},"投票链接",{"type":24,"value":217},"，当然给本坛主留言也行！我们月底再见~~",{"type":18,"tag":26,"props":219,"children":220},{},[221],{"type":18,"tag":222,"props":223,"children":225},"img",{"alt":7,"src":224},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/19/ae6ce08a4f2c41f1a379b4b351bdea3e.png",[],{"title":7,"searchDepth":227,"depth":227,"links":228},4,[],"markdown","content:technology-blogs:zh:3076.md","content","technology-blogs/zh/3076.md","technology-blogs/zh/3076","md",1776506125866]