[{"data":1,"prerenderedAt":242},["ShallowReactive",2],{"content-query-LgqddhMtZv":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":236,"_id":237,"_source":238,"_file":239,"_stem":240,"_extension":241},"/technology-blogs/zh/2956","zh",false,"","项目分享 | 基于MindSpore Serving实现大模型推理部署","大模型时代，作为一个开发人员更多的是关注一个大模型如何训练好、如何调整模型参数、如何才能得到一个更高的模型精度。","2024-01-17","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/19/1b2655525ce749ccb89f1a548019e2db.png","technology-blogs",{"type":14,"children":15,"toc":233},"root",[16,24,34,42,47,52,57,62,67,72,77,82,87,95,103,111,116,121,126,131,138,146,154,159,164,169,174,179,184,189,194,199,207,215,223],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"项目分享-基于mindspore-serving实现大模型推理部署",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":17,"tag":29,"props":30,"children":31},"strong",{},[32],{"type":23,"value":33},"01",{"type":17,"tag":25,"props":35,"children":36},{},[37],{"type":17,"tag":29,"props":38,"children":39},{},[40],{"type":23,"value":41},"Why MindSpore Serving",{"type":17,"tag":25,"props":43,"children":44},{},[45],{"type":23,"value":46},"大模型时代，作为一个开发人员更多的是关注一个大模型如何训练好、如何调整模型参数、如何才能得到一个更高的模型精度。而作为一个整体项目，只有项目落地才能有其真正的价值。那么如何才能够使得大模型实现落地？如何才能使大模型项目中的文件以app的形式呈现给用户？",{"type":17,"tag":25,"props":48,"children":49},{},[50],{"type":23,"value":51},"解决这个问题的一个组件就是Serving（服务），它主要解决的问题有：",{"type":17,"tag":25,"props":53,"children":54},{},[55],{"type":23,"value":56},"模型如何提交给服务；",{"type":17,"tag":25,"props":58,"children":59},{},[60],{"type":23,"value":61},"服务如何部署；",{"type":17,"tag":25,"props":63,"children":64},{},[65],{"type":23,"value":66},"服务如何呈现给用户；",{"type":17,"tag":25,"props":68,"children":69},{},[70],{"type":23,"value":71},"如何应用各种复杂场景等待",{"type":17,"tag":25,"props":73,"children":74},{},[75],{"type":23,"value":76},"昇思MindSpore Serving就是为了实现将大模型部署到生产环境而产生的。",{"type":17,"tag":25,"props":78,"children":79},{},[80],{"type":23,"value":81},"昇思MindSpore Serving是一个轻量级、高性能的服务模块，旨在帮助昇思MindSpore开发者在生产环境中高效部署在线推理服务。当用户使用昇思MindSpore完成模型训练后，导出MindIR，即可使用昇思MindSpore Serving创建该大模型的推理服务。",{"type":17,"tag":25,"props":83,"children":84},{},[85],{"type":23,"value":86},"昇思MindSpore Serving实现的是一个模型服务化的部署，也就是说模型以线上的形式部署在服务器和云上，客户通过浏览器或者客户端去访问这个服务，将需要进行推理的输入内容发送给服务器，然后服务器将推理的结果返回给用户。",{"type":17,"tag":25,"props":88,"children":89},{},[90],{"type":17,"tag":91,"props":92,"children":94},"img",{"alt":7,"src":93},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/19/c39ffe6261f14425a3322079ecaec7bd.png",[],{"type":17,"tag":25,"props":96,"children":97},{},[98],{"type":17,"tag":29,"props":99,"children":100},{},[101],{"type":23,"value":102},"02",{"type":17,"tag":25,"props":104,"children":105},{},[106],{"type":17,"tag":29,"props":107,"children":108},{},[109],{"type":23,"value":110},"Component",{"type":17,"tag":25,"props":112,"children":113},{},[114],{"type":23,"value":115},"昇思MindSpore Serving由三部分组成，分别是客户端（Client）、Master和Worker。",{"type":17,"tag":25,"props":117,"children":118},{},[119],{"type":23,"value":120},"客户端是用户节点，提供了gRPC和RESTful的访问。",{"type":17,"tag":25,"props":122,"children":123},{},[124],{"type":23,"value":125},"Master是一个管理节点，管理所有Worker的信息，包括Worker有哪些模型的信息；Master也是一个分化节点，接收到了客户端的请求之后，会根据请求的内容，结合当前管理的Worker节点的信息进行分发，将请求分发给不同的Worker执行。",{"type":17,"tag":25,"props":127,"children":128},{},[129],{"type":23,"value":130},"Worker是一个执行节点，会执行加载、模型的更新，在接收到Master转发的请求之后，会将请求进行组装和拆分，然后做前处理、推理和后处理，执行完之后将结果返回给Master，Master再将结果返回给客户端。",{"type":17,"tag":25,"props":132,"children":133},{},[134],{"type":17,"tag":91,"props":135,"children":137},{"alt":7,"src":136},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/19/6758b192c0034ef7876dcd09d361d9a3.png",[],{"type":17,"tag":25,"props":139,"children":140},{},[141],{"type":17,"tag":29,"props":142,"children":143},{},[144],{"type":23,"value":145},"03",{"type":17,"tag":25,"props":147,"children":148},{},[149],{"type":17,"tag":29,"props":150,"children":151},{},[152],{"type":23,"value":153},"Features",{"type":17,"tag":25,"props":155,"children":156},{},[157],{"type":23,"value":158},"1．简单易用：",{"type":17,"tag":25,"props":160,"children":161},{},[162],{"type":23,"value":163},"对客户端提供了gRPC和RESTful的服务，同时又提供了服务的拉起、服务的部署和客户端的访问，提供了简单的python接口，通过python接口，用户可以很方便的定制和访问部署服务，只需要一行命令就能够完成一件事。",{"type":17,"tag":25,"props":165,"children":166},{},[167],{"type":23,"value":168},"2．提供定制化的服务：",{"type":17,"tag":25,"props":170,"children":171},{},[172],{"type":23,"value":173},"对于模型来说输入和输出一般是固定的，而对于用户来说输入和输出可能是多变的，这就需要一个预处理模块，将模型的输入转为一个模型可以识别的输入。同时还需要一个后处理模块，给用户提供定制化的服务，针对模型可以定制方法classifly_top，用户根据需要去写前处理和后处理的操作。对于客户端来说只要指定模型名和方法名就能实现推理的结果。",{"type":17,"tag":25,"props":175,"children":176},{},[177],{"type":23,"value":178},"3．支持批处理：",{"type":17,"tag":25,"props":180,"children":181},{},[182],{"type":23,"value":183},"主要是针对具有batchsize维度的文本来说。batchsize实现了文本的并行，在硬件资源足够的情况下，batchsize可以很大地提高性能。对于MindSpore Serving来说，用户一次性发送的请求是不确定的，因此Serving分割和组合一个或者多个请求以匹配用户模型的batchsize。例如batchsize=2，但是有三个请求发过来，这时候就会将两个请求合并处理，到后面再拆分，这样就实现了三个请求的并行，提高了效率。",{"type":17,"tag":25,"props":185,"children":186},{},[187],{"type":23,"value":188},"4．高性能扩展：",{"type":17,"tag":25,"props":190,"children":191},{},[192],{"type":23,"value":193},"昇思MindSpore Serving所使用的算子引擎框架是昇思MindSpore框架，具有自动融合和自动并行的高性能，再加上昇思MindSpore Serving本身具有一个高性能的底层通信能力，客户端可以进行多实例组装，模型支持批处理，多模型之间支持并发，预处理和后处理支持多线程的处理。客户端和Worker可以实现扩展的，因此它也实现了一个高扩展性。",{"type":17,"tag":25,"props":195,"children":196},{},[197],{"type":23,"value":198},"04",{"type":17,"tag":25,"props":200,"children":201},{},[202],{"type":17,"tag":29,"props":203,"children":204},{},[205],{"type":23,"value":206},"Demo",{"type":17,"tag":25,"props":208,"children":209},{},[210],{"type":17,"tag":29,"props":211,"children":212},{},[213],{"type":23,"value":214},"基于昇腾训练处理器",{"type":17,"tag":25,"props":216,"children":217},{},[218],{"type":17,"tag":29,"props":219,"children":220},{},[221],{"type":23,"value":222},"start_agent.py",{"type":17,"tag":224,"props":225,"children":227},"pre",{"code":226},"from agent.agent_multi_post_method import\nfrom multiprocessing import Queue\n\nfrom config.serving_config import AgentConfig, ModelName\n\nif —name— == \"—main—\":\n    startup_queue = Queue(1024)\n    startup_agents(AgentConfig.ctx_setting,\n                   AgentConfig.inc_setting,\n                   AgentConfig.post_model_setting,\n                   len(AgentConfig.AgentPorts),\n                   AgentConfig.prefill_model,\n                   AgentConfig.decode_model,\n                   AgentConfig.argmax_model,\n                   AgentConfig.topk_model,\n                   startup_queue)\n\n    started_agents = 0\n    while True:\n        value = startup_queue.get()\n        print(\"agent : %f started\" % value)\n        started_agents = started_agents + 1\n        if started_agents >= len(AgentConfig.AgentPorts):\n        print(\"all agents started\")\n        break\n\n    #     server_app_post.init_server_app()\n    #     server_app_post.warmup_model(ModelName)\n    #     server_app_post.run_server__app()\n",[228],{"type":17,"tag":229,"props":230,"children":231},"code",{"__ignoreMap":7},[232],{"type":23,"value":226},{"title":7,"searchDepth":234,"depth":234,"links":235},4,[],"markdown","content:technology-blogs:zh:2956.md","content","technology-blogs/zh/2956.md","technology-blogs/zh/2956","md",1776506124544]