[{"data":1,"prerenderedAt":454},["ShallowReactive",2],{"content-query-5HKTJZkFmg":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":448,"_id":449,"_source":450,"_file":451,"_stem":452,"_extension":453},"/technology-blogs/zh/3564","zh",false,"","AI数据框架大横评之并行处理","开始本文的内容前，先简单回顾一下本系列的前几篇文章：","2024-12-30","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/01/08/1fa3dcf943a94b37be187125e3cdaa92.png","technology-blogs","基础知识",{"type":15,"children":16,"toc":437},"root",[17,25,31,36,41,53,58,68,73,83,88,98,103,108,113,118,129,134,148,157,162,170,175,183,196,209,214,222,228,233,254,262,274,287,295,301,306,317,325,329,337,341,349,354,362,383,404,416,424],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"ai数据框架大横评之并行处理",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":29},"h2",{"id":28},"前言",[30],{"type":24,"value":28},{"type":18,"tag":32,"props":33,"children":34},"p",{},[35],{"type":24,"value":9},{"type":18,"tag":32,"props":37,"children":38},{},[39],{"type":24,"value":40},"第一篇，我们简要对比了当前主流AI数据框架的架构设计，从中可以略微看出各家框架的主要设计理念和应用场景。",{"type":18,"tag":32,"props":42,"children":43},{},[44],{"type":18,"tag":45,"props":46,"children":50},"a",{"href":47,"rel":48},"https://www.hiascend.com/developer/blog/details/0222150949518615069",[49],"nofollow",[51],{"type":24,"value":52},"AI数据框架大横评之架构设计",{"type":18,"tag":32,"props":54,"children":55},{},[56],{"type":24,"value":57},"第二篇，我们简要对比了当前主流AI数据框架的数据加载方式。",{"type":18,"tag":32,"props":59,"children":60},{},[61],{"type":18,"tag":45,"props":62,"children":65},{"href":63,"rel":64},"https://www.hiascend.com/developer/blog/details/0286156408632279318",[49],[66],{"type":24,"value":67},"AI数据框架大横评之数据加载",{"type":18,"tag":32,"props":69,"children":70},{},[71],{"type":24,"value":72},"第三篇，我们简要对比了当前主流AI数据框架的数据处理方式。",{"type":18,"tag":32,"props":74,"children":75},{},[76],{"type":18,"tag":45,"props":77,"
children":80},{"href":78,"rel":79},"https://www.hiascend.com/developer/blog/details/0238166438804397008",[49],[81],{"type":24,"value":82},"AI数据框架大横评之数据处理",{"type":18,"tag":32,"props":84,"children":85},{},[86],{"type":24,"value":87},"第四篇，我们简要对比了当前主流AI数据框架的采样器。",{"type":18,"tag":32,"props":89,"children":90},{},[91],{"type":18,"tag":45,"props":92,"children":95},{"href":93,"rel":94},"https://www.hiascend.com/developer/blog/details/02111169783744872029",[49],[96],{"type":24,"value":97},"AI数据框架大横评之采样器",{"type":18,"tag":32,"props":99,"children":100},{},[101],{"type":24,"value":102},"建议大家先阅读以上几篇文章，再开始下面的阅读。",{"type":18,"tag":26,"props":104,"children":106},{"id":105},"并行处理",[107],{"type":24,"value":105},{"type":18,"tag":32,"props":109,"children":110},{},[111],{"type":24,"value":112},"随着AI芯片算力的不断提高，网络对数据预处理效率的要求也随之升高，如果数据生产的速度跟不上网络消耗的速率，数据处理将成为整个网络训练性能的瓶颈。",{"type":18,"tag":32,"props":114,"children":115},{},[116],{"type":24,"value":117},"为了充分利用Host资源，各家AI数据框架都提供了并行处理的功能，能够充分利用CPU多核多线程的能力，提高数据处理的整体速度。",{"type":18,"tag":119,"props":120,"children":122},"h3",{"id":121},"mindspore",[123],{"type":18,"tag":124,"props":125,"children":126},"strong",{},[127],{"type":24,"value":128},"MindSpore",{"type":18,"tag":32,"props":130,"children":131},{},[132],{"type":24,"value":133},"前文已经提到，MindSpore数据框架整体是一个多节点异步流水线的结构，这里的异步指的就是并发处理。MindSpore为数据加载、Map和Batch等比较耗时的操作提供了并发处理的能力。",{"type":18,"tag":32,"props":135,"children":136},{},[137,139,146],{"type":24,"value":138},"数据加载接口以 ",{"type":18,"tag":140,"props":141,"children":143},"code",{"className":142},[],[144],{"type":24,"value":145},"mindspore.dataset.GeneratorDataset",{"type":24,"value":147}," 为例：",{"type":18,"tag":149,"props":150,"children":152},"pre",{"code":151},"class mindspore.dataset.GeneratorDataset(source, column_names=None, column_types=None, schema=None, num_samples=None,\n                                         num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None,\n                                         
shard_id=None, python_multiprocessing=True, max_rowsize=None)\n",[153],{"type":18,"tag":140,"props":154,"children":155},{"__ignoreMap":7},[156],{"type":24,"value":151},{"type":18,"tag":32,"props":158,"children":159},{},[160],{"type":24,"value":161},"Map接口：",{"type":18,"tag":149,"props":163,"children":165},{"code":164},"mindspore.dataset.Dataset.map(operations, input_columns=None, output_columns=None, column_order=None,\n                              num_parallel_workers=None, python_multiprocessing=False, **kwargs)\n",[166],{"type":18,"tag":140,"props":167,"children":168},{"__ignoreMap":7},[169],{"type":24,"value":164},{"type":18,"tag":32,"props":171,"children":172},{},[173],{"type":24,"value":174},"Batch接口：",{"type":18,"tag":149,"props":176,"children":178},{"code":177},"mindspore.dataset.Dataset.batch(batch_size, drop_remainder=False, num_parallel_workers=None, \n                                python_multiprocessing=False, **kwargs)\n",[179],{"type":18,"tag":140,"props":180,"children":181},{"__ignoreMap":7},[182],{"type":24,"value":177},{"type":18,"tag":32,"props":184,"children":185},{},[186,188,194],{"type":24,"value":187},"可通过以上接口的 ",{"type":18,"tag":140,"props":189,"children":191},{"className":190},[],[192],{"type":24,"value":193},"num_parallel_workers",{"type":24,"value":195}," 参数设置并行度，默认值为8。此时将会创建对应数量的线程，并发执行加载或处理逻辑，提升吞吐率。",{"type":18,"tag":32,"props":197,"children":198},{},[199,201,207],{"type":24,"value":200},"我们知道基于CPython的Python内部存在全局解释器锁（GIL），所以即使创建了多个线程，同一时刻也只会有一个线程能够持有GIL而真正运行。所以当使用上述接口执行Python自定义逻辑时，最好通过 ",{"type":18,"tag":140,"props":202,"children":204},{"className":203},[],[205],{"type":24,"value":206},"python_multiprocessing",{"type":24,"value":208}," 参数打开多进程的功能，此时将会创建进程而不是线程来并发执行加载或处理逻辑，从而规避GIL的影响。",{"type":18,"tag":32,"props":210,"children":211},{},[212],{"type":24,"value":213},"下述代码编写了自定义数据加载逻辑和处理逻辑，并启用了多进程并发处理：",{"type":18,"tag":149,"props":215,"children":217},{"code":216},"import mindspore.dataset as ds\n\ndataset = 
ds.GeneratorDataset(MyDataset(), column_names=[\"data\", \"label\"],\n                              num_parallel_workers=8, python_multiprocessing=True)\ndataset = dataset.map(process_data, input_columns=[\"data\"], num_parallel_workers=8, python_multiprocessing=True)\n",[218],{"type":18,"tag":140,"props":219,"children":220},{"__ignoreMap":7},[221],{"type":24,"value":216},{"type":18,"tag":119,"props":223,"children":225},{"id":224},"pytorch",[226],{"type":24,"value":227},"PyTorch",{"type":18,"tag":32,"props":229,"children":230},{},[231],{"type":24,"value":232},"PyTorch也具有并发处理的能力，但与MindSpore不同，由于其不存在多节点流水线的结构，数据的加载和处理都在自定义类中完成，所以其并发的粒度不再是节点，而是整个处理过程。",{"type":18,"tag":32,"props":234,"children":235},{},[236,238,244,246,252],{"type":24,"value":237},"用户可以通过 ",{"type":18,"tag":140,"props":239,"children":241},{"className":240},[],[242],{"type":24,"value":243},"torch.utils.data.DataLoader",{"type":24,"value":245}," 接口的 ",{"type":18,"tag":140,"props":247,"children":249},{"className":248},[],[250],{"type":24,"value":251},"num_workers",{"type":24,"value":253}," 参数设置数据处理的并发度，默认值为0，表示不使用并发加载，数据处理全部由主进程执行。",{"type":18,"tag":149,"props":255,"children":257},{"code":256},"class torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=None, sampler=None, batch_sampler=None, num_workers=0,\n                                  collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,\n                                  multiprocessing_context=None, generator=None, *, prefetch_factor=None,\n                                  persistent_workers=False, pin_memory_device='')\n",[258],{"type":18,"tag":140,"props":259,"children":260},{"__ignoreMap":7},[261],{"type":24,"value":256},{"type":18,"tag":32,"props":263,"children":264},{},[265,267,272],{"type":24,"value":266},"由于整个 ",{"type":18,"tag":140,"props":268,"children":270},{"className":269},[],[271],{"type":24,"value":243},{"type":24,"value":273}," 
执行的都为Python逻辑，所以其并发全部基于多进程，不存在多线程处理。",{"type":18,"tag":32,"props":275,"children":276},{},[277,279,285],{"type":24,"value":278},"下述代码使用 ",{"type":18,"tag":140,"props":280,"children":282},{"className":281},[],[283],{"type":24,"value":284},"DataLoader",{"type":24,"value":286}," 并发加载数据集：",{"type":18,"tag":149,"props":288,"children":290},{"code":289},"from torch.utils.data import DataLoader\n\ndataloader = DataLoader(MyDataset(), batch_size=32, num_workers=8)\n",[291],{"type":18,"tag":140,"props":292,"children":293},{"__ignoreMap":7},[294],{"type":24,"value":289},{"type":18,"tag":119,"props":296,"children":298},{"id":297},"tensorflow",[299],{"type":24,"value":300},"TensorFlow",{"type":18,"tag":32,"props":302,"children":303},{},[304],{"type":24,"value":305},"TensorFlow与MindSpore一样都为多节点异步流水线的结构，其对数据加载、Map、Batch和Interleave接口提供了并发处理的能力。",{"type":18,"tag":32,"props":307,"children":308},{},[309,310,316],{"type":24,"value":138},{"type":18,"tag":140,"props":311,"children":313},{"className":312},[],[314],{"type":24,"value":315},"tf.data.TFRecordDataset",{"type":24,"value":147},{"type":18,"tag":149,"props":318,"children":320},{"code":319},"class tf.data.TFRecordDataset(\n    filenames,\n    compression_type=None,\n    buffer_size=None,\n    num_parallel_reads=None,\n    name=None\n)\n",[321],{"type":18,"tag":140,"props":322,"children":323},{"__ignoreMap":7},[324],{"type":24,"value":319},{"type":18,"tag":32,"props":326,"children":327},{},[328],{"type":24,"value":161},{"type":18,"tag":149,"props":330,"children":332},{"code":331},"tf.data.Dataset.map(\n    map_func, num_parallel_calls=None, deterministic=None, name=None\n)\n",[333],{"type":18,"tag":140,"props":334,"children":335},{"__ignoreMap":7},[336],{"type":24,"value":331},{"type":18,"tag":32,"props":338,"children":339},{},[340],{"type":24,"value":174},{"type":18,"tag":149,"props":342,"children":344},{"code":343},"tf.data.Dataset.batch(\n    batch_size,\n    drop_remainder=False,\n    num_parallel_calls=None,\n    
deterministic=None,\n    name=None\n)\n",[345],{"type":18,"tag":140,"props":346,"children":347},{"__ignoreMap":7},[348],{"type":24,"value":343},{"type":18,"tag":32,"props":350,"children":351},{},[352],{"type":24,"value":353},"Interleave接口：",{"type":18,"tag":149,"props":355,"children":357},{"code":356},"tf.data.Dataset.interleave(\n    map_func,\n    cycle_length=None,\n    block_length=None,\n    num_parallel_calls=None,\n    deterministic=None,\n    name=None\n)\n",[358],{"type":18,"tag":140,"props":359,"children":360},{"__ignoreMap":7},[361],{"type":24,"value":356},{"type":18,"tag":32,"props":363,"children":364},{},[365,367,373,375,381],{"type":24,"value":366},"可通过数据加载接口的 ",{"type":18,"tag":140,"props":368,"children":370},{"className":369},[],[371],{"type":24,"value":372},"num_parallel_reads",{"type":24,"value":374}," 参数以及Map、Batch、Interleave接口的 ",{"type":18,"tag":140,"props":376,"children":378},{"className":377},[],[379],{"type":24,"value":380},"num_parallel_calls",{"type":24,"value":382}," 参数设置并行度，默认值为None，表示不使能并发。此时将会创建对应数量的线程，并发执行加载或处理逻辑，提升吞吐率。",{"type":18,"tag":32,"props":384,"children":385},{},[386,388,394,396,402],{"type":24,"value":387},"可能由于TensorFlow中自定义的Python逻辑并不多，所以其未提供多进程处理的能力，如果用户通过 ",{"type":18,"tag":140,"props":389,"children":391},{"className":390},[],[392],{"type":24,"value":393},"tf.data.Dataset.from_generator",{"type":24,"value":395}," 或 ",{"type":18,"tag":140,"props":397,"children":399},{"className":398},[],[400],{"type":24,"value":401},"tf.data.Dataset.map",{"type":24,"value":403}," 执行Python函数，其效果可能会受GIL的影响。",{"type":18,"tag":32,"props":405,"children":406},{},[407,408,414],{"type":24,"value":278},{"type":18,"tag":140,"props":409,"children":411},{"className":410},[],[412],{"type":24,"value":413},"TFRecordDataset",{"type":24,"value":415}," 并发加载数据集并处理：",{"type":18,"tag":149,"props":417,"children":419},{"code":418},"import tensorflow as tf\n\ndataset = tf.data.TFRecordDataset([example_path], num_parallel_reads=8)\ndataset = 
dataset.map(decode_fn, num_parallel_calls=8)\n",[420],{"type":18,"tag":140,"props":421,"children":422},{"__ignoreMap":7},[423],{"type":24,"value":418},{"type":18,"tag":32,"props":425,"children":426},{},[427,429,435],{"type":24,"value":428},"值得一提的是，TensorFlow还提供了自动参数调优的功能，用户可以直接将并发度设置为 ",{"type":18,"tag":140,"props":430,"children":432},{"className":431},[],[433],{"type":24,"value":434},"tf.data.AUTOTUNE",{"type":24,"value":436}," ，此时并发度将在运行中动态调整为最佳值，免除了用户调优的麻烦。",{"title":7,"searchDepth":438,"depth":438,"links":439},4,[440,442],{"id":28,"depth":441,"text":28},2,{"id":105,"depth":441,"text":105,"children":443},[444,446,447],{"id":121,"depth":445,"text":128},3,{"id":224,"depth":445,"text":227},{"id":297,"depth":445,"text":300},"markdown","content:technology-blogs:zh:3564.md","content","technology-blogs/zh/3564.md","technology-blogs/zh/3564","md",1776506131119]