[{"data":1,"prerenderedAt":587},["ShallowReactive",2],{"content-query-DD2meVHdED":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":581,"_id":582,"_source":583,"_file":584,"_stem":585,"_extension":586},"/technology-blogs/zh/1052","zh",false,"","机器内存不够怎么办？简易好用的梯度累积算法快速帮你解决！","在GPU内存不变的情况下，模型越来越大，数据的batch size只能缩小，梯度累积作为一种简单的解决方案来解决这个问题。","2022-02-25","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/03/02/382554c2d7784aef85e1521d0e0859ab.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":569},"root",[17,25,40,49,54,59,66,73,78,87,92,103,113,118,125,132,137,142,185,190,196,201,213,225,236,241,246,251,256,261,266,274,279,286,291,296,303,308,313,320,325,332,337,357,362,367,377,435,440,445,453,520,525,530],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"机器内存不够怎么办简易好用的梯度累积算法快速帮你解决",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"ul",{},[29,35],{"type":18,"tag":30,"props":31,"children":32},"li",{},[33],{"type":24,"value":34},"明天博士论文要答辩了，只有一张12G二手卡，今晚通宵要搞定10个模型实验",{"type":18,"tag":30,"props":36,"children":37},{},[38],{"type":24,"value":39},"挖槽，突然想出一个T9开天霹雳模型，加载不进去我那张12G的二手卡，感觉要错过今年上台Best Paper领奖",{"type":18,"tag":41,"props":42,"children":43},"p",{},[44],{"type":18,"tag":45,"props":46,"children":48},"img",{"alt":7,"src":47},"https://pic3.zhimg.com/80/v2-79066085607007b0caad3f4ceb6bd08e_720w.jpg",[],{"type":18,"tag":41,"props":50,"children":51},{},[52],{"type":24,"value":53},"上面出现的问题主要是机器不够、内存不够用。在深度学习训练的时候，数据的batch size大小受到GPU内存限制，batch size大小会影响模型最终的准确性和训练过程的性能。在GPU内存不变的情况下，模型越来越大，那么这就意味着数据的batch size只能缩小，这个时候，梯度累积（Gradient 
Accumulation）可以作为一种简单的解决方案来解决这个问题。",{"type":18,"tag":41,"props":55,"children":56},{},[57],{"type":24,"value":58},"下面这个图中橙色部分HERE就是梯度累积算法在AI系统中的大致位置，一般在AI框架/AI系统的表达层，跟算法结合比较紧密。",{"type":18,"tag":41,"props":60,"children":61},{},[62],{"type":18,"tag":45,"props":63,"children":65},{"alt":7,"src":64},"https://pic4.zhimg.com/80/v2-5d4cc97eeb8d536758c272febce834eb_720w.jpg",[],{"type":18,"tag":67,"props":68,"children":70},"h2",{"id":69},"batch-size的作用",[71],{"type":24,"value":72},"Batch size的作用",{"type":18,"tag":41,"props":74,"children":75},{},[76],{"type":24,"value":77},"训练数据的Batch size大小对训练过程的收敛性，以及训练模型的最终准确性具有关键影响。通常，每个神经网络和数据集的Batch size大小都有一个最佳值或值范围。",{"type":18,"tag":79,"props":80,"children":81},"blockquote",{},[82],{"type":18,"tag":41,"props":83,"children":84},{},[85],{"type":24,"value":86},"不同的神经网络和不同的数据集可能有不同的最佳Batch size大小。",{"type":18,"tag":41,"props":88,"children":89},{},[90],{"type":24,"value":91},"选择Batch size的时候主要考虑两个问题：",{"type":18,"tag":41,"props":93,"children":94},{},[95,101],{"type":18,"tag":96,"props":97,"children":98},"strong",{},[99],{"type":24,"value":100},"泛化性",{"type":24,"value":102},"：大的Batch size可能陷入局部最小值。陷入局部最小值则意味着神经网络将在训练集之外的样本上表现得很好，这个过程称为泛化。因此，泛化性一般表示过度拟合。",{"type":18,"tag":41,"props":104,"children":105},{},[106,111],{"type":18,"tag":96,"props":107,"children":108},{},[109],{"type":24,"value":110},"收敛速度",{"type":24,"value":112},"：小的Batch size可能导致算法学习收敛速度慢。网络模型在每个Batch的更新将会确定下一次Batch的更新起点。每次Batch都会训练数据集中，随机抽取训练样本，因此所得到的梯度是基于部分数据噪声的估计。在单次Batch中使用的样本越少，梯度估计准确度越低。换句话说，较小的Batch size可能会使学习过程波动性更大，从本质上延长算法收敛所需要的时间。",{"type":18,"tag":41,"props":114,"children":115},{},[116],{"type":24,"value":117},"考虑到上面两个主要的问题，所以在训练之前需要选择一个合适的Batch 
size。",{"type":18,"tag":41,"props":119,"children":120},{},[121],{"type":18,"tag":45,"props":122,"children":124},{"alt":7,"src":123},"https://pic1.zhimg.com/80/v2-00c97420a0e02d327787b30ede90150c_720w.jpg",[],{"type":18,"tag":126,"props":127,"children":129},"h3",{"id":128},"batch-size对内存的影响",[130],{"type":24,"value":131},"Batch size对内存的影响",{"type":18,"tag":41,"props":133,"children":134},{},[135],{"type":24,"value":136},"虽然传统计算机在CPU上面可以访问大量RAM，还可以利用SSD进行二级缓存或者虚拟缓存机制。但是如GPU等AI加速芯片上的内存要少得多。这个时候训练数据Batch size的大小对GPU的内存有很大影响。",{"type":18,"tag":41,"props":138,"children":139},{},[140],{"type":24,"value":141},"为了进一步理解这一点，让我们首先检查训练时候AI芯片内存中内存的内容：",{"type":18,"tag":26,"props":143,"children":144},{},[145,155,165,175],{"type":18,"tag":30,"props":146,"children":147},{},[148,153],{"type":18,"tag":96,"props":149,"children":150},{},[151],{"type":24,"value":152},"模型参数",{"type":24,"value":154},"：网络模型需要用到的权重参数和偏差。",{"type":18,"tag":30,"props":156,"children":157},{},[158,163],{"type":18,"tag":96,"props":159,"children":160},{},[161],{"type":24,"value":162},"优化器变量",{"type":24,"value":164},"：优化器算法需要的变量，例如动量momentum。",{"type":18,"tag":30,"props":166,"children":167},{},[168,173],{"type":18,"tag":96,"props":169,"children":170},{},[171],{"type":24,"value":172},"中间计算变量",{"type":24,"value":174},"：网络模型计算产生的中间值，这些值临时存储在AI加速芯片的内存中，例如，每层激活的输出。",{"type":18,"tag":30,"props":176,"children":177},{},[178,183],{"type":18,"tag":96,"props":179,"children":180},{},[181],{"type":24,"value":182},"工作区Workspace",{"type":24,"value":184},"：AI加速芯片的内核实现是需要用到的局部变量，其产生的临时内存，例如算子D=A+B/C中B/C计算时产生的局部变量。",{"type":18,"tag":41,"props":186,"children":187},{},[188],{"type":24,"value":189},"因此，Batch size越大，意味着神经网络训练的时候所需要的样本就越多，导致需要存储在AI芯片内存变量激增。在许多情况下，没有足够的AI加速芯片内存，Batch size设置得太大，就会出现OOM报错（Out Of Memory）。",{"type":18,"tag":126,"props":191,"children":193},{"id":192},"使用大batch-size的方法",[194],{"type":24,"value":195},"使用大Batch 
size的方法",{"type":18,"tag":41,"props":197,"children":198},{},[199],{"type":24,"value":200},"解决AI加速芯片内存限制，并运行大Batch size的一种方法是将数据Sample的Batch拆分为更小的Batch，叫做Mini-Batch。这些小Mini-Batch可以独立运行，并且在网络模型训练的时候，对梯度进行平均或者求和。主要实现有两种方式。",{"type":18,"tag":41,"props":202,"children":203},{},[204,206,211],{"type":24,"value":205},"1）",{"type":18,"tag":96,"props":207,"children":208},{},[209],{"type":24,"value":210},"数据并行",{"type":24,"value":212},"：使用多个AI加速芯片并行训练所有Mini-Batch，每份数据都在单个AI加速芯片上。累积所有Mini-Batch的梯度，结果用于在每个Epoch结束时求和更新网络参数。",{"type":18,"tag":41,"props":214,"children":215},{},[216,218,223],{"type":24,"value":217},"2）",{"type":18,"tag":96,"props":219,"children":220},{},[221],{"type":24,"value":222},"梯度累积",{"type":24,"value":224},"：按顺序执行Mini-Batch，同时对梯度进行累积，累积的结果在最后一个Mini-Batch计算后求平均更新模型变量。",{"type":18,"tag":41,"props":226,"children":227},{},[228,230,234],{"type":24,"value":229},"虽然两种技术都挺像的，解决的问题都是内存无法执行更大的Batch size，但梯度累积可以使用单个AI加速芯片就可以完成啦，而数据并行则需要多块AI加速芯片，所以手头上只有一台12G二手卡的同学们赶紧把",{"type":18,"tag":96,"props":231,"children":232},{},[233],{"type":24,"value":222},{"type":24,"value":235},"用起来。",{"type":18,"tag":67,"props":237,"children":239},{"id":238},"梯度累积原理",[240],{"type":24,"value":238},{"type":18,"tag":41,"props":242,"children":243},{},[244],{"type":24,"value":245},"梯度累积是一种训练神经网络的数据Sample样本按Batch拆分为几个小Batch的方式，然后按顺序计算。",{"type":18,"tag":41,"props":247,"children":248},{},[249],{"type":24,"value":250},"在进一步讨论梯度累积之前，我们来看看神经网络的计算过程。",{"type":18,"tag":41,"props":252,"children":253},{},[254],{"type":24,"value":255},"深度学习模型由许多相互连接的神经网络单元所组成，在所有神经网络层中，样本数据会不断向前传播。在通过所有层后，网络模型会输出样本的预测值，通过损失函数然后计算每个样本的损失值（误差）。神经网络通过反向传播，去计算损失值相对于模型参数的梯度。最后这些梯度信息用于对网络模型中的参数进行更新。",{"type":18,"tag":41,"props":257,"children":258},{},[259],{"type":24,"value":260},"优化器用于对网络模型权重参数更新的数学公式。以一个简单随机梯度下降(SGD)算法为例。",{"type":18,"tag":41,"props":262,"children":263},{},[264],{"type":24,"value":265},"假设Loss 
Function函数公式为：",{"type":18,"tag":41,"props":267,"children":268},{},[269],{"type":18,"tag":45,"props":270,"children":273},{"alt":271,"src":272},"[公式]","https://www.zhihu.com/equation?tex=Loss%28%5Ctheta%29%3D%5Cfrac%7B1%7D%7B2%7D%5Cleft%28h%28x%5E%7Bk%7D%29-y%5E%7Bk%7D%5Cright%29%5E%7B2%7D",[],{"type":18,"tag":41,"props":275,"children":276},{},[277],{"type":24,"value":278},"在构建模型时，优化器用于计算最小化损失的算法。这里SGD算法利用Loss函数来更新权重参数公式为：",{"type":18,"tag":41,"props":280,"children":281},{},[282],{"type":18,"tag":45,"props":283,"children":285},{"alt":271,"src":284},"https://www.zhihu.com/equation?tex=%5Ctheta%7Bi%7D%3D%5Ctheta_%7Bi-1%7D-lr+%2A+grad_%7Bi%7D",[],{"type":18,"tag":41,"props":287,"children":288},{},[289],{"type":24,"value":290},"其中theta是网络模型中的可训练参数（权重或偏差），lr是学习率，grad是相对于网络模型参数的损失。",{"type":18,"tag":41,"props":292,"children":293},{},[294],{"type":24,"value":295},"梯度累积则是只计算神经网络模型，但是并不及时更新网络模型的参数，同时在计算的时候累积计算时候得到的梯度信息，最后统一使用累积的梯度来对参数进行更新。",{"type":18,"tag":41,"props":297,"children":298},{},[299],{"type":18,"tag":45,"props":300,"children":302},{"alt":271,"src":301},"https://www.zhihu.com/equation?tex=accumulated%3D%5Csum_%7Bi%3D0%7D%5E%7BN%7D+grad_%7Bi%7D",[],{"type":18,"tag":41,"props":304,"children":305},{},[306],{"type":24,"value":307},"在不更新模型变量的时候，实际上是把原来的数据Batch分成几个小的Mini-Batch，每个step中使用的样本实际上是更小的数据集。",{"type":18,"tag":41,"props":309,"children":310},{},[311],{"type":24,"value":312},"在N个step内不更新变量，使所有Mini-Batch使用相同的模型变量来计算梯度，以确保计算出来得到相同的梯度和权重信息，算法上等价于使用原来没有切分的Batch size大小一样。即：",{"type":18,"tag":41,"props":314,"children":315},{},[316],{"type":18,"tag":45,"props":317,"children":319},{"alt":271,"src":318},"https://www.zhihu.com/equation?tex=%5Ctheta%7Bi%7D%3D%5Ctheta_%7Bi-1%7D-lr+%2A+%5Csum_%7Bi%3D0%7D%5E%7BN%7D+grad_%7Bi%7D",[],{"type":18,"tag":41,"props":321,"children":322},{},[323],{"type":24,"value":324},"最终在上面步骤中累积梯度会产生与使用全局Batch 
size大小相同的梯度总和。",{"type":18,"tag":41,"props":326,"children":327},{},[328],{"type":18,"tag":45,"props":329,"children":331},{"alt":7,"src":330},"https://pic1.zhimg.com/80/v2-16ad4a0fe34791c10c0f9d5bee8bc86c_720w.jpg",[],{"type":18,"tag":41,"props":333,"children":334},{},[335],{"type":24,"value":336},"当然在实际工程当中，关于调参和算法上有两点需要注意的：",{"type":18,"tag":79,"props":338,"children":339},{},[340],{"type":18,"tag":41,"props":341,"children":342},{},[343,348,350,355],{"type":18,"tag":96,"props":344,"children":345},{},[346],{"type":24,"value":347},"学习率 learning rate",{"type":24,"value":349},"：一定条件下，Batch size越大训练效果越好，梯度累积则模拟了batch size增大的效果，如果accumulation steps为4，则Batch size增大了4倍，根据ZOMI的经验，使用梯度累积的时候需要把学习率适当放大。 ",{"type":18,"tag":96,"props":351,"children":352},{},[353],{"type":24,"value":354},"归一化 Batch Norm",{"type":24,"value":356},"：accumulation steps为4时进行Batch size模拟放大效果，和真实Batch size相比，数据的分布其实并不完全相同，4倍Batch size的BN计算出来的均值和方差与实际数据均值和方差不太相同，因此有些实现中会使用Group Norm来代替Batch Norm。",{"type":18,"tag":67,"props":358,"children":360},{"id":359},"梯度累积实现",[361],{"type":24,"value":359},{"type":18,"tag":41,"props":363,"children":364},{},[365],{"type":24,"value":366},"正常训练一个batch的伪代码：",{"type":18,"tag":368,"props":369,"children":371},"pre",{"code":370},"for i, (images, labels) in enumerate(train_data):\n    # 1. forward 前向计算\n    outputs = model(images)\n    loss = criterion(outputs, labels)\n\n    # 2. 
backward 反向传播计算梯度\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n",[372],{"type":18,"tag":373,"props":374,"children":375},"code",{"__ignoreMap":7},[376],{"type":24,"value":370},{"type":18,"tag":26,"props":378,"children":379},{},[380,391,402,413,424],{"type":18,"tag":30,"props":381,"children":382},{},[383,389],{"type":18,"tag":373,"props":384,"children":386},{"className":385},[],[387],{"type":24,"value":388},"model(images)",{"type":24,"value":390}," 输入图像和标签,前向计算。",{"type":18,"tag":30,"props":392,"children":393},{},[394,400],{"type":18,"tag":373,"props":395,"children":397},{"className":396},[],[398],{"type":24,"value":399},"criterion(outputs, labels)",{"type":24,"value":401}," 通过前向计算得到预测值，计算损失函数。",{"type":18,"tag":30,"props":403,"children":404},{},[405,411],{"type":18,"tag":373,"props":406,"children":408},{"className":407},[],[409],{"type":24,"value":410},"optimizer.zero_grad()",{"type":24,"value":412}," 清空历史的梯度信息。",{"type":18,"tag":30,"props":414,"children":415},{},[416,422],{"type":18,"tag":373,"props":417,"children":419},{"className":418},[],[420],{"type":24,"value":421},"loss.backward()",{"type":24,"value":423}," 进行反向传播，计算当前batch的梯度。",{"type":18,"tag":30,"props":425,"children":426},{},[427,433],{"type":18,"tag":373,"props":428,"children":430},{"className":429},[],[431],{"type":24,"value":432},"optimizer.step()",{"type":24,"value":434}," 根据反向传播得到的梯度，更新网络参数。",{"type":18,"tag":41,"props":436,"children":437},{},[438],{"type":24,"value":439},"即在网络中输入一个batch的数据，就计算一次梯度，更新一次网络。",{"type":18,"tag":41,"props":441,"children":442},{},[443],{"type":24,"value":444},"使用梯度累加后：",{"type":18,"tag":368,"props":446,"children":448},{"code":447},"# 梯度累加参数\naccumulation_steps = 4\n\n\nfor i, (images, labels) in enumerate(train_data):\n    # 1. 
forward 前向计算\n    outputs = model(images)\n    loss = criterion(outputs, labels)\n\n    # 2.1 loss regularization loss正则化\n    loss = loss / accumulation_steps\n\n    # 2.2 backward propagation 反向传播计算梯度\n    loss.backward()\n\n    # 3. update parameters of net\n    if ((i+1) % accumulation_steps)==0:\n        # optimizer the net\n        optimizer.step()\n        optimizer.zero_grad() # reset gradient\n",[449],{"type":18,"tag":373,"props":450,"children":451},{"__ignoreMap":7},[452],{"type":24,"value":447},{"type":18,"tag":26,"props":454,"children":455},{},[456,465,474,485,494,499,509],{"type":18,"tag":30,"props":457,"children":458},{},[459,464],{"type":18,"tag":373,"props":460,"children":462},{"className":461},[],[463],{"type":24,"value":388},{"type":24,"value":390},{"type":18,"tag":30,"props":466,"children":467},{},[468,473],{"type":18,"tag":373,"props":469,"children":471},{"className":470},[],[472],{"type":24,"value":399},{"type":24,"value":401},{"type":18,"tag":30,"props":475,"children":476},{},[477,483],{"type":18,"tag":373,"props":478,"children":480},{"className":479},[],[481],{"type":24,"value":482},"loss / accumulation_steps",{"type":24,"value":484}," loss每次更新，因此每次除以steps累积到原梯度上。",{"type":18,"tag":30,"props":486,"children":487},{},[488,493],{"type":18,"tag":373,"props":489,"children":491},{"className":490},[],[492],{"type":24,"value":421},{"type":24,"value":423},{"type":18,"tag":30,"props":495,"children":496},{},[497],{"type":24,"value":498},"多次循环伪代码步骤1-2，不清空梯度，使梯度累加在历史梯度上。",{"type":18,"tag":30,"props":500,"children":501},{},[502,507],{"type":18,"tag":373,"props":503,"children":505},{"className":504},[],[506],{"type":24,"value":432},{"type":24,"value":508}," 梯度累加一定次数后，根据所累积的梯度更新网络参数。",{"type":18,"tag":30,"props":510,"children":511},{},[512,518],{"type":18,"tag":373,"props":513,"children":515},{"className":514},[],[516],{"type":24,"value":517},"optimizer.zero_grad()",{"type":24,"value":519}," 
清空历史梯度，为下一次梯度累加做准备。",{"type":18,"tag":41,"props":521,"children":522},{},[523],{"type":24,"value":524},"梯度累积就是，每次获取1个batch的数据，计算1次梯度，此时梯度不清空，不断累积，累积一定次数后，根据累积的梯度更新网络参数，然后清空所有梯度信息，进行下一次循环。",{"type":18,"tag":67,"props":526,"children":528},{"id":527},"参考文献",[529],{"type":24,"value":527},{"type":18,"tag":26,"props":531,"children":532},{},[533,538,543,557],{"type":18,"tag":30,"props":534,"children":535},{},[536],{"type":24,"value":537},"[1] Hermans, Joeri R., Gerasimos Spanakis, and Rico Möckel. \"Accumulated gradient normalization.\" Asian Conference on Machine Learning. PMLR, 2017.",{"type":18,"tag":30,"props":539,"children":540},{},[541],{"type":24,"value":542},"[2] Lin, Yujun, et al. \"Deep gradient compression: Reducing the communication bandwidth for distributed training.\" arXiv preprint arXiv:1712.01887 (2017).",{"type":18,"tag":30,"props":544,"children":545},{},[546,548],{"type":24,"value":547},"[3] ",{"type":18,"tag":549,"props":550,"children":554},"a",{"href":551,"rel":552},"https://link.zhihu.com/?target=https%3A//towardsdatascience.com/how-to-break-gpu-memory-boundaries-even-with-large-batch-sizes-7a9c27a400ce",[553],"nofollow",[555],{"type":24,"value":556},"how-to-break-gpu-memory-boundaries-even-with-large-batch-sizes",{"type":18,"tag":30,"props":558,"children":559},{},[560,562],{"type":24,"value":561},"[4] 
",{"type":18,"tag":549,"props":563,"children":566},{"href":564,"rel":565},"https://link.zhihu.com/?target=https%3A//towardsdatascience.com/what-is-gradient-accumulation-in-deep-learning-ec034122cfa",[553],[567],{"type":24,"value":568},"what-is-gradient-accumulation-in-deep-learning",{"title":7,"searchDepth":570,"depth":570,"links":571},4,[572,578,579,580],{"id":69,"depth":573,"text":72,"children":574},2,[575,577],{"id":128,"depth":576,"text":131},3,{"id":192,"depth":576,"text":195},{"id":238,"depth":573,"text":238},{"id":359,"depth":573,"text":359},{"id":527,"depth":573,"text":527},"markdown","content:technology-blogs:zh:1052.md","content","technology-blogs/zh/1052.md","technology-blogs/zh/1052","md",1776506111684]