[{"data":1,"prerenderedAt":992},["ShallowReactive",2],{"content-query-QH80zVGDvk":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":986,"_id":987,"_source":988,"_file":989,"_stem":990,"_extension":991},"/technology-blogs/zh/827","zh",false,"","全网最全的混合精度训练原理","Int8、fp16、fp32的数据类型仍会长期存在，针对不同应用场景和计算单元比例不同","2021-12-08","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/08/94885c894c11463bbb0c890d3d6be98d.png","technology-blogs","开发者分享",{"type":15,"children":16,"toc":922},"root",[17,24,33,51,56,73,79,85,92,98,139,145,152,158,165,170,177,182,189,194,201,212,227,233,263,269,287,308,323,329,341,350,360,369,379,388,398,407,419,428,438,447,458,467,477,486,513,522,534,586,598,607,616,626,644,653,663,672,681,690,699,717,726,736,745,756,765,773,781,789,806,821,836,851,866,874,884,892,906,914],{"type":18,"tag":19,"props":20,"children":21},"element","h1",{"id":8},[22],{"type":23,"value":8},"text",{"type":18,"tag":25,"props":26,"children":27},"p",{},[28],{"type":18,"tag":29,"props":30,"children":32},"img",{"alt":7,"src":31},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/33ef11ec5d7b4408b690a97f1cee5c58.gif",[],{"type":18,"tag":25,"props":34,"children":35},{},[36,38,44,46],{"type":23,"value":37},"作者：",{"type":18,"tag":39,"props":40,"children":41},"strong",{},[42],{"type":23,"value":43},"ZOMI酱",{"type":23,"value":45}," ｜",{"type":18,"tag":39,"props":47,"children":48},{},[49],{"type":23,"value":50},"来源：知乎",{"type":18,"tag":25,"props":52,"children":53},{},[54],{"type":23,"value":55},"通常我们训练神经网络模型的时候默认使用的数据类型为单精度FP32。近年来，为了加快训练时间、减少网络训练时候所占用的内存，并且保存训练出来的模型精度持平的条件下，业界提出越来越多的混合精度训练的方法。这里的混合精度训练是指在训练的过程中，同时使用单精度（FP32）和半精度（FP16）。",{"type":18,"tag":57,"props":58,"children":60},"h3",{"id":59},"_01-浮点数据类型",[61,66,68],{"type":18,"tag":39,"props":62,"children":63},{},[64],{"type":23,"value":65},"01",{"type":23,"value":67}," ",{"type":18,"tag":39,"props":69,"children":70},{},[71],{"type":23,"value":72},"浮点****数据类型",{"type":18,"tag":57,"props":74,"children":76},{"id":75},"浮点数据类型主要分为双精度fp64单精度fp32半精度fp16在神经网络模型的训练过程中一般默认采用单精度fp32浮点数据类型来表示网络模型权重和其他参数在了解混合精度训练之前这里简单了解浮点数据类型",[77],{"type":23,"value":78},"浮点数据类型主要分为双精度（Fp64）、单精度（Fp32）、半精度（FP16）。在神经网络模型的训练过程中，一般默认采用单精度（FP32）浮点数据类型，来表示网络模型权重和其他参数。在了解混合精度训练之前，这里简单了解浮点数据类型。",{"type":18,"tag":57,"props":80,"children":82},{"id":81},"根据ieee二进制浮点数算术标准ieee-754的定义浮点数据类型分为双精度fp64单精度fp32半精度fp16三种其中每一种都有三个不同的位来表示fp64表示采用8个字节共64位来进行的编码存储的一种数据类型同理fp32表示采用4个字节共32位来表示fp16则是采用2字节共16位来表示如图所示",[83],{"type":23,"value":84},"根据IEEE二进制浮点数算术标准（IEEE 754）的定义，浮点数据类型分为双精度（Fp64）、单精度（Fp32）、半精度（FP16）三种，其中每一种都有三个不同的位来表示。FP64表示采用8个字节共64位，来进行的编码存储的一种数据类型；同理，FP32表示采用4个字节共32位来表示；FP16则是采用2字节共16位来表示。如图所示：",{"type":18,"tag":25,"props":86,"children":87},{},[88],{"type":18,"tag":29,"props":89,"children":91},{"alt":7,"src":90},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/e3b80da275e6409aae9a14c17a62fcef.jpg",[],{"type":18,"tag":57,"props":93,"children":95},{"id":94},"从图中可以看出与fp32相比fp16的存储空间是fp32的一半fp32则是fp16的一半主要分为三个部分",[96],{"type":23,"value":97},"从图中可以看出，与FP32相比，FP16的存储空间是FP32的一半，FP32则是FP16的一半。主要分为三个部分：",{"type":18,"tag":99,"props":100,"children":101},"ul",{},[102,115,127],{"type":18,"tag":103,"props":104,"children":105},"li",{},[106],{"type":18,"tag":57,"props":107,"children":109},{"id":108},"最高位表示符号位sign-bit",[110],{"type":18,"tag":39,"props":111,"children":112},{},[113],{"type":23,"value":114},"最高位表示符号位sign 
bit。",{"type":18,"tag":103,"props":116,"children":117},{},[118],{"type":18,"tag":57,"props":119,"children":121},{"id":120},"中间表示指数位exponent-bit",[122],{"type":18,"tag":39,"props":123,"children":124},{},[125],{"type":23,"value":126},"中间表示指数位exponent bit。",{"type":18,"tag":103,"props":128,"children":129},{},[130],{"type":18,"tag":57,"props":131,"children":133},{"id":132},"低位表示分数位fraction-bit",[134],{"type":18,"tag":39,"props":135,"children":136},{},[137],{"type":23,"value":138},"低位表示分数位fraction bit。",{"type":18,"tag":57,"props":140,"children":142},{"id":141},"以fp16为例子第一位符号位sign表示正负符号接着5位表示指数exponent最后10位表示分数fraction公式为",[143],{"type":23,"value":144},"以FP16为例子，第一位符号位sign表示正负符号，接着5位表示指数exponent，最后10位表示分数fraction。公式为：",{"type":18,"tag":25,"props":146,"children":147},{},[148],{"type":18,"tag":29,"props":149,"children":151},{"alt":7,"src":150},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/97056fdfbe7641b787d87246d809fb1b.png",[],{"type":18,"tag":57,"props":153,"children":155},{"id":154},"同理一个规则化的fp32的真值为",[156],{"type":23,"value":157},"同理，一个规则化的FP32的真值为：",{"type":18,"tag":25,"props":159,"children":160},{},[161],{"type":18,"tag":29,"props":162,"children":164},{"alt":7,"src":163},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/e6f91039f18f4df6aa820120de61d449.png",[],{"type":18,"tag":25,"props":166,"children":167},{},[168],{"type":23,"value":169},"一个规格化的FP64的真值为：",{"type":18,"tag":25,"props":171,"children":172},{},[173],{"type":18,"tag":29,"props":174,"children":176},{"alt":7,"src":175},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/166be96843a0457a85989f27805b6cc8.png",[],{"type":18,"tag":25,"props":178,"children":179},{},[180],{"type":23,"value":181},"FP16可以表示的最大值为 0 11110 1111111111，计算方法为：",{"type":18,"tag":25,"props":183,"children":184},{},[185],{"type":18,"tag":29,"props":186,"children":188},{"alt":7,"src":187},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/2f8d82bb7c5a41869bc554c024c61289.png",[],{"type":18,"tag":25,"props":190,"children":191},{},[192],{"type":23,"value":193},"FP16可以表示的最小值为 0 00001 0000000000，计算方法为：",{"type":18,"tag":25,"props":195,"children":196},{},[197],{"type":18,"tag":29,"props":198,"children":200},{"alt":7,"src":199},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/6f2260a33d5e471eaaf8508286b56376.png",[],{"type":18,"tag":25,"props":202,"children":203},{},[204,206,210],{"type":23,"value":205},"因此FP16的最大取值范围是[-65504 - 
Therefore, the representable range of FP16 is [-65504, 65504], and the finest precision it can represent is

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/613426e443874115a5f159e13f5d6f44.png)

Values finer than this are flushed directly to 0.

### 02 Problems with FP16 Training

First, why do we need mixed precision at all? Compared with FP32, training a neural network in FP16 brings the following advantages:

- **Lower memory usage:** FP16 has half the bit width of FP32, so weights and other parameters take half the memory; the saved memory can hold a larger network model or more training data.
- **Faster communication:** in distributed training, especially of large models, communication overhead constrains overall training performance. A smaller communication bit width improves communication throughput, reduces waiting time, and speeds up the flow of data.
- **Higher compute throughput:** on dedicated AI accelerators such as the Huawei Ascend 910 and 310 series, or NVIDIA Volta-architecture GPUs such as the Titan V and Tesla V100, FP16 arithmetic runs faster than FP32.

However, FP16 also brings problems, the most important being (1) overflow/underflow and (2) rounding error.

**Data overflow:** this one is easy to understand. The representable range of FP16 is ![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/f6f8b7e9f44044af9a54afe5692a0969.png), whereas the representable range of FP32 is ![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/4ac0677986f541a38045abf480c30006.png).

- Clearly the FP16 range is much narrower than FP32's, so replacing FP32 with FP16 can cause overflow and underflow. In deep learning we need to compute the gradients (first derivatives) of the network weights, and gradients are usually much smaller than the weights themselves, so underflow is the common failure mode.
- **Rounding error:** a rounding error occurs when a backward gradient is very small: FP32 can still represent it, but after conversion to FP16 it is smaller than the smallest interval representable in that range, so the value cannot be represented exactly and precision is lost. For example, 0.00006666666 is represented normally in FP32 but becomes roughly 0.000067 after conversion to FP16; values that do not fall on the FP16 grid are forcibly rounded (see the short demo after this list).
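The NumPy sketch below (my own illustration, not from the original article) makes these failure modes visible; the printed values are approximate:

```python
import numpy as np

# Overflow: values beyond ~65504 become inf in FP16
print(np.float16(70000.0))      # inf

# Underflow: values far below FP16's smallest subnormal (~6e-8) flush to 0
print(np.float16(1e-8))         # 0.0

# Rounding error: fine in FP32, but snaps to the nearest FP16 grid point
x = np.float32(0.00006666666)
print(x, np.float16(x))         # ~6.666666e-05 vs ~6.664e-05
```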
### 03 Mixed Precision Techniques

To keep the benefits of FP16 in deep learning training while avoiding overflow and rounding error, we can train with a mix of FP16 and FP32 (mixed precision). Mixed precision training usually introduces three related techniques: weight backup, loss scaling, and precision accumulation.

### 3.1 Weight Backup

Weight backup is mainly used to address rounding error. The idea is that the activations, gradients, intermediate variables, and other data produced during training are all stored in FP16, while an extra FP32 copy of the weights is kept and used for the weight update, as shown below.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/4e354db3ebc54181a01a90a8bcecba70.jpg)

As the figure shows, the weights, activations, and gradients produced during computation are all stored and computed in FP16, with an additional FP32 backup of the weights. The weight-update formula is:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/6334a12dded14e7cb9f815c8398c61a1.png)

In deep models, lr × gradient can be extremely small; if this addition is done in FP16, rounding error is very likely and the update has no effect. We therefore keep an FP32 copy of the weights and make sure the entire update is carried out in FP32, i.e.:

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/3dfbe68523cb49b18bd599c57239d604.png)

Doesn't backing up the weights in FP32 make memory usage higher instead? Yes, the extra copy of the weights does increase memory during training. In practice, however, training memory splits into dynamic memory and static memory, and dynamic memory is 3-4x the static memory, consisting mostly of intermediate values and activations; the backed-up weights only add to static memory. As long as the dynamic values are stored in FP16, the overall memory footprint is still roughly halved compared with training the whole network in FP32.
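A minimal sketch of weight backup (my own illustration, assuming a plain SGD update; `fake_grad` is a hypothetical stand-in for the FP16 backward pass):

```python
import numpy as np

rng = np.random.default_rng(0)

# FP32 "master" copy of the weights; an FP16 working copy is used for compute.
master_w = rng.standard_normal(1024).astype(np.float32)
lr = 1e-2

def fake_grad(w_fp16: np.ndarray) -> np.ndarray:
    """Hypothetical stand-in for the FP16 backward pass: tiny constant gradients."""
    return np.full_like(w_fp16, np.float16(1e-3))

for step in range(10):
    w_fp16 = master_w.astype(np.float16)        # forward/backward run in FP16
    g_fp16 = fake_grad(w_fp16)

    # The update is applied to the FP32 master copy: lr * g is ~1e-5, which is
    # below FP16's resolution near |w| ~ 1 (about 1e-3) and would round away,
    # but it is easily representable in FP32.
    master_w -= lr * g_fp16.astype(np.float32)

print(master_w[:3])
```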
### 3.2 Loss Scaling

As shown in the figure below, a model trained purely in FP32 converges well, but with mixed precision training the model may fail to converge. The reason is that the gradient values are too small: representing them in FP16 causes underflow, so the model does not converge (the gray part of the figure). This is why the loss scaling technique is needed.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/19dd3fa5bee6424aa6a8979c1269995e.jpg)
Below is the distribution of activation gradients of one layer during training: 68% of the activation gradients are 0, and another 4% fall in the range 2^-32 to 2^-20. Representing this data directly in FP16 truncates the underflowing values, and all of those gradients become 0.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/060bc520f3604d47af477821b2797823.jpg)

To solve the underflow caused by overly small gradients, the loss value from the forward pass is amplified: the FP32 loss is multiplied by some scaling factor, which shifts the small values that would otherwise underflow into the range FP16 can represent. By the chain rule, scaling the loss scales the gradients of every layer during backpropagation, which is more efficient than scaling each layer's gradients separately.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/47c1a06828de42b1b62e0d3a98cecf84.jpg)

Loss scaling has to be implemented together with mixed precision; the main idea is as follows (a small numeric sketch follows the list):

- Scale-up phase: after the forward pass and before backpropagation, the resulting loss value (dLoss) is multiplied by 2^K.
- Scale-down phase: after backpropagation, the weight gradients are divided by 2^K and restored to FP32 for storage.
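The tiny NumPy sketch below (my own illustration; the factor 2^15 is a hypothetical choice of 2^K) shows how scaling rescues a gradient that would otherwise underflow in FP16:

```python
import numpy as np

scale = 2.0 ** 15                    # hypothetical loss-scaling factor 2^K

true_grad = 2e-8                     # a gradient that underflows in FP16
print(np.float16(true_grad))         # 0.0 -> the update would be lost

# Scale up: the loss (and hence every gradient) is multiplied before FP16 casting
scaled_fp16 = np.float16(true_grad * scale)
print(scaled_fp16)                   # ~6.554e-04, representable in FP16

# Scale down: divide back in FP32 before the weight update
recovered = np.float32(scaled_fp16) / scale
print(recovered)                     # ~2e-08 again
```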
**Dynamic loss scaling (Dynamic Loss Scaling):** the loss scaling described above uses a single default value to scale the loss. To make full use of FP16's dynamic range and better mitigate rounding error, we want the scaling factor to be as large as possible. To summarize, the dynamic loss scaling algorithm reduces the loss scale whenever the gradients overflow, and periodically tries to increase it again, so that training uses the largest loss-scaling factor that does not cause overflow and recovers precision as well as possible.

The dynamic loss scaling algorithm works as follows (a toy scaler sketch follows the list):

1. The algorithm starts from a relatively large scaling factor (e.g. 2^24) and then checks in each training iteration whether the values overflow (Infs/NaNs);
2. If there is no gradient overflow, the scale is left unchanged and iteration continues; if overflow is detected, the scaling factor is halved and the gradient update is re-checked, until the values no longer overflow;
3. In the late stage of training, the loss has nearly converged and the gradient updates tend to be small, so a larger loss-scaling factor can again be allowed to further guard against underflow;
4. Therefore, the dynamic loss scaling algorithm tries to increase the loss scale by a factor F every N (N=2000) iterations, and then goes back to step 2 to check for overflow.
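Here is a toy scaler that follows the steps above (my own sketch; the constants mirror the values mentioned in the text, and the class and variable names are hypothetical):

```python
import numpy as np

INIT_SCALE = 2.0 ** 24      # step 1: start from a large scaling factor
GROWTH_INTERVAL = 2000      # N: iterations between attempts to grow the scale
GROWTH_FACTOR = 2.0         # F: growth multiplier
BACKOFF_FACTOR = 0.5        # halve the scale on overflow

class DynamicLossScaler:
    def __init__(self):
        self.scale = INIT_SCALE
        self.good_steps = 0

    def update(self, grads: np.ndarray) -> bool:
        """Return True if this step's gradients are finite and safe to apply."""
        if not np.all(np.isfinite(grads)):       # step 2: Infs/NaNs detected
            self.scale *= BACKOFF_FACTOR         # back off and skip the update
            self.good_steps = 0
            return False
        self.good_steps += 1
        if self.good_steps % GROWTH_INTERVAL == 0:
            self.scale *= GROWTH_FACTOR          # steps 3-4: try a larger scale
        return True

scaler = DynamicLossScaler()
print(scaler.update(np.array([1e-3, np.inf], dtype=np.float16)), scaler.scale)  # False 2^23
```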
### 3.3 Precision Accumulation

In mixed precision training, matrix multiplications are performed in FP16, the accumulation inside the matrix multiplication is carried out in FP32, and the FP32 result is then converted back to FP16 for storage. In short, FP16 is used for the multiplications and FP32 for the additions, compensating for the precision that would otherwise be lost. This effectively reduces rounding error during computation and mitigates the loss of precision.

For example, the NVIDIA Volta architecture ships with Tensor Cores, which can accelerate FP16 mixed precision while preserving accuracy. Tensor Cores mainly implement FP16 matrix multiplication, with accumulation and storage in FP16 or FP32; using FP32 in the accumulation stage greatly reduces the precision loss of mixed precision training.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/30a11046fdf94c7f8a199309d1513c82.jpg)
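The effect of where you accumulate is easy to reproduce even without Tensor Cores. The toy sketch below (my own illustration) sums 10,000 small FP16 values, once accumulating in FP16 and once in FP32 as mixed precision does; the printed values are approximate:

```python
import numpy as np

x = np.full(10000, 0.01, dtype=np.float16)   # true sum is about 100

# Accumulate in FP16: the running sum stalls once its rounding step exceeds 0.01
acc16 = np.float16(0.0)
for v in x:
    acc16 = np.float16(acc16 + v)

# Accumulate in FP32, then store the result back in FP16 (the mixed precision way)
acc32 = np.float32(0.0)
for v in x:
    acc32 += np.float32(v)

print(acc16)              # roughly 32 -- far from the true value
print(np.float16(acc32))  # roughly 100
```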
### 04 Mixed Precision Training Strategies (Automatic Mixed Precision, AMP)

Mixed precision training is interesting well beyond deep learning. In iterative HPC computations, different mixed precision strategies can be used at the beginning, middle, and late stages of the iteration to improve performance while preserving numerical accuracy; using dynamic mixed precision to reach the best compute and memory efficiency is also a fairly cutting-edge research direction. Taking NVIDIA's APEX mixed precision library as an example, it provides four strategies: O0, which trains in FP32 by default; O1, which only optimizes part of the forward computation; O2, which uses mixed precision everywhere except the gradient update; and O3, which trains purely in FP16. See the figure below.

![](https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/ac802aa643894b2b8f0899a6bdf8dfca.jpg)

The interesting ones here are the O1 and O2 strategies.

In the O1 strategy, black and white lists are built over the actual tensors and ops to decide where to use FP16. Computations that are particularly FP16-friendly, such as GEMM and CNN convolutions, cast their input data and weights to FP16, while scalar and vector computations that behave better in FP32, such as softmax and batchnorm, keep running in FP32. Dynamic loss scaling is also provided.

In the O2 strategy, the model weights are cast to FP16 and the network inputs are cast to FP16 as well, while batchnorm stays in FP32. An FP32 copy of the model weights is kept so that the optimizer's gradient updates are consistently done in FP32, and dynamic loss scaling is again provided. In other words, O2 uses weight backup to reduce rounding error and loss scaling to avoid overflow and underflow.

Of course, the strategies above are hardware dependent, and not every AI accelerator uses them; for an in-house AI chip, you need to find the mixed precision strategy that suits it.
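As a usage sketch, switching between these strategies in APEX is a single argument, following the apex.amp documentation cited in reference [4]. This assumes Apex is installed and a CUDA device is available; the toy model and training loop here are hypothetical placeholders:

```python
import torch
from apex import amp   # NVIDIA Apex mixed precision library

model = torch.nn.Linear(1024, 1024).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# opt_level selects the strategy described above: "O0" (pure FP32) ... "O3" (pure FP16)
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")

for step in range(10):
    x = torch.randn(32, 1024, device="cuda")
    y = torch.randn(32, 1024, device="cuda")
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    with amp.scale_loss(loss, optimizer) as scaled_loss:   # amp applies loss scaling here
        scaled_loss.backward()
    optimizer.step()
```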
\"Scaling neural machine translation.\"arXiv preprint arXiv:1806.00187(2018).",{"type":18,"tag":25,"props":790,"children":791},{},[792],{"type":18,"tag":39,"props":793,"children":794},{},[795,797],{"type":23,"value":796},"[3] en.wikipedia.org/wiki/H（",{"type":18,"tag":798,"props":799,"children":803},"a",{"href":800,"rel":801},"https://link.zhihu.com/?target=https%3A//en.wikipedia.org/wiki/Half-precision%5C_floating-point%5C_format%EF%BC%89",[802],"nofollow",[804],{"type":23,"value":805},"https://link.zhihu.com/?target=https%3A//en.wikipedia.org/wiki/Half-precision\\_floating-point\\_format）",{"type":18,"tag":25,"props":807,"children":808},{},[809],{"type":18,"tag":39,"props":810,"children":811},{},[812,814],{"type":23,"value":813},"[4] apex.amp - Apex 0.1.0 documentation.（",{"type":18,"tag":798,"props":815,"children":818},{"href":816,"rel":817},"https://nvidia.github.io/apex/amp.html#opt-levels-and-properties%EF%BC%89",[802],[819],{"type":23,"value":820},"https://nvidia.github.io/apex/amp.html#opt-levels-and-properties）",{"type":18,"tag":25,"props":822,"children":823},{},[824],{"type":18,"tag":39,"props":825,"children":826},{},[827,829],{"type":23,"value":828},"[5] Automatic Mixed Precision for Deep Learning.（",{"type":18,"tag":798,"props":830,"children":833},{"href":831,"rel":832},"https://developer.nvidia.com/automatic-mixed-precision%EF%BC%89",[802],[834],{"type":23,"value":835},"https://developer.nvidia.com/automatic-mixed-precision）",{"type":18,"tag":25,"props":837,"children":838},{},[839],{"type":18,"tag":39,"props":840,"children":841},{},[842,844],{"type":23,"value":843},"[6] Training With Mixed Precision.（",{"type":18,"tag":798,"props":845,"children":848},{"href":846,"rel":847},"https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html%EF%BC%89",[802],[849],{"type":23,"value":850},"https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html）",{"type":18,"tag":25,"props":852,"children":853},{},[854],{"type":18,"tag":39,"props":855,"children":856},{},[857,859],{"type":23,"value":858},"[7] Dreaming.O：浅谈混合精度训练.（",{"type":18,"tag":798,"props":860,"children":863},{"href":861,"rel":862},"https://zhuanlan.zhihu.com/p/103685761%EF%BC%89",[802],[864],{"type":23,"value":865},"https://zhuanlan.zhihu.com/p/103685761）",{"type":18,"tag":25,"props":867,"children":868},{},[869],{"type":18,"tag":39,"props":870,"children":871},{},[872],{"type":23,"value":873},"扫描下方二维码加入MindSpore项目↓",{"type":18,"tag":25,"props":875,"children":876},{},[877],{"type":18,"tag":39,"props":878,"children":879},{},[880],{"type":18,"tag":29,"props":881,"children":883},{"alt":7,"src":882},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/12/09/b0e13d61edfd4603bf74d404f9294c3d.jpg",[],{"type":18,"tag":25,"props":885,"children":886},{},[887],{"type":18,"tag":39,"props":888,"children":889},{},[890],{"type":23,"value":891},"MindSpore官方资料",{"type":18,"tag":25,"props":893,"children":894},{},[895],{"type":18,"tag":39,"props":896,"children":897},{},[898,900],{"type":23,"value":899},"GitHub : ",{"type":18,"tag":798,"props":901,"children":904},{"href":902,"rel":903},"https://github.com/mindspore-ai/mindspore",[802],[905],{"type":23,"value":902},{"type":18,"tag":25,"props":907,"children":908},{},[909],{"type":18,"tag":39,"props":910,"children":911},{},[912],{"type":23,"value":913},"Gitee : https : 