[{"data":1,"prerenderedAt":441},["ShallowReactive",2],{"content-query-RECYN75Lbi":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":435,"_id":436,"_source":437,"_file":438,"_stem":439,"_extension":440},"/technology-blogs/zh/1703","zh",false,"","【MindSpore易点通】深度学习系列：其他优化算法","动量梯度下降法运行速度总是会快于标准的梯度下降算法，基本的思想就是计算梯度的指数加权平均数，并利用该梯度更新权重。","2022-08-11","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/08/15/2e71187f80424bf5913a48fd85e0aec5.png","technology-blogs","基础知识",{"type":15,"children":16,"toc":427},"root",[17,25,31,47,52,61,66,71,76,81,89,108,113,128,133,141,146,151,156,164,169,177,182,190,195,207,222,227,239,244,262,279,296,308,341,346,360,365,373,378,386,391,410,422],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore易点通深度学习系列其他优化算法",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"在前面几期的介绍中，我们已经学习了Mini-batch梯度下降算法、指数加权平均算法，大家是不是觉得不过瘾，别担心，今天小编一口气带来好几个！废话不多说，我们开干吧~",{"type":18,"tag":32,"props":33,"children":35},"h3",{"id":34},"动量梯度下降法",[36],{"type":18,"tag":37,"props":38,"children":39},"strong",{},[40],{"type":18,"tag":37,"props":41,"children":42},{},[43],{"type":18,"tag":37,"props":44,"children":45},{},[46],{"type":24,"value":34},{"type":18,"tag":26,"props":48,"children":49},{},[50],{"type":24,"value":51},"动量梯度下降法（Momentum），运行速度总是会快于标准的梯度下降算法，基本的思想就是计算梯度的指数加权平均数，并利用该梯度更新权重。",{"type":18,"tag":26,"props":53,"children":54},{},[55],{"type":18,"tag":56,"props":57,"children":60},"img",{"alt":58,"src":59},"cke_327296.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012520.38144517432861469623821069835991:50530814012746:2400:34486EDF5B7A7DE737C0DCB8582E4D8205CA90D231F682DB36F3FA3743D74900.png",[],{"type":18,"tag":26,"props":62,"children":63},{},[64],{"type":24,"value":65},"在处理优化成本
函数时，如上图所示，红点代表最小值的位置，假设从蓝色点开始梯度下降法，无论是batch还是mini-batch下降法，都需要一步一步计算，需要很多计算步骤，浪费很多时间；但是如果使用较大的学习率（紫色箭头），结果可能又会偏离函数范围，为了避免摆动过大，所以需要选用一个较小的学习率。",{"type":18,"tag":26,"props":67,"children":68},{},[69],{"type":24,"value":70},"总体来说，纵向上我们希望学习慢一点；而横向上我们更希望加快学习，快速从左向右移，移向最小值红点。",{"type":18,"tag":26,"props":72,"children":73},{},[74],{"type":24,"value":75},"所以使用动量梯度下降法时，需要在第t次迭代的过程中，计算微分dW，db；",{"type":18,"tag":26,"props":77,"children":78},{},[79],{"type":24,"value":80},"然后重新赋值权重，减缓梯度下降的幅度；最终我们会发现纵向的摆动平均值接近于零，横向的平均值较大，因此具体计算算法为：",{"type":18,"tag":26,"props":82,"children":83},{},[84],{"type":18,"tag":56,"props":85,"children":88},{"alt":86,"src":87},"cke_360068.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012626.13503143046195968922874971554786:50530814012746:2400:C72DE2BDAAEC61F4C64EDF081013EF2A05379C769CE1743EA9C18EB5A3D0342A.png",[],{"type":18,"tag":26,"props":90,"children":91},{},[92,94,99,101,106],{"type":24,"value":93},"两个超参数学习率",{"type":18,"tag":56,"props":95,"children":98},{"alt":96,"src":97},"cke_389641.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012657.36511742895815986932919634830324:50530814012746:2400:27650D342C00EB32731520AFB0F261E6A4D1B223D8D60037A887DB14985E1DDA.png",[],{"type":24,"value":100},"和参数",{"type":18,"tag":56,"props":102,"children":105},{"alt":103,"src":104},"cke_411068.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012712.85452208843396383895227906119031:50530814012746:2400:2F16163326707FD2BAEBD5678F83C7F885D250181157402AD0BF95583F05F1DE.png",[],{"type":24,"value":107},"的设置也是需要注意技巧的，控制着指数加权平均数，常用值为0.9，VdW初始值为0，跟W拥有相同的维数，vdb的初始值也是向量零，和b是同一维数。",{"type":18,"tag":26,"props":109,"children":110},{},[111],{"type":24,"value":112},"动量梯度下降法就是这么简单，通常可以用来加快学习算法。",{"type":18,"tag":32,"props":114,"children":116},{
"id":115},"rmsprop",[117],{"type":18,"tag":37,"props":118,"children":119},{},[120],{"type":18,"tag":37,"props":121,"children":122},{},[123],{"type":18,"tag":37,"props":124,"children":125},{},[126],{"type":24,"value":127},"RMSprop",{"type":18,"tag":26,"props":129,"children":130},{},[131],{"type":24,"value":132},"其实RMSprop算法（root mean square prop）也可以加速梯度下降。",{"type":18,"tag":26,"props":134,"children":135},{},[136],{"type":18,"tag":56,"props":137,"children":140},{"alt":138,"src":139},"cke_435911.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012732.04869331029913440742688977417108:50530814012746:2400:7E479C16C690C4B33B3AA348F6156EF64E214E8F002FE3921DE2B90E46B4F87A.png",[],{"type":18,"tag":26,"props":142,"children":143},{},[144],{"type":24,"value":145},"假设纵轴代表参数b，横轴代表参数W，可能有W1，W2或者其它参数，这里我们简化一下，暂时使用W表示。",{"type":18,"tag":26,"props":147,"children":148},{},[149],{"type":24,"value":150},"RMSprop算法可以一方面减缓b方向的学习，同时不会减缓横轴方向的学习。",{"type":18,"tag":26,"props":152,"children":153},{},[154],{"type":24,"value":155},"在第t次迭代中，该算法会计算mini-batch的微分dW，db，保留这个指数加权平均数：",{"type":18,"tag":26,"props":157,"children":158},{},[159],{"type":18,"tag":56,"props":160,"children":163},{"alt":161,"src":162},"cke_474171.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012803.75753513172691230019217835791467:50530814012746:2400:C65945AA47D1804AA8B4349E8022771873154371DF1590161D56C26D28CD1D76.png",[],{"type":18,"tag":26,"props":165,"children":166},{},[167],{"type":24,"value":168},"更新参数：",{"type":18,"tag":26,"props":170,"children":171},{},[172],{"type":18,"tag":56,"props":173,"children":176},{"alt":174,"src":175},"cke_495833.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012817.87701586991499638037967588878907:50530814012746:2400:9A676826DF51B10E9FD024B4938255E4
BC89A4E5F28AE8923B7AC8145BA17F73.png",[],{"type":18,"tag":26,"props":178,"children":179},{},[180],{"type":24,"value":181},"由于我们在横轴方向希望学习速度快，而在垂直方向希望减缓纵轴上的摆动，所以这就需要SdW相对较小，Sdb相对较大。",{"type":18,"tag":26,"props":183,"children":184},{},[185],{"type":18,"tag":56,"props":186,"children":189},{"alt":187,"src":188},"cke_517583.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012834.50998021369141958403858336179779:50530814012746:2400:09E4B38DABD820B4A0A2457D6CDFE5719BE6D9730A9E6B695F59A891601D4F7F.png",[],{"type":18,"tag":26,"props":191,"children":192},{},[193],{"type":24,"value":194},"RMSprop算法的更新过程如图中绿色线部分一样，可以用一个更大的学习率加快学习，但其实dW实际是一个高维度的参数向量，实际使用中需要注意这点。",{"type":18,"tag":26,"props":196,"children":197},{},[198,200,205],{"type":24,"value":199},"RMSprop均方根算法，将微分先平方，最后使用平方根，同时我们在处理的时候，通常会在分母上加上一个很小的",{"type":18,"tag":56,"props":201,"children":204},{"alt":202,"src":203},"cke_539438.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012851.51879799824200638519268433583787:50530814012746:2400:38DFE120374CD79268C23BC79852DFB09DD60ACAB65AC13EA4F2E536B0FCB2D2.png",[],{"type":24,"value":206},"（10^-8）以保证算法不会除以0。",{"type":18,"tag":32,"props":208,"children":210},{"id":209},"adam-优化算法",[211],{"type":18,"tag":37,"props":212,"children":213},{},[214],{"type":18,"tag":37,"props":215,"children":216},{},[217],{"type":18,"tag":37,"props":218,"children":219},{},[220],{"type":24,"value":221},"Adam 优化算法",{"type":18,"tag":26,"props":223,"children":224},{},[225],{"type":24,"value":226},"Adam（Adam optimization 
algorithm）优化算法就很巧妙了，是上述两个算法的结合。",{"type":18,"tag":26,"props":228,"children":229},{},[230,232,237],{"type":24,"value":231},"1.初始化：",{"type":18,"tag":56,"props":233,"children":236},{"alt":234,"src":235},"cke_561427.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012910.18690695337672895455723258774080:50530814012746:2400:91F88C95C4BD3C35F0D8E40CAD2FF466816A4600D69BE25822878E5ED0B3E8CD.png",[],{"type":24,"value":238},"；",{"type":18,"tag":26,"props":240,"children":241},{},[242],{"type":24,"value":243},"2.在第t次迭代中，用mini-batch梯度下降法计算dW，db；",{"type":18,"tag":26,"props":245,"children":246},{},[247,249,254,256,261],{"type":24,"value":248},"3.计算Momentum指数加权平均数：",{"type":18,"tag":56,"props":250,"children":253},{"alt":251,"src":252},"cke_583533.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012930.21873551428199557672272383963365:50530814012746:2400:39C0DF3952C71E9774561D8EC8FBFAF72CBBFD589272DCD3150B7B94CAC81DD8.png",[],{"type":24,"value":255},"，",{"type":18,"tag":56,"props":257,"children":260},{"alt":258,"src":259},"cke_605731.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812012946.16588519654385948176672618025198:50530814012746:2400:32A93BBA7CCA5FD01CB33C9D25E1026E5067B335EE87696BD8A4477983B5164B.png",[],{"type":24,"value":238},{"type":18,"tag":26,"props":263,"children":264},{},[265,267,272,273,278],{"type":24,"value":266},"4.RMSprop更新：",{"type":18,"tag":56,"props":268,"children":271},{"alt":269,"src":270},"cke_660368.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013015.18011278172251761846160991749909:50530814012746:2400:2A708CB601B8790736AB9814D9335B20935ED9A401175AEA857FD6932714DA31.png",[],{"type":24,"value":255},{"type":18,"tag":56,"props":274
,"children":277},{"alt":275,"src":276},"cke_689413.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013029.74713222619881691413883546209275:50530814012746:2400:754D39BB6D164A5647711EC48326E08969C6DC49A6AC33F562AA39A13B31B861.png",[],{"type":24,"value":238},{"type":18,"tag":26,"props":280,"children":281},{},[282,284,289,290,295],{"type":24,"value":283},"5.偏差修正：",{"type":18,"tag":56,"props":285,"children":288},{"alt":286,"src":287},"cke_718663.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013048.73109190089182343311958103552355:50530814012746:2400:503036D218A6D47C048DF17494E59D8B45F6353FE92E879076FD52F5DF1C7A4C.png",[],{"type":24,"value":255},{"type":18,"tag":56,"props":291,"children":294},{"alt":292,"src":293},"cke_747960.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013107.24435144363394964547084971087318:50530814012746:2400:075FE904930ADCB578B8ACBDB8C0C1EE2125D58C515890584A9A162ADC0DD175.png",[],{"type":24,"value":238},{"type":18,"tag":26,"props":297,"children":298},{},[299,301,306],{"type":24,"value":300},"6.更新W和b：",{"type":18,"tag":56,"props":302,"children":305},{"alt":303,"src":304},"cke_777446.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013129.05101357934575437297365355322177:50530814012746:2400:F58ED439C949F306C32309ED44233A5DB5F373CF84F44839DFCC41E5E0F44A40.png",[],{"type":24,"value":307},"。",{"type":18,"tag":26,"props":309,"children":310},{},[311,313,318,320,325,327,332,334,339],{"type":24,"value":312},"之前我们已经提过，",{"type":18,"tag":56,"props":314,"children":317},{"alt":315,"src":316},"cke_815709.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.2022081201
3152.57662384810951326193194333753081:50530814012746:2400:BF6A595068038849C7A34230CB2DCB5227D4ABE6DEF6A8AE0818B257B0CFACEF.png",[],{"type":24,"value":319},"作为dW的移动平均数，通常选用0.9，超参数",{"type":18,"tag":56,"props":321,"children":324},{"alt":322,"src":323},"cke_854151.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013212.53696928198049953158982798916685:50530814012746:2400:2B0C62EF587796FDD8CB1D77115AE890AFDD6C68322BCE573F34B1B0F03A52D4.png",[],{"type":24,"value":326},"推荐使用0.999，",{"type":18,"tag":56,"props":328,"children":331},{"alt":329,"src":330},"cke_883980.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013227.78596439194037767366512777141162:50530814012746:2400:400367A87388F1C0C9AEF6EB4F4D24CDEBD0EC74A1F454DAB7C70C4DE04210AE.png",[],{"type":24,"value":333},"设置为10^-8，这样我们就可以尝试不同的",{"type":18,"tag":56,"props":335,"children":338},{"alt":336,"src":337},"cke_943961.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013305.08129358153172583426803237893489:50530814012746:2400:187536EBB525E3E7E1E49A1C513ACB9DD65FD7A159A07D7E298CBAA70599FD86.png",[],{"type":24,"value":340},"，看看哪个效果最好啦。",{"type":18,"tag":26,"props":342,"children":343},{},[344],{"type":24,"value":345},"Adam算法结合了Momentum和RMSprop梯度下降法，是一种极其常用的学习算法，被证明能有效适用于不同神经网络，适用于广泛的结构。",{"type":18,"tag":32,"props":347,"children":349},{"id":348},"学习率衰减",[350],{"type":18,"tag":37,"props":351,"children":352},{},[353],{"type":18,"tag":37,"props":354,"children":355},{},[356],{"type":18,"tag":37,"props":357,"children":358},{},[359],{"type":24,"value":348},{"type":18,"tag":26,"props":361,"children":362},{},[363],{"type":24,"value":364},"学习率衰减（Learning rate 
decay）：随时间慢慢减少学习率，我们将之称为学习率衰减。",{"type":18,"tag":26,"props":366,"children":367},{},[368],{"type":18,"tag":56,"props":369,"children":372},{"alt":370,"src":371},"cke_974156.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013324.36601631901922303586439541333523:50530814012746:2400:C7714A0CDA14B2B6380D98FDE0628BB2ED84EC6774BD7AA3F47D4B3533A473AC.png",[],{"type":18,"tag":26,"props":374,"children":375},{},[376],{"type":24,"value":377},"假设使用mini-batch梯度下降法，mini-batch数量选择64或者128个样本，在迭代过程中会有噪音（蓝色线），所以算法不会真正收敛，只能在附近摆动。",{"type":18,"tag":26,"props":379,"children":380},{},[381],{"type":18,"tag":56,"props":382,"children":385},{"alt":383,"src":384},"cke_1011607.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013340.53343319815061233180415281175929:50530814012746:2400:8FC9F542A6D06A568D7AFDDDB2950EA5417E8E7623F0051DA672DC382C3478F5.png",[],{"type":18,"tag":26,"props":387,"children":388},{},[389],{"type":24,"value":390},"但要慢慢减少学习率的话，在初期的时候，学习率较大，学习相对较快；但随着学习率变小，移动步伐也会变慢变小，所以曲线（绿色线）会在最小值附近的一小块区域里摆动，而不是大幅度在最小值附近摆动。",{"type":18,"tag":26,"props":392,"children":393},{},[394,396,401,403,408],{"type":24,"value":395},"学习率衰减一代就要遍历一次数据，拆分不同的mini-batch，第一次遍历训练集叫做第一代，第二次就是第二代，依此类推，将学习率设为",{"type":18,"tag":56,"props":397,"children":400},{"alt":398,"src":399},"cke_1040450.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013408.92725283230109190078571847005756:50530814012746:2400:B2C04C62D4BC219A182CE48BCA5A57FFBD0BF1D9EAE35731318AF505356A2CC2.png",[],{"type":24,"value":402},"（decay-rate称为衰减率，epoch-num为代数，",{"type":18,"tag":56,"props":404,"children":407},{"alt":405,"src":406},"cke_1074591.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013427.9422289602621945803705163535
7646:50530814012746:2400:AFD84F963DD84D24D6A7F0D736A6D3AE53A3E4AE8ED4DABB8168D9BBFD79C4B8.png",[],{"type":24,"value":409},"为初始学习率）。",{"type":18,"tag":26,"props":411,"children":412},{},[413,415,420],{"type":24,"value":414},"当然也可以使用其他公式，比如指数衰减，其中a0是小于1的值，如",{"type":18,"tag":56,"props":416,"children":419},{"alt":417,"src":418},"cke_1105324.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/5e4/e02/8f7/550440a1fe5e4e028f77e5cf18005adc.20220812013446.77080474685750154308354873969238:50530814012746:2400:E558254BB6B471495E8081B684C3FC2994B8AFF3DD2850A282FF845A679C0F71.png",[],{"type":24,"value":421},"，学习率呈指数下降。",{"type":18,"tag":26,"props":423,"children":424},{},[425],{"type":24,"value":426},"好啦，这次的优化算法就全部介绍到这里啦，尤其需要注意的是参数和公式的理解！下次给大家带来超参数调试的讲解，挥手十分钟再见！",{"title":7,"searchDepth":428,"depth":428,"links":429},4,[430,432,433,434],{"id":34,"depth":431,"text":34},3,{"id":115,"depth":431,"text":127},{"id":209,"depth":431,"text":221},{"id":348,"depth":431,"text":348},"markdown","content:technology-blogs:zh:1703.md","content","technology-blogs/zh/1703.md","technology-blogs/zh/1703","md",1776506115309]