[{"data":1,"prerenderedAt":662},["ShallowReactive",2],{"content-query-cK6QuDZ29b":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":656,"_id":657,"_source":658,"_file":659,"_stem":660,"_extension":661},"/technology-blogs/zh/320","zh",false,"","MindSpore大V博文之创新算法系列(三）","本篇文章会介绍深度概率学习的第三部分：神经网络与贝叶斯神经网络，并在MindSpore上进行代码的实践。","2020-10-30","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/3ae190fe33f14c4f97d0badc79c9f578.png","technology-blogs","大V博文",{"type":15,"children":16,"toc":653},"root",[17,25,31,37,50,61,74,79,84,92,97,105,117,124,135,142,153,158,165,170,178,183,190,195,213,220,225,233,243,250,255,262,267,275,325,359,366,377,384,400,408,466,473,484,489,497,507,512,517,527,532,540,552,561,566,571,579,584,592,604,609,614,619,624,636,641],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore大v博文之创新算法系列三",[23],{"type":24,"value":8},"text",{"type":18,"tag":19,"props":26,"children":28},{"id":27},"mindspore神经网络与贝叶斯神经网络",[29],{"type":24,"value":30},"MindSpore神经网络与贝叶斯神经网络",{"type":18,"tag":32,"props":33,"children":34},"p",{},[35],{"type":24,"value":36},"作者：于璠",{"type":18,"tag":32,"props":38,"children":39},{},[40,42],{"type":24,"value":41},"作者主页：",{"type":18,"tag":43,"props":44,"children":48},"a",{"href":45,"rel":46},"https://www.zhihu.com/people/yu-fan-42-9",[47],"nofollow",[49],{"type":24,"value":45},{"type":18,"tag":32,"props":51,"children":52},{},[53,55],{"type":24,"value":54},"原文链接：",{"type":18,"tag":43,"props":56,"children":59},{"href":57,"rel":58},"https://zhuanlan.zhihu.com/p/268725084",[47],[60],{"type":24,"value":57},{"type":18,"tag":32,"props":62,"children":63},{},[64,66,72],{"type":24,"value":65},"上篇文章介绍了MindSpore深度概率学习中的概率推断算法和概率模型， 
本篇文章会介绍深度概率学习的第三部分：",{"type":18,"tag":67,"props":68,"children":69},"strong",{},[70],{"type":24,"value":71},"神经网络与贝叶斯神经网络",{"type":24,"value":73},"，并在MindSpore上进行代码的实践。",{"type":18,"tag":32,"props":75,"children":76},{},[77],{"type":24,"value":78},"1. 深度概率特性",{"type":18,"tag":32,"props":80,"children":81},{},[82],{"type":24,"value":83},"2. 深度概率推断算法与概率模型",{"type":18,"tag":32,"props":85,"children":86},{},[87],{"type":18,"tag":67,"props":88,"children":89},{},[90],{"type":24,"value":91},"3. 神经网络与贝叶斯神经网络",{"type":18,"tag":32,"props":93,"children":94},{},[95],{"type":24,"value":96},"4. 贝叶斯应用工具箱",{"type":18,"tag":32,"props":98,"children":99},{},[100],{"type":18,"tag":67,"props":101,"children":102},{},[103],{"type":24,"value":104},"从神经网络讲起",{"type":18,"tag":32,"props":106,"children":107},{},[108,110,115],{"type":24,"value":109},"说到神经网络想必大家都不陌生，下图就是一个典型的全连接神经网络，网络结构供四层（包括输入层和输出层），对应3个权重矩阵",{"type":18,"tag":111,"props":112,"children":114},"img",{"alt":7,"src":113},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/3da078aa67ad422b9504d923994f042f.png",[],{"type":24,"value":116},"。神经网络的训练过程就是调整三个权重矩阵的过程。",{"type":18,"tag":32,"props":118,"children":119},{},[120],{"type":18,"tag":111,"props":121,"children":123},{"alt":7,"src":122},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/0300263460cc49e78539006073354ec0.jpg",[],{"type":18,"tag":32,"props":125,"children":126},{},[127,129,133],{"type":24,"value":128},"神经网络可以看作一个条件分布模型 ",{"type":18,"tag":111,"props":130,"children":132},{"alt":7,"src":131},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/b1a1902d5878448dac50fb4237a9c002.png",[],{"type":24,"value":134},"给定输入 x ，神经网络通过权重矩阵w 为每个可能的输出 y 分配概率。权重矩阵w可以通过极大似然估计（maximum likelihood 
estimation，MLE）求解得到：",{"type":18,"tag":32,"props":136,"children":137},{},[138],{"type":18,"tag":111,"props":139,"children":141},{"alt":7,"src":140},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/40a92db6416d470fb4309fb0b0f15e90.jpg",[],{"type":18,"tag":32,"props":143,"children":144},{},[145,147,151],{"type":24,"value":146},"其中， ",{"type":18,"tag":111,"props":148,"children":150},{"alt":7,"src":149},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/900a34cf5248477e87a560e39720a422.png",[],{"type":24,"value":152}," 一般使用梯度下降，基于反向传播实现权重矩阵 的求解。",{"type":18,"tag":32,"props":154,"children":155},{},[156],{"type":24,"value":157},"由于神经网络容易出现过拟合现象，因此需要引入正则化，即对参数 设置先验概率，这时的模型训练可以视为最大后验估计（Maximum Posteriori, MAP）：",{"type":18,"tag":32,"props":159,"children":160},{},[161],{"type":18,"tag":111,"props":162,"children":164},{"alt":7,"src":163},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/612875763c064d5391772f1d202e8369.jpg",[],{"type":18,"tag":32,"props":166,"children":167},{},[168],{"type":24,"value":169},"神经网络已经为许多机器学习和人工智能应用提供了最先进的结果，如图像分类、目标检测和语音识别等。但是，由于神经网络是一个黑箱模型，人们难以理解它的内部工作机制和决策过程，因此很难证明黑箱模型的决策是正确的且难以控制和避免其异常行为，无法应用在自动驾驶和医疗决策等高风险领域。",{"type":18,"tag":32,"props":171,"children":172},{},[173],{"type":18,"tag":67,"props":174,"children":175},{},[176],{"type":24,"value":177},"什么是贝叶斯神经网络？",{"type":18,"tag":32,"props":179,"children":180},{},[181],{"type":24,"value":182},"贝叶斯理论提供了一种自然的方法来解释预测中的不确定性，并且能够洞察这些决策是如何做出的。将贝叶斯理论和神经网络相结合得到的贝叶斯神经网络可以帮助我们解决神经网络目前面临的许多挑战。",{"type":18,"tag":32,"props":184,"children":185},{},[186],{"type":18,"tag":111,"props":187,"children":189},{"alt":7,"src":188},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/47ad4bc604654f46a689d2c0f93ee79a.jpg",[],{"type":18,"tag":32,"props":191,"children":192},{},[193],{"type":24,"value":194},"从上图可以看出，贝叶斯神经网络与神经网络的不同之处在于，其权重参数是服从分布的随机变量，而不再是确定的值。",{"type":18,"tag":32,"props":196,"children":197},{},[198,200,2
05,207,211],{"type":24,"value":199},"在贝叶斯神经网络的学习过程中，模型的权重参数是基于我们已知的和可以观察到的信息推导得到的。这是逆概率问题，可以利用贝叶斯定理加以求解。模型参数的分布取决于我们观测到的数据 ",{"type":18,"tag":111,"props":201,"children":204},{"alt":202,"src":203},"[公式]","https://www.zhihu.com/equation?tex=%5Cmathcal%7BD%7D",[],{"type":24,"value":206}," ，我们称之为后验分布 ",{"type":18,"tag":111,"props":208,"children":210},{"alt":202,"src":209},"https://www.zhihu.com/equation?tex=P%28%5Cmathbf%7Bw%7D%7C%5Cmathcal%7BD%7D%29",[],{"type":24,"value":212}," 。根据贝叶斯定理，可以通过下面的公式求解 ：",{"type":18,"tag":32,"props":214,"children":215},{},[216],{"type":18,"tag":111,"props":217,"children":219},{"alt":7,"src":218},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/de45411e58bf4ad6bcb0d70a1366e3a1.jpg",[],{"type":18,"tag":32,"props":221,"children":222},{},[223],{"type":24,"value":224},"其中， 是我们对模型参数 的先验认知， 可以通过我们定义的网络计算得到。但是，这也是难解的，因此需要引入变分推理。",{"type":18,"tag":32,"props":226,"children":227},{},[228],{"type":18,"tag":67,"props":229,"children":230},{},[231],{"type":24,"value":232},"变分推理",{"type":18,"tag":32,"props":234,"children":235},{},[236,238,241],{"type":24,"value":237},"上篇文章已经介绍过什么是变分推理了，这里就不再详细展开了。简单来说，变分推理就是使用一个由一组参数 控制的分布 去逼近真正的后验分布 ",{"type":18,"tag":111,"props":239,"children":240},{"alt":202,"src":209},[],{"type":24,"value":242}," ，比如用高斯来近似的话，参数 就是均值 和方差 。这个过程可以通过最小化两个分布的Kullback-Leibler 
(KL)散度实现：",{"type":18,"tag":32,"props":244,"children":245},{},[246],{"type":18,"tag":111,"props":247,"children":249},{"alt":7,"src":248},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/9ccb9e0864984f689f3edf91f459dc41.jpg",[],{"type":18,"tag":32,"props":251,"children":252},{},[253],{"type":24,"value":254},"写成目标函数的形式就是：",{"type":18,"tag":32,"props":256,"children":257},{},[258],{"type":18,"tag":111,"props":259,"children":261},{"alt":7,"src":260},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/4f3973a3282a435bb30b8bff88bf6322.png",[],{"type":18,"tag":32,"props":263,"children":264},{},[265],{"type":24,"value":266},"这个目标函数可以分成两个部分：第一部分是变分后验与先验的KL散度，是复杂性代价，描述了权重和先验的契合程度；第二部分的取值依赖于训练数据，是似然代价，描述对样本的拟合程度。然而这个目标函数还是无法求解的，接下来需要采用梯度下降和各种近似。",{"type":18,"tag":32,"props":268,"children":269},{},[270],{"type":18,"tag":67,"props":271,"children":272},{},[273],{"type":24,"value":274},"无偏蒙特卡罗梯度",{"type":18,"tag":32,"props":276,"children":277},{},[278,280,284,286,290,292,296,298,302,304,308,310,314,316,319,320,323],{"type":24,"value":279},"熟悉变分自编码器（Variational Auto-Encoders，VAE）的同学都知道，VAE中引入了一个巧妙的重参数化（reparameterize）操作：对于 ",{"type":18,"tag":111,"props":281,"children":283},{"alt":202,"src":282},"https://www.zhihu.com/equation?tex=+z%5Csim~%5Cmathcal%7BN%7D%5Cleft%28+%5Cmu%2C%5Csigma+%5Cright%29",[],{"type":24,"value":285}," ，直接从 ",{"type":18,"tag":111,"props":287,"children":289},{"alt":202,"src":288},"https://www.zhihu.com/equation?tex=%5Cmathcal%7BN%7D%5Cleft%28+%5Cmu%2C%5Csigma+%5Cright%29+",[],{"type":24,"value":291}," 采样会使 ",{"type":18,"tag":111,"props":293,"children":295},{"alt":202,"src":294},"https://www.zhihu.com/equation?tex=%5Cmu+",[],{"type":24,"value":297}," 和 ",{"type":18,"tag":111,"props":299,"children":301},{"alt":202,"src":300},"https://www.zhihu.com/equation?tex=+%5Csigma+",[],{"type":24,"value":303}," 不可微。为了得到他们的梯度，将 z 重写为 
",{"type":18,"tag":111,"props":305,"children":307},{"alt":202,"src":306},"https://www.zhihu.com/equation?tex=z%3D%5Csigma%5Cepsilon%2B%5Cmu",[],{"type":24,"value":309}," ，其中 ",{"type":18,"tag":111,"props":311,"children":313},{"alt":202,"src":312},"https://www.zhihu.com/equation?tex=%5Cepsilon%5Csim%5Cmathcal%7BN%7D%5Cleft%280%2C1+%5Cright%29",[],{"type":24,"value":315}," ，这样便可以先从标准高斯分布采样出随机量，然后可导地引入使",{"type":18,"tag":111,"props":317,"children":318},{"alt":202,"src":294},[],{"type":24,"value":297},{"type":18,"tag":111,"props":321,"children":322},{"alt":202,"src":300},[],{"type":24,"value":324},"。",{"type":18,"tag":32,"props":326,"children":327},{},[328,330,334,336,340,342,346,347,351,353,357],{"type":24,"value":329},"在此基础上，论文[1]证明了给定一个随机变量 ",{"type":18,"tag":111,"props":331,"children":333},{"alt":202,"src":332},"https://www.zhihu.com/equation?tex=%5Cepsilon",[],{"type":24,"value":335}," 和概率密度 ",{"type":18,"tag":111,"props":337,"children":339},{"alt":202,"src":338},"https://www.zhihu.com/equation?tex=+q%5Cleft%28+%5Cepsilon+%5Cright%29",[],{"type":24,"value":341}," ，让 ",{"type":18,"tag":111,"props":343,"children":345},{"alt":202,"src":344},"https://www.zhihu.com/equation?tex=%5Cmathbf%7Bw%7D%3Dt%5Cleft%28+%5Ctheta%2C+%5Cepsilon+%5Cright%29",[],{"type":24,"value":309},{"type":18,"tag":111,"props":348,"children":350},{"alt":202,"src":349},"https://www.zhihu.com/equation?tex=t%5Cleft%28+%5Ctheta%2C+%5Cepsilon+%5Cright%29",[],{"type":24,"value":352}," 是一个确定性的函数。如果满足 ",{"type":18,"tag":111,"props":354,"children":356},{"alt":202,"src":355},"https://www.zhihu.com/equation?tex=q%5Cleft%28+%5Cepsilon+%5Cright%29d%5Cepsilon%3Dq%28%5Cmathbf%7Bw%7D%7C%5Ctheta%29d%5Cmathbf%7Bw%7D",[],{"type":24,"value":358}," 
，则对于期望也可以使用类似操作得到可导的对期望偏导的无偏估计：",{"type":18,"tag":32,"props":360,"children":361},{},[362],{"type":18,"tag":111,"props":363,"children":365},{"alt":7,"src":364},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/c8ebae136f3345e488cc9f5e7d71e60c.png",[],{"type":18,"tag":32,"props":367,"children":368},{},[369,371,375],{"type":24,"value":370},"令函数 ",{"type":18,"tag":111,"props":372,"children":374},{"alt":202,"src":373},"https://www.zhihu.com/equation?tex=f%28%5Cmathbf%7Bw%7D%2C%5Ctheta%29%3Dlog+q%28%5Cmathbf%7Bw%7D%7C%5Ctheta%29-log+P%28%5Cmathbf%7Bw%7D%29P%28%5Cmathcal%7BD%7D%7C%5Cmathbf%7Bw%7D%29",[],{"type":24,"value":376}," ，使用蒙特卡罗采样来评估期望值，此时目标函数（1）可以近似为：",{"type":18,"tag":32,"props":378,"children":379},{},[380],{"type":18,"tag":111,"props":381,"children":383},{"alt":7,"src":382},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/2a81760ddcfb452198a5e09070547d95.jpg",[],{"type":18,"tag":32,"props":385,"children":386},{},[387,388,392,394,398],{"type":24,"value":146},{"type":18,"tag":111,"props":389,"children":391},{"alt":202,"src":390},"https://www.zhihu.com/equation?tex=%5Cmathbf%7Bw%7D%5E%7B%28i%29%7D+",[],{"type":24,"value":393}," 代表从变分后验 ",{"type":18,"tag":111,"props":395,"children":397},{"alt":202,"src":396},"https://www.zhihu.com/equation?tex=+q%28%5Cmathbf%7Bw%7D%5E%7B%28i%29%7D%7C%5Ctheta%29+",[],{"type":24,"value":399}," 采样到的第 i 个蒙特卡罗样本。",{"type":18,"tag":32,"props":401,"children":402},{},[403],{"type":18,"tag":67,"props":404,"children":405},{},[406],{"type":24,"value":407},"高斯变分后验求解",{"type":18,"tag":32,"props":409,"children":410},{},[411,413,417,419,423,425,429,431,435,437,441,443,447,449,453,455,459,460,464],{"type":24,"value":412},"假设变分后验服从高斯分布 ",{"type":18,"tag":111,"props":414,"children":416},{"alt":202,"src":415},"https://www.zhihu.com/equation?tex=+%5Cmathcal%7BN%7D%5Cleft%28+%5Cmu%2C%5Csigma+%5Cright%29+",[],{"type":24,"value":418}," ，则权重参数 
",{"type":18,"tag":111,"props":420,"children":422},{"alt":202,"src":421},"https://www.zhihu.com/equation?tex=+%5Cmathbf%7Bw%7D+",[],{"type":24,"value":424}," 可以通过对标准高斯分布 ",{"type":18,"tag":111,"props":426,"children":428},{"alt":202,"src":427},"https://www.zhihu.com/equation?tex=%5Cmathcal%7BN%7D%5Cleft%280%2C1+%5Cright%29+",[],{"type":24,"value":430}," 进行采样，然后按均值 ",{"type":18,"tag":111,"props":432,"children":434},{"alt":202,"src":433},"https://www.zhihu.com/equation?tex=+%5Cmu",[],{"type":24,"value":436}," 进行偏移，标准差 ",{"type":18,"tag":111,"props":438,"children":440},{"alt":202,"src":439},"https://www.zhihu.com/equation?tex=%5Csigma+",[],{"type":24,"value":442}," 进行缩放得到。为了让标准差 ",{"type":18,"tag":111,"props":444,"children":446},{"alt":202,"src":445},"https://www.zhihu.com/equation?tex=+%5Csigma",[],{"type":24,"value":448}," 非负，我们将它参数化为 ",{"type":18,"tag":111,"props":450,"children":452},{"alt":202,"src":451},"https://www.zhihu.com/equation?tex=%5Csigma%3Dlog%5Cleft%28+1%2Bexp%28%5Crho%29+%5Cright%29+",[],{"type":24,"value":454}," 。此时权重 ",{"type":18,"tag":111,"props":456,"children":458},{"alt":202,"src":457},"https://www.zhihu.com/equation?tex=%5Cmathbf%7Bw%7D%3Dt%28%5Ctheta%2C%5Cepsilon%29%3D%5Cmu%2Blog%5Cleft%28+1%2Bexp%28%5Crho%29+%5Cright%29%5Ccirc%5Cepsilon",[],{"type":24,"value":309},{"type":18,"tag":111,"props":461,"children":463},{"alt":202,"src":462},"https://www.zhihu.com/equation?tex=+%5Ccirc",[],{"type":24,"value":465}," 表示点乘。优化过程的步骤如下：",{"type":18,"tag":32,"props":467,"children":468},{},[469],{"type":18,"tag":111,"props":470,"children":472},{"alt":7,"src":471},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2020/10/30/f634c1f586c64f5c85eeaa25544270c1.jpg",[],{"type":18,"tag":32,"props":474,"children":475},{},[476,478,482],{"type":24,"value":477},"其中，均值和标准差的梯度项 
",{"type":18,"tag":111,"props":479,"children":481},{"alt":202,"src":480},"https://www.zhihu.com/equation?tex=+%5Cfrac%7B%5Cpartial+f%28%5Cmathbf%7Bw%7D%2C%5Ctheta%29%7D%7B%5Cpartial+%5Cmathbf%7Bw%7D%7D+",[],{"type":24,"value":483}," 是共享的，这个梯度项就是神经网络上通常的反向传播算法所找到的梯度。因此，为了学习平均值和标准偏差，我们必须简单地计算通过反向传播发现的通常梯度，然后像上述步骤那样缩放和移动它们。",{"type":18,"tag":32,"props":485,"children":486},{},[487],{"type":24,"value":488},"好了，贝叶斯神经网络的原理就介绍到这里，接下来就和大家介绍一下在MindSpore深度概率学习库中，我们如何来构造贝叶斯神经网络。",{"type":18,"tag":32,"props":490,"children":491},{},[492],{"type":18,"tag":67,"props":493,"children":494},{},[495],{"type":24,"value":496},"MindSpore实现",{"type":18,"tag":32,"props":498,"children":499},{},[500,502],{"type":24,"value":501},"· ",{"type":18,"tag":67,"props":503,"children":504},{},[505],{"type":24,"value":506},"构造贝叶斯神经网络",{"type":18,"tag":32,"props":508,"children":509},{},[510],{"type":24,"value":511},"MindSpore深度概率学习库中的mindspore.nn.probability.bnn_layer模块中提供了ConvReparam和DenseReparam两个接口，它们是基于上面介绍的Reparameterize方法实现的贝叶斯卷积层和全连接层。",{"type":18,"tag":32,"props":513,"children":514},{},[515],{"type":24,"value":516},"利用bnn_layers模块中的ConvReparam和DenseReparam接口构建贝叶斯神经网络的方法与构建普通的神经网络相同。值得注意的是，bnn_layers中的ConvReparam和DenseReparam可以和普通的神经网络层互相组合。下面让我们看一下如何构建Bayesian LeNet。",{"type":18,"tag":518,"props":519,"children":521},"pre",{"code":520},"import mindspore.nn as nn\nfrom mindspore.nn.probability import bnn_layers\nimport mindspore.ops.operations as P\n \nclass BNNLeNet5(nn.Cell):\n \"\"\"\n bayesian Lenet network\n \n Args:\n num_class (int): Num classes. 
Default: 10.\n \n Returns:\n Tensor, output tensor\n Examples:\n >>> BNNLeNet5(num_class=10)\n \n \"\"\"\n def __init__(self, num_class=10):\n super(BNNLeNet5, self).__init__()\n self.num_class = num_class\n self.conv1 = bnn_layers.ConvReparam(1, 6, 5, stride=1, padding=0, has_bias=False, pad_mode=\"valid\")\n self.conv2 = bnn_layers.ConvReparam(6, 16, 5, stride=1, padding=0, has_bias=False, pad_mode=\"valid\")\n self.fc1 = bnn_layers.DenseReparam(16 * 5 * 5, 120)\n self.fc2 = bnn_layers.DenseReparam(120, 84)\n self.fc3 = bnn_layers.DenseReparam(84, self.num_class)\n self.relu = nn.ReLU()\n self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)\n self.flatten = nn.Flatten()\n self.reshape = P.Reshape()\n \n def construct(self, x):\n x = self.conv1(x)\n x = self.relu(x)\n x = self.max_pool2d(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.max_pool2d(x)\n x = self.flatten(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.relu(x)\n x = self.fc3(x)\n return x\n",[522],{"type":18,"tag":523,"props":524,"children":525},"code",{"__ignoreMap":7},[526],{"type":24,"value":520},{"type":18,"tag":32,"props":528,"children":529},{},[530],{"type":24,"value":531},"贝叶斯神经网络的训练过程与DNN基本相同，唯一不同的是将WithLossCell替换为适用于BNN的WithBNNLossCell。这是因为贝叶斯神经网络在优化时，不仅需要考虑损失函数，让网络的输出值尽可能接近真实值，还需要最小化贝叶斯层的KL散度。除了backbone和loss_fn两个参数之外，WithBNNLossCell增加了dnn_factor和bnn_factor两个参数。dnn_factor是由损失函数计算得到的网络整体损失的系数，bnn_factor是每个贝叶斯层的KL散度的系数，这两个参数是用来平衡网络整体损失和贝叶斯层的KL散度的，防止KL散度的值过大掩盖了网络整体损失。",{"type":18,"tag":518,"props":533,"children":535},{"code":534},"# loss function definition\ncriterion = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n \n# optimization definition\noptimizer = AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)\n \nnet_with_loss = bnn_layers.WithBNNLossCell(network, criterion, dnn_factor=60000, bnn_factor=0.000001)\ntrain_bnn_network = TrainOneStepCell(net_with_loss, optimizer)\ntrain_bnn_network.set_train()\n \ntrain_set = 
create_dataset('./mnist_data/train', 64, 1)\ntest_set = create_dataset('./mnist_data/test', 64, 1)\n \nepoch = 10\n \nfor i in range(epoch):\n train_loss, train_acc = train_model(train_bnn_network, network, train_set)\n \n valid_acc = validate_model(network, test_set)\n \n print('Epoch: {} \\tTraining Loss: {:.4f} \\tTraining Accuracy: {:.4f} \\tvalidation Accuracy: {:.4f}'.\n format(i, train_loss, train_acc, valid_acc))\n",[536],{"type":18,"tag":523,"props":537,"children":538},{"__ignoreMap":7},[539],{"type":24,"value":534},{"type":18,"tag":32,"props":541,"children":542},{},[543,545],{"type":24,"value":544},"完整的代码可以戳",{"type":18,"tag":43,"props":546,"children":549},{"href":547,"rel":548},"https://gitee.com/mindspore/mindspore/tree/master/tests/st/probability/bnn_layers",[47],[550],{"type":24,"value":551},"An example of Bayesian Neural Network",{"type":18,"tag":32,"props":553,"children":554},{},[555,556],{"type":24,"value":501},{"type":18,"tag":67,"props":557,"children":558},{},[559],{"type":24,"value":560},"神经网络“一键转换”贝叶斯神经网络",{"type":18,"tag":32,"props":562,"children":563},{},[564],{"type":24,"value":565},"有没有同学不想了解贝叶斯的相关原理，却想尝试一下贝叶斯神经网络？MindSpore深度概率学习库提供了贝叶斯转换接口（mindspore.nn.probability.transform），支持神经网络模型一键转换成贝叶斯神经网络。 API TransformToBNN主要实现了两个功能：模型级别的转换（transform_to_bnn_model）和层级的转换（transform_to_bnn_layer）。",{"type":18,"tag":32,"props":567,"children":568},{},[569],{"type":24,"value":570},"下面就让我们看看如何实现神经网络向贝叶斯神经网络的转换吧：",{"type":18,"tag":518,"props":572,"children":574},{"code":573},"network = LeNet5()\n \n# loss function definition\ncriterion = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n \n# optimization definition\noptimizer = AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)\n \nnet_with_loss = WithLossCell(network, criterion)\ntrain_network = TrainOneStepCell(net_with_loss, optimizer)\n \n# transform the whole DNN model to BNN\nbnn_transformer = transforms.TransformToBNN(train_network, dnn_factor=1, 
bnn_factor=1)\ntrain_bnn_network = bnn_transformer.transform_to_bnn_model()\n",[575],{"type":18,"tag":523,"props":576,"children":577},{"__ignoreMap":7},[578],{"type":24,"value":573},{"type":18,"tag":32,"props":580,"children":581},{},[582],{"type":24,"value":583},"在调用TransformToBNN实现模型转换之后，只需要按照普通神经网络的训练方法进行训练即可。如果只想要转换神经网络中指定类型的层（卷积层或者全连接层），调用层级转换（transform_to_bnn_layer）的功能即可：",{"type":18,"tag":518,"props":585,"children":587},{"code":586},"train_bnn_network = bnn_transformer.transform_to_bnn_layer(nn.Dense, bnn_layers.DenseReparam)\n",[588],{"type":18,"tag":523,"props":589,"children":590},{"__ignoreMap":7},[591],{"type":24,"value":586},{"type":18,"tag":32,"props":593,"children":594},{},[595,597],{"type":24,"value":596},"模型转换的完整代码实现戳",{"type":18,"tag":43,"props":598,"children":601},{"href":599,"rel":600},"https://gitee.com/mindspore/mindspore/tree/master/tests/st/probability/transforms",[47],[602],{"type":24,"value":603},"An example of transforms",{"type":18,"tag":32,"props":605,"children":606},{},[607],{"type":24,"value":608},"本篇文章就到这里啦，这次主要分享了贝叶斯神经网络的原理与实现，如果有不对之处欢迎大家批评指正哈。",{"type":18,"tag":32,"props":610,"children":611},{},[612],{"type":24,"value":613},"参考文献：",{"type":18,"tag":32,"props":615,"children":616},{},[617],{"type":24,"value":618},"[1] Charles Blundell, Julien Cornebise, Koray Kavukcuoglu and Daan Wierstra, Weight Uncertainty in Neural Networks, In Proceedings of the 32nd International Conference on Machine Learning (ICML 2015), 2015.",{"type":18,"tag":32,"props":620,"children":621},{},[622],{"type":24,"value":623},"[2] Ethan Goan, Clinton Fookes, Bayesian Neural Networks: An Introduction and Survey, Case Studies in Applied Bayesian Data Science: CIRM Jean-Morlet Chair: 45-87, 2020.",{"type":18,"tag":32,"props":625,"children":626},{},[627,629],{"type":24,"value":628},"[3] ",{"type":18,"tag":43,"props":630,"children":633},{"href":631,"rel":632},"https://zhuanlan.zhihu.com/p/81170602",[47],[634],{"type":24,"value":635},"Bayesian Neural 
Networks：贝叶斯神经网络",{"type":18,"tag":32,"props":637,"children":638},{},[639],{"type":24,"value":640},"[4] 贝叶斯神经网络最新综述",{"type":18,"tag":32,"props":642,"children":643},{},[644,646],{"type":24,"value":645},"[5]",{"type":18,"tag":43,"props":647,"children":650},{"href":648,"rel":649},"https://davidstutz.de/a-short-introduction-to-bayesian-neural-networks/",[47],[651],{"type":24,"value":652},"A Short Introduction to Bayesian Neural Networks",{"title":7,"searchDepth":654,"depth":654,"links":655},4,[],"markdown","content:technology-blogs:zh:320.md","content","technology-blogs/zh/320.md","technology-blogs/zh/320","md",1776506127104]