[{"data":1,"prerenderedAt":397},["ShallowReactive",2],{"content-query-wLcukwwxi7":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":391,"_id":392,"_source":393,"_file":394,"_stem":395,"_extension":396},"/technology-blogs/zh/1905","zh",false,"","联邦学习 | 无处不在的隐私泄露！","如何让AI模型学习到通用知识又不保留个体信息，是我们要解决的一个问题！","2022-10-11","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/10/24/b4e0d9c6ab44497c9ed3c9efafe6da69.png","technology-blogs",{"type":14,"children":15,"toc":384},"root",[16,24,33,56,79,91,96,103,108,115,136,141,146,151,177,182,187,192,197,202,207,214,219,224,231,236,241,248,252,257,262,267,277,282,287,303,310,318,328,343,353,369],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"联邦学习-无处不在的隐私泄露",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":17,"tag":29,"props":30,"children":32},"img",{"alt":7,"src":31},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/10/24/543e0400cb934c20ba8d8c174adeda4b.gif",[],{"type":17,"tag":25,"props":34,"children":35},{},[36,42,44,49,51],{"type":17,"tag":37,"props":38,"children":39},"strong",{},[40],{"type":23,"value":41},"作者",{"type":23,"value":43},"：",{"type":17,"tag":37,"props":45,"children":46},{},[47],{"type":23,"value":48},"AI安全Mr.Jin",{"type":23,"value":50}," 
｜",{"type":17,"tag":37,"props":52,"children":53},{},[54],{"type":23,"value":55},"来源：知乎",{"type":17,"tag":25,"props":57,"children":58},{},[59,61,70,72,77],{"type":23,"value":60},"相信看过",{"type":17,"tag":62,"props":63,"children":67},"a",{"href":64,"rel":65},"https://zhuanlan.zhihu.com/p/472650896",[66],"nofollow",[68],{"type":23,"value":69},"上一篇内容",{"type":23,"value":71},"的同学已经对联邦学习的背景和算法思想有了概念，总结起来就是：",{"type":17,"tag":37,"props":73,"children":74},{},[75],{"type":23,"value":76},"数据不动模型动",{"type":23,"value":78},"。但是，本地数据不出端就能防止隐私信息泄露吗？那可不一定！",{"type":17,"tag":25,"props":80,"children":81},{},[82,84],{"type":23,"value":83},"比如说，可以进行人机对话的GPT2模型，竟然被“诱导”说出来某些网友的姓名、电话号码和地址！",{"type":17,"tag":62,"props":85,"children":88},{"href":86,"rel":87},"https://link.zhihu.com/?target=https%3A//mp.weixin.qq.com/s/n1jjFzVkGFT4x7P9D9JnZg",[66],[89],{"type":23,"value":90},"https://mp.weixin.qq.com/s/n1jjFzVkGFT4x7P9D9JnZg",{"type":17,"tag":25,"props":92,"children":93},{},[94],{"type":23,"value":95},"接下来给大家介绍两种窃取数据隐私的攻击方法。",{"type":17,"tag":25,"props":97,"children":98},{},[99],{"type":17,"tag":29,"props":100,"children":102},{"alt":7,"src":101},"https://pic3.zhimg.com/80/v2-1e6dba5c04141b330bb027acc3ce2556_720w.webp",[],{"type":17,"tag":25,"props":104,"children":105},{},[106],{"type":23,"value":107},"网络图片，侵删",{"type":17,"tag":109,"props":110,"children":112},"h2",{"id":111},"１模型逆向攻击",[113],{"type":23,"value":114},"１，模型逆向攻击",{"type":17,"tag":25,"props":116,"children":117},{},[118,120,125,127,134],{"type":23,"value":119},"在联邦学习过程中，客户端是需要向Server发送本地训练后的模型权重的。由于模型权重是基于本地数据集进行训练的，",{"type":17,"tag":37,"props":121,"children":122},{},[123],{"type":23,"value":124},"所以它或多或少会携带本地数据集的信息",{"type":23,"value":126},"。一旦模型权重在传输过程中被截取，那么攻击者就可以根据模型权重推测出训练数据集的相关信息。例如，有人提出了这么一种模型逆向攻击，可以根据模型权重还原出训练样本",{"type":17,"tag":62,"props":128,"children":131},{"href":129,"rel":130},"https://zhuanlan.zhihu.com/p/474888427#ref_1",[66],[132],{"type":23,"value":133},"[1]",{"type":23,"value":135},"！这种攻击方法的大概过程如下：",{"type":17,"tag
":25,"props":137,"children":138},{},[139],{"type":23,"value":140},"假设在某一轮，Server下发给Client的模型权重是 w0 ，Client把 w0 更新到自己的模型上之后，利用本地的数据集（假设是１张图片） x0 和标签 y0 进行训练，得到模型的更新梯度",{"type":17,"tag":25,"props":142,"children":143},{},[144],{"type":23,"value":145},"∇w=∂loss(f(w0,x0),y0)∂w",{"type":17,"tag":25,"props":147,"children":148},{},[149],{"type":23,"value":150},"其中 f(w0,x0) 表示模型对于 x0的预测值， loss(f(w0,x0),y0) 表示预测结果和真实标签对比的损失值， ∂loss∂w 表示损失函数关于模型参数求导，一般的AI计算框架会提供接口计算 ∇w 。",{"type":17,"tag":25,"props":152,"children":153},{},[154,156,161,163,168,170,175],{"type":23,"value":155},"此外，攻击者也参与了联邦训练（这意味着",{"type":17,"tag":37,"props":157,"children":158},{},[159],{"type":23,"value":160},"攻击者也持有",{"type":23,"value":162}," w0 ",{"type":17,"tag":37,"props":164,"children":165},{},[166],{"type":23,"value":167},"，并且知道",{"type":23,"value":169}," x0 ",{"type":17,"tag":37,"props":171,"children":172},{},[173],{"type":23,"value":174},"的形状",{"type":23,"value":176},"），并且截获了 ∇w ，他现在要按如下步骤来还原Client的本地数据 x0 ：",{"type":17,"tag":25,"props":178,"children":179},{},[180],{"type":23,"value":181},"step 1，攻击者随机初始化一个与x0 的形状相同的初始输入 xinit 和初始标签 yinit ，然后构建一个损失函数",{"type":17,"tag":25,"props":183,"children":184},{},[185],{"type":23,"value":186},"loss(xinit,yinit)=||∂loss(f(w0,xinit),yinit)∂w−∇w||2",{"type":17,"tag":25,"props":188,"children":189},{},[190],{"type":23,"value":191},"其中 ||x||2 表示求 x 的2-范数。",{"type":17,"tag":25,"props":193,"children":194},{},[195],{"type":23,"value":196},"step 2，攻击者当然希望 loss(xinit,yinit) 能尽可能地小，这样的话才能说明 xinit 接近于 x0 ， yinit 接近于 y0 。那么怎么去调整 xinit 和 yinit 呢？也可以根据“梯度方向是函数增长最快的方向”，这样调整：",{"type":17,"tag":25,"props":198,"children":199},{},[200],{"type":23,"value":201},"xnew=xinit−η∂loss(xinit,yinit)∂xinitynew=yinit−η∂loss(xinit,yinit)∂yinit",{"type":17,"tag":25,"props":203,"children":204},{},[205],{"type":23,"value":206},"其中 η 是学习率。计算完之后，令 xinit=xnew,yinit=ynew 。把step 2迭代进行多次后， xnew 和 ynew 会逐渐向真实值 x0 和 y0 靠近——攻击者的目的达到了。一般为了加快攻击速度，可以先猜测标签 y0 的值，然后调整 
xinit。看一下攻击的效果：",{"type":17,"tag":25,"props":208,"children":209},{},[210],{"type":17,"tag":29,"props":211,"children":213},{"alt":7,"src":212},"https://pic3.zhimg.com/80/v2-9f4fd8946bb417ef37dbdcf0f440615a_720w.webp",[],{"type":17,"tag":25,"props":215,"children":216},{},[217],{"type":23,"value":218},"图片数据攻击效果[1]",{"type":17,"tag":25,"props":220,"children":221},{},[222],{"type":23,"value":223},"可以看到，经过500次迭代之后，图片都几乎被还原了！除了图片数据集，文字数据也可以被还原：",{"type":17,"tag":25,"props":225,"children":226},{},[227],{"type":17,"tag":29,"props":228,"children":230},{"alt":7,"src":229},"https://pic4.zhimg.com/80/v2-92f214167dcb73e2928fdaeae73daf8f_720w.webp",[],{"type":17,"tag":25,"props":232,"children":233},{},[234],{"type":23,"value":235},"文字数据集攻击效果[1]",{"type":17,"tag":25,"props":237,"children":238},{},[239],{"type":23,"value":240},"所以说，不要以为数据不出端，你的隐私信息就稳了！",{"type":17,"tag":25,"props":242,"children":243},{},[244],{"type":17,"tag":29,"props":245,"children":247},{"alt":7,"src":246},"https://pic4.zhimg.com/80/v2-67fa4f7cc8d0cd0807bda4401a6ef38b_720w.webp",[],{"type":17,"tag":25,"props":249,"children":250},{},[251],{"type":23,"value":107},{"type":17,"tag":25,"props":253,"children":254},{},[255],{"type":23,"value":256},"除了模型逆向攻击，还有一种窃取隐私信息的方法叫做成员推理攻击。",{"type":17,"tag":109,"props":258,"children":260},{"id":259},"成员推理攻击",[261],{"type":23,"value":259},{"type":17,"tag":25,"props":263,"children":264},{},[265],{"type":23,"value":266},"成员推理攻击是用来判断某个样本是否属于目标模型的训练数据集，从而得知该样本的一些集体属性。比如说，某医院根据一批患者的症状训练了一个机器模型，并且把这个模型公开给了大家用；而我手里有某个人的生理特征，那么我可以利用成员推理攻击来推测这个人的数据是否被用于训练医院发布的模型，从而知道他是否去医院看过这个病。",{"type":17,"tag":25,"props":268,"children":269},{},[270,272],{"type":23,"value":271},"一种比较经典的成员推理攻击是这样的：被攻击的目标是一批隐私训练数据集和利用这批训练集训练出来的模型。攻击者会通过白盒窃取或者黑盒探测的方式得到和目标模型的结构、参数都相似的影子模型，然后生成和目标数据集分布相似的仿真数据集，最后用影子模型和仿真数据集训练一个分类模型，这个分类模型可以根据某样本经过目标模型的输出来判断它是否属于目标训练数据集。当然，在真实场景中，影子模型和仿真数据集的逼真程度都不是很高，",{"type":17,"tag":37,"props":273,"children":274},{},[275],{"type":23,"value":276},"所以成员推理攻击大多时候是作为一个测试方法，评估模型训练过程的隐私泄露程度。",{"ty
pe":17,"tag":25,"props":278,"children":279},{},[280],{"type":23,"value":281},"其实当一个AI模型具备了分类预测、生成语句的能力的时候，已经表明它存储了很多知识，这些知识又不可避免地包括了一些训练样本的个体信息。如何让AI模型学习到通用知识又不保留个体信息，是我们要解决的一个问题！",{"type":17,"tag":109,"props":283,"children":285},{"id":284},"参考",[286],{"type":23,"value":284},{"type":17,"tag":288,"props":289,"children":290},"ol",{},[291],{"type":17,"tag":292,"props":293,"children":294},"li",{},[295,297],{"type":23,"value":296},"1.Deep Leakage from Gradients ",{"type":17,"tag":62,"props":298,"children":301},{"href":299,"rel":300},"https://arxiv.org/pdf/1906.08935.pdf",[66],[302],{"type":23,"value":299},{"type":17,"tag":25,"props":304,"children":305},{},[306],{"type":17,"tag":29,"props":307,"children":309},{"alt":7,"src":308},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/10/24/f5482b1c286f4581b7f3deb4b69c8d1a.jpg",[],{"type":17,"tag":25,"props":311,"children":312},{},[313],{"type":17,"tag":37,"props":314,"children":315},{},[316],{"type":23,"value":317},"MindSpore官方资料",{"type":17,"tag":25,"props":319,"children":320},{},[321,326],{"type":17,"tag":37,"props":322,"children":323},{},[324],{"type":23,"value":325},"官方QQ群",{"type":23,"value":327}," : 486831414",{"type":17,"tag":25,"props":329,"children":330},{},[331,336,337],{"type":17,"tag":37,"props":332,"children":333},{},[334],{"type":23,"value":335},"官网",{"type":23,"value":43},{"type":17,"tag":62,"props":338,"children":341},{"href":339,"rel":340},"https://www.mindspore.cn/",[66],[342],{"type":23,"value":339},{"type":17,"tag":25,"props":344,"children":345},{},[346,351],{"type":17,"tag":37,"props":347,"children":348},{},[349],{"type":23,"value":350},"Gitee",{"type":23,"value":352}," : https://gitee.com/mindspore/mindspore",{"type":17,"tag":25,"props":354,"children":355},{},[356,361,363],{"type":17,"tag":37,"props":357,"children":358},{},[359],{"type":23,"value":360},"GitHub",{"type":23,"value":362}," : 
",{"type":17,"tag":62,"props":364,"children":367},{"href":365,"rel":366},"https://github.com/mindspore-ai/mindspore",[66],[368],{"type":23,"value":365},{"type":17,"tag":25,"props":370,"children":371},{},[372,377,378],{"type":17,"tag":37,"props":373,"children":374},{},[375],{"type":23,"value":376},"论坛",{"type":23,"value":43},{"type":17,"tag":62,"props":379,"children":382},{"href":380,"rel":381},"https://bbs.huaweicloud.com/forum/forum-1076-1.html",[66],[383],{"type":23,"value":380},{"title":7,"searchDepth":385,"depth":385,"links":386},4,[387,389,390],{"id":111,"depth":388,"text":114},2,{"id":259,"depth":388,"text":259},{"id":284,"depth":388,"text":284},"markdown","content:technology-blogs:zh:1905.md","content","technology-blogs/zh/1905.md","technology-blogs/zh/1905","md",1776506116894]