[{"data":1,"prerenderedAt":189},["ShallowReactive",2],{"content-query-EHbUMeofcO":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":183,"_id":184,"_source":185,"_file":186,"_stem":187,"_extension":188},"/technology-blogs/zh/1891","zh",false,"","基于隐式神经网络表达的数据压缩","基于INR的数据压缩技术是一个非常有前景的研究方向，针对不同模态数据适用的INR模型结构、调制权重的稀疏性、以及拟合优化策略都值得进一步探索","2022-09-30","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2022/09/30/94b0d0c99a52447eb3607692d45735f7.png","technology-blogs","大V博文",{"type":15,"children":16,"toc":180},"root",[17,24,30,43,48,53,58,63,68,73,81,86,93,98,103,108,113,118,123,128,133,140,145,150,155,160,165,170,175],{"type":18,"tag":19,"props":20,"children":21},"element","h1",{"id":8},[22],{"type":23,"value":8},"text",{"type":18,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":29},"作者：于璠",{"type":18,"tag":25,"props":31,"children":32},{},[33,35],{"type":23,"value":34},"文章来源：",{"type":18,"tag":36,"props":37,"children":41},"a",{"href":38,"rel":39},"https://zhuanlan.zhihu.com/p/567668446",[40],"nofollow",[42],{"type":23,"value":38},{"type":18,"tag":25,"props":44,"children":45},{},[46],{"type":23,"value":47},"数据压缩是一种在日常生活中广泛应用的技术，从算法角度来讲，压缩的过程是通过改变数据的表征范式以达到保留信息、去除冗余的过程。近来，深度学习在数据压缩领域的应用不仅表现出极好的性能，还为数据表征提出了具有启发性的新范式。",{"type":18,"tag":25,"props":49,"children":50},{},[51],{"type":23,"value":52},"基于隐式神经网络表达（Implicit Neural Representation）的数据压缩模型以函数表征数据、其函数拟合数据的过程就是压缩过程，近期的工作表示[1][2][3]，基于INR的数据压缩模型网络结构简单，拟合迅速，并在语音、图片、甚至气象数据的压缩上超越传统算法。",{"type":18,"tag":25,"props":54,"children":55},{},[56],{"type":23,"value":57},"学习基于INR的数据压缩算法可以拆解为对两个问题的探索，首先是INR模型如何表征数据，其次是如何训练INR模型。",{"type":18,"tag":25,"props":59,"children":60},{},[61],{"type":23,"value":62},"问题一，INR如何表征数据？",{"type":18,"tag":25,"props":64,"children":65},{},[66],{"type":23,"value":67},"根据通用近似定理[4]可知，给定合适的权重，神经网络可以非常简单的网络架构来逼近非常复杂函数，也就是说，以多层感知机构成的神经网络可以拟合一段语音的信号分布、一个图片的像素值分布、甚至是一个三维场景的体密度分布。",{"type":18,"tag":25,"props":69,"children":70},{},[71],{"type":23,"value":72},"举例来讲，以图片的压缩为例，已知神经网络f1已经拟合并表征图片d1，那么在给定像素坐标x时，可以求得x处像素值为y = f1(x)，而在输入所有像素坐标后，可以求得整张图片的像素值，从而得到由函数f1表征的图片d1（见图1）。",{"type":18,"tag":25,"props":74,"children":75},{},[76],{"type":18,"tag":77,"props":78,"children":80},"img",{"alt":7,"src":79},"https://pic1.zhimg.com/80/v2-ebe2f28e95f2589a5ce106c25145f6b4_720w.webp",[],{"type":18,"tag":25,"props":82,"children":83},{},[84],{"type":23,"value":85},"同样的原理，神经网络f2可以表征图片d2，以此类推，神经网络就成为了数据的“函数式”表征（见图2）。",{"type":18,"tag":25,"props":87,"children":88},{},[89],{"type":18,"tag":77,"props":90,"children":92},{"alt":7,"src":91},"https://pic1.zhimg.com/80/v2-08ba5eb38c6023ee72efe3d2cd525c80_720w.webp",[],{"type":18,"tag":25,"props":94,"children":95},{},[96],{"type":23,"value":97},"那么INR模型f1与f2之间有什么关联呢？在基于INR表征数据的算法中，表征同类数据（如图片、三维场景等）的INR模型拥有同样的结构、不同的权重。基于[2][3]可以得知，INR模型可以被分为基模型与调制模型，其中基模型对应权重为预训练得出，调制模型对应权重为基于不同数据样本进行增量训练得出，也就是说，f1与f2只有调制权重不同，而调制权重在整个模型权重中稀疏性极高。",{"type":18,"tag":25,"props":99,"children":100},{},[101],{"type":23,"value":102},"基于上述的描述，我们可以得知，n张图片可以被一个基模型和n组调制权重表征，每组调制权重就是对应图片的压缩后的数据表征形式。",{"type":18,"tag":25,"props":104,"children":105},{},[106],{"type":23,"value":107},"问题二，如何训练INR模型？",{"type":18,"tag":25,"props":109,"children":110},{},[111],{"type":23,"value":112},"在了解INR模型拥有表征数据的能力后，第二个问题就是如何训练INR模型来迅速的拟合数据呢？数据压缩技术的应用场景繁多，其中无线通信的信号压缩等场景对实时性要求极高，所以INR模型的高性能训练与高效的拟合优化策略也至关重要。",{"type":18,"tag":25,"props":114,"children":115},{},[116],{"type":23,"value":117},"在问题一的探讨中提到，INR模型可以分为基模型和调制模型，其中基模型的训练是离线进行，调制模型的训练是基于压缩的目标数据进行的权重微调过程，是在线进行。为了提升压缩效率，调制模型训练的实时性至关重要，考虑到以上诉求，COIN++ [2]中将元学习方法MAML[5]引入，以提升INR模型的训练效率。",{"type":18,"tag":25,"props":119,"children":120},{},[121],{"type":23,"value":122},"MAML是一种模型优化方法，其目的在于针对一个模型学习最佳的初始化权重，从而使得该模型面向不同的任务进行训练时可以通过极少次权重更新收敛；其过程有两步，inner loop计算在给定初始化权重的情况下少次权重更新后的损失，outer loop则根据该损失优化初始化权重。在INR模型训练的语境下，MAML可以被这样理解：其目的在于针对INR模型学习最佳的基模型权重，从而INR模型在面对不同数据时可以通过极少次的调制权重更新达到拟合的目的；其过程中inner loop只更新调制权重来计算压缩损失，outer loop则根据损失优化基模型权重。通过运用元学习方法，基于INR的数据压缩算法在压缩效率大大提升。",{"type":18,"tag":25,"props":124,"children":125},{},[126],{"type":23,"value":127},"在COIN++的基础上，Functa [3]进一步提升了基于INR的数据压缩效率，直接运用生成类网络对调制权重进行预测，而其中生成类网络的训练数据集则是由元学习训练出的调制权重组成。",{"type":18,"tag":25,"props":129,"children":130},{},[131],{"type":23,"value":132},"以上是基于INR的数据压缩技术概览，那么该计算在数据压缩场景中的效果如何呢？根据COIN++中实验结果可以看出，在压缩气象数据时表现出远远超越了传统方法的效果，而在压缩图片、语音数据时则只在有限的压缩比例下超越传统方法（见图3）。",{"type":18,"tag":25,"props":134,"children":135},{},[136],{"type":18,"tag":77,"props":137,"children":139},{"alt":7,"src":138},"https://pic1.zhimg.com/80/v2-692068d541e32a5b4005f7e81eea2fe0_720w.webp",[],{"type":18,"tag":25,"props":141,"children":142},{},[143],{"type":23,"value":144},"综上所述，基于INR的数据压缩技术是一个非常有前景的研究方向，针对不同模态数据适用的INR模型结构、调制权重的稀疏性、以及拟合优化策略都值得进一步探索。同时， “通过模型拟合数据”的思想在实际场景中的应用也有很大的价值，首先，INR模型的大小、结构、训练难度是与信息复杂度成正比，而非原数据的分辨率，这使得时空相关性强的数据能够得到卓越的压缩效果，达到极致的去除冗余信息的效果，其次， INR模型在被解压时，输入采样点可控制解压后数据的分辨率，用户可以自由的进行数据的部分解压、低分辨率解压、甚至是超分辨率解压。",{"type":18,"tag":25,"props":146,"children":147},{},[148],{"type":23,"value":149},"[1] Emilien Dupont et al. “Coin: Compression with implicit neural representations”. In: arXiv preprint",{"type":18,"tag":25,"props":151,"children":152},{},[153],{"type":23,"value":154},"arXiv:2103.03123 (2021).",{"type":18,"tag":25,"props":156,"children":157},{},[158],{"type":23,"value":159},"[2] Emilien Dupont et al. “Coin++: Data agnostic neural compression”. In: arXiv preprint arXiv:2201.12904(2022).",{"type":18,"tag":25,"props":161,"children":162},{},[163],{"type":23,"value":164},"[3] Emilien Dupont et al. “From data to functa: Your data point is a function and you should treat it like one”.In: arXiv preprint arXiv:2201.12204 (2022).",{"type":18,"tag":25,"props":166,"children":167},{},[168],{"type":23,"value":169},"[4] Hornik, Kurt; Stinchcombe, Maxwell; White, Halbert (1989). Multilayer Feedforward Networks are Universal Approximators (PDF). Neural Networks. Vol. 2. Pergamon Press. pp. 359–366.",{"type":18,"tag":25,"props":171,"children":172},{},[173],{"type":23,"value":174},"[5] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation",{"type":18,"tag":25,"props":176,"children":177},{},[178],{"type":23,"value":179},"of deep networks. In International Conference on Machine Learning, 2017.",{"title":7,"searchDepth":181,"depth":181,"links":182},4,[],"markdown","content:technology-blogs:zh:1891.md","content","technology-blogs/zh/1891.md","technology-blogs/zh/1891","md",1776506116804]