[{"data":1,"prerenderedAt":424},["ShallowReactive",2],{"content-query-O0qmNwgCrd":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":418,"_id":419,"_source":420,"_file":421,"_stem":422,"_extension":423},"/technology-blogs/zh/2944","zh",false,"","论文精讲 | 基于昇思MindSpore的动作频率自适应视频时序动作提名生成研究，解决精确定位未修剪视频中的动作问题","作者：李锐锋 ｜来源：知乎","2024-01-05","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/cf20eca9e1124ecaa8a54d6bd5e9ccf1.png","technology-blogs",{"type":14,"children":15,"toc":415},"root",[16,24,43,48,53,58,63,68,79,84,93,98,110,118,126,131,136,141,146,154,162,174,179,187,195,200,205,210,215,220,225,233,238,245,250,257,262,271,278,283,291,299,304,311,316,321,328,333,338,346,354,359,364,372,383,394,404],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"论文精讲-基于昇思mindspore的动作频率自适应视频时序动作提名生成研究解决精确定位未修剪视频中的动作问题",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28,30,36,38],{"type":23,"value":29},"**作者：**",{"type":17,"tag":31,"props":32,"children":33},"strong",{},[34],{"type":23,"value":35},"李锐锋",{"type":23,"value":37}," ｜",{"type":17,"tag":31,"props":39,"children":40},{},[41],{"type":23,"value":42},"来源：知乎",{"type":17,"tag":25,"props":44,"children":45},{},[46],{"type":23,"value":47},"论文标题",{"type":17,"tag":25,"props":49,"children":50},{},[51],{"type":23,"value":52},"Temporal Action Proposal Generation With Action Frequency Adaptive Network",{"type":17,"tag":25,"props":54,"children":55},{},[56],{"type":23,"value":57},"论文来源",{"type":17,"tag":25,"props":59,"children":60},{},[61],{"type":23,"value":62},"TMM 
2023",{"type":17,"tag":25,"props":64,"children":65},{},[66],{"type":23,"value":67},"论文链接",{"type":17,"tag":25,"props":69,"children":70},{},[71],{"type":17,"tag":72,"props":73,"children":77},"a",{"href":74,"rel":75},"https://ieeexplore.ieee.org/abstract/document/10183357",[76],"nofollow",[78],{"type":23,"value":74},{"type":17,"tag":25,"props":80,"children":81},{},[82],{"type":23,"value":83},"代码链接",{"type":17,"tag":25,"props":85,"children":86},{},[87],{"type":17,"tag":72,"props":88,"children":91},{"href":89,"rel":90},"https://gitee.com/chunjie-zhang/afan-tmm2023",[76],[92],{"type":23,"value":89},{"type":17,"tag":25,"props":94,"children":95},{},[96],{"type":23,"value":97},"昇思MindSpore作为开源的AI框架，为产学研和开发人员带来端边云全场景协同、极简开发、极致性能、安全可信的体验，支持超大规模AI预训练，自2020年3月28日开源来已超过6百万的下载量。昇思MindSpore已支持数百篇AI顶会论文，走入Top100+高校教学，通过HMS在5000+App上商用，拥有数量众多的开发者，在AI计算中心、智能制造、金融、云、无线、数通、能源、消费者1+8+N、智能汽车等端边云车全场景广泛应用，是Gitee指数最高的开源软件。欢迎大家参与开源贡献、套件、模型众智、行业创新与应用、算法创新、学术合作、AI书籍合作等，贡献您在云侧、端侧、边侧以及安全领域的应用案例。",{"type":17,"tag":25,"props":99,"children":100},{},[101,103,108],{"type":23,"value":102},"在科技界、学术界和工业界对昇思MindSpore的广泛支持下，基于昇思MindSpore的AI论文2023年在所有AI框架中占比7%，连续两年进入全球第二，感谢CAAI和各位高校老师支持，我们一起继续努力做好AI科研创新。昇思MindSpore社区支持顶级会议论文研究，持续构建原创AI成果。我会不定期挑选一些优秀的论文来推送和解读，希望更多的产学研专家跟昇思MindSpore合作，一起推动原创AI研究，昇思MindSpore社区会持续支撑好AI创新和AI应用，我选择了来自北京交通大学计算机与信息技术学院的",{"type":17,"tag":31,"props":104,"children":105},{},[106],{"type":23,"value":107},"张淳杰老师",{"type":23,"value":109},"团队的一篇论文解读，感谢各位专家教授同学的投稿。",{"type":17,"tag":25,"props":111,"children":112},{},[113],{"type":17,"tag":31,"props":114,"children":115},{},[116],{"type":23,"value":117},"01",{"type":17,"tag":25,"props":119,"children":120},{},[121],{"type":17,"tag":31,"props":122,"children":123},{},[124],{"type":23,"value":125},"研究背景",{"type":17,"tag":25,"props":127,"children":128},{},[129],{"type":23,"value":130},"作为视频理解领域中的核心内容，视频时序动作提名生成任务旨在准确预测未修剪视频中人体动作实例的起始和结束时间，对于理解视频中的人类行为起到至关重要的作用。尽管近年来时序动作提名生成的性能有了显著提升，但大多数先前的研究忽略了原始视频中动作频率的变化，导致这些方法在处理高动作频率视频时性能不尽如人意。通过详细的数据分析，我们确
定了两个主要问题：首先，高动作频率视频和低动作频率视频之间存在数据不平衡；其次，在高动作频率视频中，短时动作片段的检测性能相对较差。为了应对这些挑战，我们提出了一种灵活适应不同动作频率的有效框架，它可以无缝嵌入到现有的时序动作提名生成方法中，显著提高其性能。我们的算法可以基于昇思MindSpore官方文档示例以及我们提供的代码实现。",{"type":17,"tag":25,"props":132,"children":133},{},[134],{"type":23,"value":135},"随着视频监控、手机等摄像设备的迅速发展，视频数据量迅速增加。人工处理如此庞大的视频数据几乎是不可能。如何使用智能视频理解算法来有效地收集、管理和利用这些视频数据，成为了研究热点。近年来，人体动作识别取得了显著进展，但它主要关注短时修剪的视频片段，难以应用于长时间未修剪的真实视频。因此，关注未修剪长视频的时序动作检测引起了广泛的关注。",{"type":17,"tag":25,"props":137,"children":138},{},[139],{"type":23,"value":140},"该任务首先需要准确定位未修剪的长视频中人体动作实例的起始和结束时间，然后对这些动作进行分类。现有方法通常将这个任务分为两个子任务：时间动作提议生成（TAPG）任务和动作分类任务。TAPG的目标是预测未修剪视频中人体动作实例的起始和结束时间，而分类任务则是对这些动作实例进行分类。尽管在动作分类方面取得了显著的进展，但由于TAPG的召回率不足，时序动作检测的性能仍然有待改进。因此，如何精确定位未修剪视频中的动作是亟需解决的问题。未修剪视频的持续时间各不相同，同时其中的动作实例的持续时间也变化多样，这给时间动作提议生成带来了巨大的挑战。",{"type":17,"tag":25,"props":142,"children":143},{},[144],{"type":23,"value":145},"大多数TAPG模型主要关注预测时间动作实例的边界或动作锚定框的置信度分数，或者两者兼顾。然而，在这个过程中，大多数方法都忽视了未修剪视频中动作频率的变化，而在真实世界中这种情况是十分常见的。因此，研究如何适应不同的动作频率视频并提高在高动作频率视频上的性能变得至关重要。",{"type":17,"tag":25,"props":147,"children":148},{},[149],{"type":17,"tag":31,"props":150,"children":151},{},[152],{"type":23,"value":153},"02",{"type":17,"tag":25,"props":155,"children":156},{},[157],{"type":17,"tag":31,"props":158,"children":159},{},[160],{"type":23,"value":161},"团队介绍",{"type":17,"tag":25,"props":163,"children":164},{},[165,167,172],{"type":23,"value":166},"论文第一作者",{"type":17,"tag":31,"props":168,"children":169},{},[170],{"type":23,"value":171},"唐业鹏",{"type":23,"value":173},"是北京交通大学计算机与信息技术学院23届博士，研究方向为计算机视觉、视频理解、时序动作定位等。",{"type":17,"tag":25,"props":175,"children":176},{},[177],{"type":23,"value":178},"北京交通大学数字媒体信息处理研究中心（Mepro ）肇始于1998年，2012年入选教育部“创新团队发展计划”。该中心现有教师14人，博、硕士研究生100余人。该中心的研究领域为数字媒体信息处理，研究方向主要包括图像\\视频编码与传输、数字水印与数字取证、媒体内容分析与理解等。2022 年，实验室共发表高水平论文共 61 篇，其中包括本领域国际顶级汇刊 IEEE Trans.论文 38 篇，国际顶级会议如 NeurIPS、CVPR、ECCV、ACM MM 等论文 23 
篇。",{"type":17,"tag":25,"props":180,"children":181},{},[182],{"type":17,"tag":31,"props":183,"children":184},{},[185],{"type":23,"value":186},"03",{"type":17,"tag":25,"props":188,"children":189},{},[190],{"type":17,"tag":31,"props":191,"children":192},{},[193],{"type":23,"value":194},"论文简介",{"type":17,"tag":25,"props":196,"children":197},{},[198],{"type":23,"value":199},"本文介绍了一项关于视频时序动作提名生成技术的研究。该技术在视频分析、智能监控分析等领域扮演着至关重要的角色，其目标是从未剪辑的长视频中对人类的行为进行定位。未修剪视频的持续时间各不相同，其包含的动作实例持续时间也不同，这给时序动作提名生成任务带来了巨大挑战。",{"type":17,"tag":25,"props":201,"children":202},{},[203],{"type":23,"value":204},"大多数时序动作提名生成模型主要关注预测时间动作实例的边界或动作锚定框的置信度分数，或者两者兼顾。然而，这些方法都忽视了未修剪视频中动作频率的变化问题。通过数据分析，我们总结了两个主要问题：",{"type":17,"tag":25,"props":206,"children":207},{},[208],{"type":23,"value":209},"1.数据不平衡导致高动作频率视频性能不佳。模型往往在低动作频率视频上表现良好，而无法很好处理高动作频率视频。",{"type":17,"tag":25,"props":211,"children":212},{},[213],{"type":23,"value":214},"2.大量短时动作限制了在高动作频率视频上的性能。在高动作频率视频中有比低动作频率视频更多的短时动作实例。定位这些短时动作实例很困难，类似于目标检测中的小物体。",{"type":17,"tag":25,"props":216,"children":217},{},[218],{"type":23,"value":219},"为解决上述问题，我们提出了一个动作频率自适应的时序动作提名生成框架。一方面，我们通过专家学习方式来减轻数据不平衡。具体来说，我们将整个视频数据集分成几个子集，每个子集具有较少不平衡的数据分布。通过这种方式，我们可以确保在这些子集上训练的专家模型在数据不平衡方面受到的影响较小。为了整合来自专家模型的知识，我们随后通过知识蒸馏训练一个统一的学生模型，该模型适应不同动作频率的视频。",{"type":17,"tag":25,"props":221,"children":222},{},[223],{"type":23,"value":224},"同时，我们设计了一个动作频率分类器来辨别高动作频率视频，再对其进行精细检测，提高短时动作的预测性能。我们的方法可以方便地应用在现有时间动作提议生成模型之上。我们在两个经典模型（BMN和DBG）上验证所提出的方法，并在四个基准数据集进行了性能评估。充分的实验结果证明了我们方法的有效性和通用性。",{"type":17,"tag":25,"props":226,"children":227},{},[228],{"type":17,"tag":229,"props":230,"children":232},"img",{"alt":7,"src":231},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/36a288369937451ba3171b3c77936812.png",[],{"type":17,"tag":25,"props":234,"children":235},{},[236],{"type":23,"value":237},"现有模型在不同动作频率视频的性能对比",{"type":17,"tag":25,"props":239,"children":240},{},[241],{"type":17,"tag":229,"props":242,"children":244},{"alt":7,"src":243},"https://obs-mindspore-file.obs.cn-
north-4.myhuaweicloud.com/file/2024/01/12/cac382fabf32460bbb7eaeabc8fd98db.png",[],{"type":17,"tag":25,"props":246,"children":247},{},[248],{"type":23,"value":249},"现有数据集的不同动作频率视频的分布",{"type":17,"tag":25,"props":251,"children":252},{},[253],{"type":17,"tag":229,"props":254,"children":256},{"alt":7,"src":255},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/6fce74395dc542e3a9a1e3a2404f2937.png",[],{"type":17,"tag":25,"props":258,"children":259},{},[260],{"type":23,"value":261},"框架图",{"type":17,"tag":25,"props":263,"children":264},{},[265,269],{"type":17,"tag":229,"props":266,"children":268},{"alt":7,"src":267},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/1a8f1b5215b54ca685bf0a07b42446f0.png",[],{"type":23,"value":270}," 专家学习模块",{"type":17,"tag":25,"props":272,"children":273},{},[274],{"type":17,"tag":229,"props":275,"children":277},{"alt":7,"src":276},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/cccb29e3e29845568cd2a3bf787f0aee.png",[],{"type":17,"tag":25,"props":279,"children":280},{},[281],{"type":23,"value":282},"精细化处理模块",{"type":17,"tag":25,"props":284,"children":285},{},[286],{"type":17,"tag":31,"props":287,"children":288},{},[289],{"type":23,"value":290},"04",{"type":17,"tag":25,"props":292,"children":293},{},[294],{"type":17,"tag":31,"props":295,"children":296},{},[297],{"type":23,"value":298},"实验结果",{"type":17,"tag":25,"props":300,"children":301},{},[302],{"type":23,"value":303},"我们在常用的视频时序动作提名生成数据集THUMOS14、ActivityNet-1.3、FineAction和HACS上，与先进的时序动作提名生成算法进行了对比，并进一步对实验结果进行了分析研究。",{"type":17,"tag":25,"props":305,"children":306},{},[307],{"type":17,"tag":229,"props":308,"children":310},{"alt":7,"src":309},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/04e5dfe712b64157991ff66ab86f8aae.png",[],{"type":17,"tag":25,"props":312,"children":313},{},[314],{"type":23,"value":315},"不同数据集上的时序动作提名性能对比",{"type":17,"tag":25,"props":317,"children":318},{},[3
19],{"type":23,"value":320},"实验结果显示，我们的动作频率自适应方法能够有效提升现有时序动作提名生成方法。在ActivityNet-1.3数据集上，我们的模型AFAN（BMN）在验证集和测试集的AUC方面分别比基准方法BMN高出1.06%和1.43%。为了进一步验证我们方法的有效性，我们还基于先进的时序动作提名生成模型DBG进行实验验证。DBG同时生成起始、结束和动作置信度图以评估所有动作提议，与BMN相比，它可以生成更灵活的动作提议。由于具有更好的动作提议生成器DBG，我们的AFAN（DBG）在ActivityNet-1.3数据集上取得了更高的性能。",{"type":17,"tag":25,"props":322,"children":323},{},[324],{"type":17,"tag":229,"props":325,"children":327},{"alt":7,"src":326},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/01/12/698d4346d98641cd94c1ca48a207f899.png",[],{"type":17,"tag":25,"props":329,"children":330},{},[331],{"type":23,"value":332},"不同动作频率视频的性能对比",{"type":17,"tag":25,"props":334,"children":335},{},[336],{"type":23,"value":337},"此外，我们还分析了不同动作频率视频下的性能以及与相应基准方法进行比较。实验结果显示，我们的动作频率自适应方法能够有效提升高动作频率视频的性能。通过加权知识蒸馏来减轻数据不平衡问题时，低动作频率视频的性能（数量上占主导地位）会略有下降，但仍然保持较高的水平。因此，我们的方法保持了基线方法在低动作频率视频上高性能。而对于高动作频率视频，我们的方法明显优于基准方法。因此，我们的方法在整体性能上取得了更好的表现。",{"type":17,"tag":25,"props":339,"children":340},{},[341],{"type":17,"tag":31,"props":342,"children":343},{},[344],{"type":23,"value":345},"05",{"type":17,"tag":25,"props":347,"children":348},{},[349],{"type":17,"tag":31,"props":350,"children":351},{},[352],{"type":23,"value":353},"总结与展望",{"type":17,"tag":25,"props":355,"children":356},{},[357],{"type":23,"value":358},"本文介绍了一种动作频率自适应视频时序行为提名生成的方法，旨在高质量地完成未剪辑视频中人类动作的定位任务。该方法针对未剪辑视频动作频率变化问题进行研究。通过数据分析，发现现有方法受限于数据不平衡和高频率视频中的短时动作。为此，设计了专家学习模块和精细化处理模块分别用来减小数据不平衡问题和短时动作的影响。在四个基准数据集上进行的大量实验证明了我们方法的有效性和通用性。我们的方法为视频时序行为提名生成的研究提供了新思路，有助于现实场景的应用。",{"type":17,"tag":25,"props":360,"children":361},{},[362],{"type":23,"value":363},"往期回顾",{"type":17,"tag":18,"props":365,"children":366},{"id":7},[367],{"type":17,"tag":72,"props":368,"children":371},{"href":369,"rel":370},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247611994&idx=1&sn=58e193e0d992ba9a0b9a4d5b58330b7d&chksm=c11e2615f669af0378d929150afe8475ff1d681c6ae21b0681bfeb58810c1078e83ac77c1287&scene=21#wechat_redirect",[76],[],{"type":17,"tag":18,"props":373,"chil
dren":375},{"id":374},"论文精讲-基于昇思mindspore无锚框时序动作定位研究解决未剪辑视频的识别和定位问题",[376],{"type":17,"tag":72,"props":377,"children":380},{"href":378,"rel":379},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247612399&idx=1&sn=5ee0e45e3d4548c42b2f0236180e1688&chksm=c11e27a0f669aeb68ec97b2ad0fb47720c1982e4ccb87cba1649451453212e470a20d7430bb9&scene=21#wechat_redirect",[76],[381],{"type":23,"value":382},"论文精讲 | 基于昇思MindSpore无锚框时序动作定位研究解决未剪辑视频的识别和定位问题",{"type":17,"tag":18,"props":384,"children":386},{"id":385},"论文精讲-基于昇思mindspore的行人重识别和步态识别探究解决行人的换装问题",[387],{"type":17,"tag":72,"props":388,"children":391},{"href":389,"rel":390},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247612203&idx=1&sn=458b39b426f7f9a5b8b4603491adc64e&chksm=c11e2764f669ae721763e8dced1b0172fa504443a9e04b3add2215ac000f262b82b3e801da9c&scene=21#wechat_redirect",[76],[392],{"type":23,"value":393},"论文精讲 | 基于昇思MindSpore的行人重识别和步态识别探究解决行人的换装问题",{"type":17,"tag":18,"props":395,"children":397},{"id":396},"论文精讲-基于昇思mindspore打造首个深度学习开源视频压缩算法库opendmc支持跨平台和多种评价指标",[398],{"type":17,"tag":72,"props":399,"children":401},{"href":369,"rel":400},[76],[402],{"type":23,"value":403},"论文精讲 | 基于昇思MindSpore打造首个深度学习开源视频压缩算法库OpenDMC,支持跨平台和多种评价指标",{"type":17,"tag":18,"props":405,"children":407},{"id":406},"论文精讲-基于昇思mindspore的rgb-d显著性目标检测网络picr-net准确高质量检测实物",[408],{"type":17,"tag":72,"props":409,"children":412},{"href":410,"rel":411},"http://mp.weixin.qq.com/s?__biz=MzkxMTM2MjMzNg==&mid=2247611777&idx=1&sn=b8fb5fae2124943c515dc2002c357867&chksm=c11e25cef669acd8abf028cd6f7f0144e5fca87fd8c60cd5defce2509d3506a6b593427706df&scene=21#wechat_redirect",[76],[413],{"type":23,"value":414},"论文精讲 | 基于昇思MindSpore的RGB-D显著性目标检测网络PICR-Net，准确高质量检测实物",{"title":7,"searchDepth":416,"depth":416,"links":417},4,[],"markdown","content:technology-blogs:zh:2944.md","content","technology-blogs/zh/2944.md","technology-blogs/zh/2944","md",1776506124500]