[{"data":1,"prerenderedAt":313},["ShallowReactive",2],{"content-query-1pg1RHqx4Q":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":307,"_id":308,"_source":309,"_file":310,"_stem":311,"_extension":312},"/technology-blogs/zh/3790","zh",false,"","BEIT模型论文解读，并基于MindSpore NLP推理复现","作者：四顾   来源：开源实习","2025-07-08","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/48996fde314947a59bc6f299a7dbaa18.png","technology-blogs","开发者说",{"type":15,"children":16,"toc":304},"root",[17,25,31,36,41,49,54,63,71,76,84,92,99,109,114,119,124,141,146,151,161,166,174,185,193,198,205,210,217,225,230,237,245,253,258,263,268,273,278,286,294],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"beit模型论文解读并基于mindspore-nlp推理复现",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":30},"作者：四顾",{"type":18,"tag":26,"props":32,"children":33},{},[34],{"type":24,"value":35},"来源：开源实习",{"type":18,"tag":26,"props":37,"children":38},{},[39],{"type":24,"value":40},"昇思MindSpore开源实习模型论文解读任务已顺利完成，共收到高质量模型论文解读稿件10+篇。欢迎开发者积极参与昇思MindSpore开源实习活动，开源实习暑期活动已开启，更多新任务等你来挑战！",{"type":18,"tag":26,"props":42,"children":43},{},[44],{"type":18,"tag":45,"props":46,"children":48},"img",{"alt":7,"src":47},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/2fb95171402740c19d978a6602f2ee28.png",[],{"type":18,"tag":26,"props":50,"children":51},{},[52],{"type":24,"value":53},"开源实习官网",{"type":18,"tag":26,"props":55,"children":56},{},[57],{"type":18,"tag":58,"props":59,"children":60},"strong",{},[61],{"type":24,"value":62},"# 01",{"type":18,"tag":26,"props":64,"children":65},{},[66],{"type":18,"tag":58,"props":67,"children":68},{},[69],{"type":24,"value":70},"引言",{"type":18,"tag":26,"props":72,"children":73},{},[74],{"type":24,"value":75},"视觉Transformer在计算机视觉领域中取得了重大突破。然而，相比于卷积神经网络，它们通常需要大量数据才能发挥出最佳性能。为了解决这一问题，微软的研究人员提出一种名为BEiT（Bidirectional Encoder representation from Image Transformers）的自监督视觉表征模型，将自然语言处理的成功经验BERT引入到视觉领域，并取得了不错的效果。",{"type":18,"tag":26,"props":77,"children":78},{},[79],{"type":18,"tag":58,"props":80,"children":81},{},[82],{"type":24,"value":83},"# 02",{"type":18,"tag":26,"props":85,"children":86},{},[87],{"type":18,"tag":58,"props":88,"children":89},{},[90],{"type":24,"value":91},"论文创新点",{"type":18,"tag":26,"props":93,"children":94},{},[95],{"type":18,"tag":45,"props":96,"children":98},{"alt":7,"src":97},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/b8388fa11209467a8d10208d6008bc81.png",[],{"type":18,"tag":26,"props":100,"children":101},{},[102,104],{"type":24,"value":103},"**1、**",{"type":18,"tag":58,"props":105,"children":106},{},[107],{"type":24,"value":108},"图像表示",{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"本文中，图像有两个视图的表示，分别是图像块和视觉token。这两种类型分别作为预训练过程中的输入和输出表示。",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":24,"value":118},"**图像块：**将二维图像分割成一系列图像块序列，以便标准 Transformer 能够直接接收图像数据。例如，将 224×224 的图像分割成 14×14 的网格，每个图像块为 16×16。",{"type":18,"tag":26,"props":120,"children":121},{},[122],{"type":24,"value":123},"**视觉token：**类似于自然语言中的词，将图像表示为由“图像标记器”获得的离散token序列。视觉token是通过离散变分自编码器（dVAE）学习得到的，每个图像被标记为一系列离散的token，如将 224×224 的图像标记为 14×14 的视觉token网格，使用大小为 8192 的词汇表。",{"type":18,"tag":26,"props":125,"children":126},{},[127,129,134,136],{"type":24,"value":128},"**2、**",{"type":18,"tag":58,"props":130,"children":131},{},[132],{"type":24,"value":133},"掩码图像建模",{"type":24,"value":135}," ",{"type":18,"tag":58,"props":137,"children":138},{},[139],{"type":24,"value":140},"(Masked Image Modeling, MIM)",{"type":18,"tag":26,"props":142,"children":143},{},[144],{"type":24,"value":145},"BEIT模型的核心创新点就是提出了掩码图像建模MIM，是一种自监督学习任务，旨在通过预测图像中被随机掩码遮盖的部分来学习图像的特征表示。具体而言，先将图像分割成图像块（如 16×16 像素）并将其“标记化”为离散的视觉token，然后随机掩码一定比例的图像块，将这些损坏的图像块输入到 Transformer 中，预训练目标是基于损坏的图像块恢复原始的视觉token。",{"type":18,"tag":26,"props":147,"children":148},{},[149],{"type":24,"value":150},"在实际应用过程中，采用块状掩码（blockwise masking）而非随机选择掩码位置。每次掩码一个图像块区域，设置每个块的最小掩码数为 16，然后随机选择掩码块的宽高比，重复此过程直到获得足够的掩码块（如占总图像块的 40%）。这种块状掩码方式有助于模型更好地学习图像的局部结构和全局语义信息。对于每个被掩码的位置，使用 softmax 分类器来预测对应的视觉token。预训练的目标是最大化给定损坏图像下正确视觉token的对数似然。",{"type":18,"tag":26,"props":152,"children":153},{},[154,156],{"type":24,"value":155},"**3、**",{"type":18,"tag":58,"props":157,"children":158},{},[159],{"type":24,"value":160},"预训练和微调",{"type":18,"tag":26,"props":162,"children":163},{},[164],{"type":24,"value":165},"在预训练阶段，BEiT模型使用掩码图像建模任务进行训练，以学习图像的特征表示。在微调阶段，使用预训练模型的权重进行初始化，并使用下游任务的标注数据进行微调。并进行大量实验，验证本文提出的方法的有效性。",{"type":18,"tag":26,"props":167,"children":168},{},[169],{"type":18,"tag":58,"props":170,"children":171},{},[172],{"type":24,"value":173},"# 03",{"type":18,"tag":26,"props":175,"children":176},{},[177],{"type":18,"tag":58,"props":178,"children":179},{},[180],{"type":18,"tag":58,"props":181,"children":182},{},[183],{"type":24,"value":184},"数据集上的评价指标得分",{"type":18,"tag":26,"props":186,"children":187},{},[188],{"type":18,"tag":58,"props":189,"children":190},{},[191],{"type":24,"value":192},"1、图像分类",{"type":18,"tag":26,"props":194,"children":195},{},[196],{"type":24,"value":197},"BEiT的预训练在ImageNet-1K数据集上进行，并在下游任务上进行了微调，并使用ImageNet-1K数据集进行了评估。从评估结果中可以看出，BEiT基础模型在224×224分辨率的图像上达到了83.2%的top-1准确率，在384×384分辨率的图像上达到了84.6%的top-1准确率。.参数更多的模型在224×224分辨率的图像上达到了85.2%的top-1准确率，在384×384分辨率的图像上达到了86.3%的top-1准确率。在相同条件下BEiT的效果由于其他模型。",{"type":18,"tag":26,"props":199,"children":200},{},[201],{"type":18,"tag":45,"props":202,"children":204},{"alt":7,"src":203},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/1358f7f625cf4f4ca66b60f566bd43f5.png",[],{"type":18,"tag":26,"props":206,"children":207},{},[208],{"type":24,"value":209},"本文也正在CIFAR-100数据集上进行微调和实验评估，微调后推理准确率为91.8%。",{"type":18,"tag":26,"props":211,"children":212},{},[213],{"type":18,"tag":45,"props":214,"children":216},{"alt":7,"src":215},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/e22a7555dd894cadb35ac91038cb2737.png",[],{"type":18,"tag":26,"props":218,"children":219},{},[220],{"type":18,"tag":58,"props":221,"children":222},{},[223],{"type":24,"value":224},"2、图像分割",{"type":18,"tag":26,"props":226,"children":227},{},[228],{"type":24,"value":229},"语义分割任务的目标是为输入图像的每个像素预测一个对应的类别，在ADE20K数据集上进行了图像分割任务的评估，该数据集包含 25,000 张图像和 150 个语义类别。同时在该数据集上进行微调和测试。最终测试得分为47.7。",{"type":18,"tag":26,"props":231,"children":232},{},[233],{"type":18,"tag":45,"props":234,"children":236},{"alt":7,"src":235},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2025/07/11/9af3c653dd3f4d45868ac26a69152936.png",[],{"type":18,"tag":26,"props":238,"children":239},{},[240],{"type":18,"tag":58,"props":241,"children":242},{},[243],{"type":24,"value":244},"# 04",{"type":18,"tag":26,"props":246,"children":247},{},[248],{"type":18,"tag":58,"props":249,"children":250},{},[251],{"type":24,"value":252},"创新点相比于其他工作的优势",{"type":18,"tag":26,"props":254,"children":255},{},[256],{"type":24,"value":257},"**性能提升：**在图像分类和语义分割等下游任务中，BEiT相较于从头训练和之前的自监督模型取得了更好的性能。例如，在ImageNet数据集上，BEiT-B的top-1准确率达到83.2%，BEIT384-L的准确率达到86.3%，超过了其他自监督方法以及一些监督预训练方法。",{"type":18,"tag":26,"props":259,"children":260},{},[261],{"type":24,"value":262},"**收敛速度和稳定性：**与从头训练相比，BEiT预训练后再微调在训练过程中的收敛速度更快且更稳定。这意味着在实际应用中，使用BEiT可以减少训练时间和资源消耗，同时获得更好的模型性能。",{"type":18,"tag":26,"props":264,"children":265},{},[266],{"type":24,"value":267},"**学习语义区域的能力：**尽管BEiT的预训练过程不使用任何人类标注数据，但其自注意力机制能够学习到区分语义区域和对象边界的能力。这表明BEiT在预训练过程中自动获取了图像的语义信息，从而为下游任务提供了更丰富的特征表示。",{"type":18,"tag":26,"props":269,"children":270},{},[271],{"type":24,"value":272},"**模型扩展性：**对于更大规模的模型，BEiT的优势更加显著。当扩展到大型模型时，BEiT在性能上的提升超过了仅使用监督预训练的大型模型，显示出更好的扩展性。例如，BEIT-L在ImageNet384上的准确率比ViT384-L高1.2%，这表明BEiT的预训练方法对于大规模模型更为有效。",{"type":18,"tag":26,"props":274,"children":275},{},[276],{"type":24,"value":277},"**减少标注数据依赖：**在与监督预训练的比较中发现，BEiT与监督预训练是互补的。通过中间微调，BEiT在使用较少标注数据的情况下能够进一步提升性能，降低了对大规模标注数据的依赖，这对于标注数据获取困难的场景具有重要意义。",{"type":18,"tag":26,"props":279,"children":280},{},[281],{"type":18,"tag":58,"props":282,"children":283},{},[284],{"type":24,"value":285},"# 05",{"type":18,"tag":26,"props":287,"children":288},{},[289],{"type":18,"tag":58,"props":290,"children":291},{},[292],{"type":24,"value":293},"MindSpore NLP推理移植",{"type":18,"tag":295,"props":296,"children":298},"pre",{"code":297},"import os\nos.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, ops\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision as vision\nfrom mindspore.dataset.vision import Inter\nfrom mindnlp.transformers import BeitForImageClassification, BeitImageProcessor\nimport numpy as np\n\n# Ascend 设备配置\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n# 加载模型（float16）\nmodel = BeitForImageClassification.from_pretrained(\n    pretrained_model_name_or_path='microsoft/beit-large-patch16-224',\n    num_labels=10,\n    ignore_mismatched_sizes=True\n)\nmodel.set_train(False)\nfor param in model.get_parameters():\n    param.set_dtype(ms.float16)\n\n# 图像预处理\nprocessor = BeitImageProcessor.from_pretrained('microsoft/beit-large-patch16-224')\nsize = processor.size[\"height\"]\ntransform_ops = [\n    vision.Resize((size, size), interpolation=Inter.BICUBIC),\n    vision.Normalize(mean=processor.image_mean, std=processor.image_std),\n    vision.HWC2CHW(),\n    lambda x: x.astype(np.float16)\n]\n\n# 加载测试集\ncifar10_ds = ds.Cifar10Dataset(dataset_dir='./cifar10_data_bin', usage='test', shuffle=False)\ncifar10_ds = cifar10_ds.map(operations=transform_ops, input_columns=\"image\", num_parallel_workers=8)\ncifar10_ds = cifar10_ds.batch(64, drop_remainder=False)\n\n# 预热模型\n_ = model(Tensor(np.random.randn(1, 3, size, size), dtype=ms.float16))\n\n# 推理与准确率计算\ntotal_correct, total_samples = 0, 0\nfor data in cifar10_ds.create_tuple_iterator():\n    images, labels = data\n    logits = model(images).logits\n    preds = ops.argmax(logits, 1).astype(ms.int64)\n    labels = labels.astype(ms.int64)\n    correct = ops.equal(preds, labels).sum().asnumpy()\n    total_correct += correct\n    total_samples += images.shape[0]\n\n# 输出准确率\naccuracy = total_correct / total_samples\nprint(f\"{accuracy * 100:.2f}%\")\n",[299],{"type":18,"tag":300,"props":301,"children":302},"code",{"__ignoreMap":7},[303],{"type":24,"value":297},{"title":7,"searchDepth":305,"depth":305,"links":306},4,[],"markdown","content:technology-blogs:zh:3790.md","content","technology-blogs/zh/3790.md","technology-blogs/zh/3790","md",1776506135314]