[{"data":1,"prerenderedAt":982},["ShallowReactive",2],{"content-query-CPQdnC7zKU":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":976,"_id":977,"_source":978,"_file":979,"_stem":980,"_extension":981},"/technology-blogs/zh/3145","zh",false,"","基于MindSpore框架的深度学习模型在CV方向的应用--(5) 基于DeepLabV3实现语义分割案例","本实验主要介绍使用MindSpore深度学习框架在PASCAL VOC格式的数据集上实现Deeplabv3网络模型并完成语义分割及测试。","2024-06-06","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/06/07/6d1cae62cea44671803fc497805dbebf.png","technology-blogs","实践",{"type":15,"children":16,"toc":933},"root",[17,25,30,37,57,63,68,73,81,86,99,104,109,114,119,127,132,137,142,150,155,160,166,171,176,194,200,207,212,235,240,245,250,298,304,309,315,320,330,336,341,349,355,361,379,387,392,400,405,413,419,425,430,438,450,455,461,467,472,477,481,486,501,506,510,514,521,525,536,540,544,548,552,559,563,567,571,578,582,586,591,595,599,614,619,624,628,647,651,655,659,698,703,707,712,716,727,732,736,746,751,756,782,786,796,800,810,815,820,824,834,840,845,850,859,865,871,876,885,890,899,905,910,919,924],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"基于mindspore框架的深度学习模型在cv方向的应用-5-基于deeplabv3实现语义分割案例",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":24,"value":9},{"type":18,"tag":31,"props":32,"children":34},"h2",{"id":33},"_1实验目的",[35],{"type":24,"value":36},"1、实验目的",{"type":18,"tag":38,"props":39,"children":40},"ul",{},[41,47,52],{"type":18,"tag":42,"props":43,"children":44},"li",{},[45],{"type":24,"value":46},"掌握如何使用MindSpore深度学习框架构建deeplabv3网络模型。",{"type":18,"tag":42,"props":48,"children":49},{},[50],{"type":24,"value":51},"了解如何使用MindSpore准备数据集以及预训练相关模型。",{"type":18,"tag":42,"props":53,"children":54},{},[55],{"type":24,"value":56},"了解如何使用MindSpore加载预训练模型并完成预测。",{"type":18,"tag":31,"props":58,"children":60},{"id":59},"_2语义分割算法原理介绍",[61],{"type":24,"value":
62},"2、语义分割算法原理介绍",{"type":18,"tag":26,"props":64,"children":65},{},[66],{"type":24,"value":67},"语义分割是基于像素点级别的物体识别问题。目标是用对应的所表示的类来标记图像的每个像素。因为我们正在预测图像中的每个像素，所以此任务通常被称为密集预测。",{"type":18,"tag":26,"props":69,"children":70},{},[71],{"type":24,"value":72},"语义分割有着广泛的应用场景，包括自动驾驶，人机交互，医学图像诊断，计算摄影学和增强现实等。",{"type":18,"tag":38,"props":74,"children":75},{},[76],{"type":18,"tag":42,"props":77,"children":78},{},[79],{"type":24,"value":80},"语义分割网络介绍",{"type":18,"tag":26,"props":82,"children":83},{},[84],{"type":24,"value":85},"一般的语义分割架构可以被认为是一个编码器-解码器网络。编码器通常是一个预训练的分类网络，像 VGG、ResNet，然后是一个解码器网络。这些架构不同的地方主要在于解码器网络。解码器的任务是将编码器学习到的可判别特征（较低分辨率）从语义上投影到像素空间（较高分辨率），以获得密集分类。语义分割不仅需要在像素级有判别能力，还需要有能将编码器在不同阶段学到的可判别特征投影到像素空间的机制。不同的架构采用不同的机制（跳跃连接、金字塔池化等）作为解码机制的一部分。",{"type":18,"tag":38,"props":87,"children":88},{},[89,94],{"type":18,"tag":42,"props":90,"children":91},{},[92],{"type":24,"value":93},"Deeplab网络介绍 近来，深度卷积网络（DCNN）在高级视觉任务（图像分类和目标检测）中展示了优异的性能。",{"type":18,"tag":42,"props":95,"children":96},{},[97],{"type":24,"value":98},"Deeplabv1结合DCNN 和概率图模型来解决像素级分类任务（即语义分割）。其关键特点：",{"type":18,"tag":26,"props":100,"children":101},{},[102],{"type":24,"value":103},"1.提出空洞卷积（atrous convolution）。",{"type":18,"tag":26,"props":105,"children":106},{},[107],{"type":24,"value":108},"2.在最后两个最大池化操作中不降低特征图的分辨率，并在倒数第二个最大池化之后的卷积中使用空洞卷积。",{"type":18,"tag":26,"props":110,"children":111},{},[112],{"type":24,"value":113},"3.使用 CRF（条件随机场） 作为后处理，恢复边界细节，达到准确定位效果。",{"type":18,"tag":26,"props":115,"children":116},{},[117],{"type":24,"value":118},"4.附加输入图像和前四个最大池化层的每个输出到一个两层卷积，然后拼接到主网络的最后一层，达到 
多尺度预测效果。",{"type":18,"tag":38,"props":120,"children":121},{},[122],{"type":18,"tag":42,"props":123,"children":124},{},[125],{"type":24,"value":126},"Deeplabv3关键特点：",{"type":18,"tag":26,"props":128,"children":129},{},[130],{"type":24,"value":131},"1.强调上采样过滤器的卷积，或“空洞卷积”，在密集预测任务中是一个强大的工具。空洞卷积允许显式地控制在深度卷积神经网络中计算的特征响应的分辨率。它还允许有效地扩大过滤器的视野，在不增加参数数量或计算量的情况下引入更大的上下文。",{"type":18,"tag":26,"props":133,"children":134},{},[135],{"type":24,"value":136},"2.提出了一种空洞空间金字塔池化（ASPP）的多尺度鲁棒分割方法。ASPP 使用多个采样率的过滤器和有效的视野探测传入的卷积特征层，从而在多个尺度上捕获目标和图像上下文。 avatar",{"type":18,"tag":26,"props":138,"children":139},{},[140],{"type":24,"value":141},"3.结合 DCNNs 方法和概率图形模型，改进了目标边界的定位。DCNNs 中常用的最大池化和下采样的组合实现了不变性，但对定位精度有一定的影响。通过将 DCNN 最后一层的响应与一个全连接条件随机场(CRF)相结合来克服这个问题。",{"type":18,"tag":38,"props":143,"children":144},{},[145],{"type":18,"tag":42,"props":146,"children":147},{},[148],{"type":24,"value":149},"Deeplabv3相较于之前的Deeplab有很大的改进，在Pascal VOC 2012图像语义分割基准上获得了state-of-art的性能,参考论文《Rethinking Atrous Convolution for Semantic Image Segmentation 》。 其关键特点：",{"type":18,"tag":26,"props":151,"children":152},{},[153],{"type":24,"value":154},"1.为了解决多尺度目标的分割问题，串行/并行设计了能够捕捉多尺度上下文的模块，模块中采用不同的空洞率。",{"type":18,"tag":26,"props":156,"children":157},{},[158],{"type":24,"value":159},"2.增强了先前提出的空洞空间金字塔池化（ASPP）模块，增加了图像级特征来编码全局上下文，使得模块可以在多尺度下探测卷积特征。并在没有 CRF 作为后处理的情况下显著提升了性能。",{"type":18,"tag":31,"props":161,"children":163},{"id":162},"_3-实验环境",[164],{"type":24,"value":165},"3 实验环境",{"type":18,"tag":26,"props":167,"children":168},{},[169],{"type":24,"value":170},"在动手进行实践之前，需要注意以下几点：",{"type":18,"tag":26,"props":172,"children":173},{},[174],{"type":24,"value":175},"确保实验环境正确安装，包括安装MindSpore。安装过程：首先登录MindSpore官网安装页面，根据安装指南下载安装包及查询相关文档。同时，官网环境安装也可以按下表说明找到对应环境搭建文档链接，根据环境搭建手册配置对应的实验环境。",{"type":18,"tag":38,"props":177,"children":178},{},[179,184,189],{"type":18,"tag":42,"props":180,"children":181},{},[182],{"type":24,"value":183},"推荐使用交互式的计算环境Jupyter 
Notebook，其交互性强，易于可视化，适合频繁修改的数据分析实验环境。",{"type":18,"tag":42,"props":185,"children":186},{},[187],{"type":24,"value":188},"实验也可以在华为云一站式的AI开发平台ModelArts上完成。",{"type":18,"tag":42,"props":190,"children":191},{},[192],{"type":24,"value":193},"推荐实验环境：MindSpore版本=MindSpore 2.0；Python环境=3.7。",{"type":18,"tag":31,"props":195,"children":197},{"id":196},"_4数据处理",[198],{"type":24,"value":199},"4、数据处理",{"type":18,"tag":201,"props":202,"children":204},"h3",{"id":203},"_41数据准备",[205],{"type":24,"value":206},"4.1数据准备",{"type":18,"tag":26,"props":208,"children":209},{},[210],{"type":24,"value":211},"PASCAL VOC挑战赛 （The PASCAL Visual Object Classes ）是一个世界级的计算机视觉挑战赛，在2005年-2012年举办。 PASCAL VOC挑战赛主要包括以下几类：",{"type":18,"tag":38,"props":213,"children":214},{},[215,220,225,230],{"type":18,"tag":42,"props":216,"children":217},{},[218],{"type":24,"value":219},"图像分类(Object Classification)",{"type":18,"tag":42,"props":221,"children":222},{},[223],{"type":24,"value":224},"目标检测(Object Detection)",{"type":18,"tag":42,"props":226,"children":227},{},[228],{"type":24,"value":229},"目标分割(Object Segmentation)",{"type":18,"tag":42,"props":231,"children":232},{},[233],{"type":24,"value":234},"行为识别(Action Classification) 等。",{"type":18,"tag":26,"props":236,"children":237},{},[238],{"type":24,"value":239},"voc数据集是该项比赛公布使用的数据集，在2012版本中，共拥有17000张图片及相对应的标注，供参赛者使用以完成相关的挑战。 在Pascal VOC数据集中主要包含20个目标类别，下图展示了所有类别的名称以及所属超类。",{"type":18,"tag":26,"props":241,"children":242},{},[243],{"type":24,"value":244},"本次实验采用voc2012数据集切割后的 miniVOC数据集，共有58张图片及相应的标注。",{"type":18,"tag":26,"props":246,"children":247},{},[248],{"type":24,"value":249},"数据格式如下：",{"type":18,"tag":38,"props":251,"children":252},{},[253,258,263,268,273,278,283,288,293],{"type":18,"tag":42,"props":254,"children":255},{},[256],{"type":24,"value":257},"/seg2",{"type":18,"tag":42,"props":259,"children":260},{},[261],{"type":24,"value":262},"├── JPEG",{"type":18,"tag":42,"props":264,"children":265},{},[266],{"type":24,"value":267}," 
├──xxxxxxx.jpg",{"type":18,"tag":42,"props":269,"children":270},{},[271],{"type":24,"value":272}," └──xxxxxxx2.jpg",{"type":18,"tag":42,"props":274,"children":275},{},[276],{"type":24,"value":277},"├──MASK1",{"type":18,"tag":42,"props":279,"children":280},{},[281],{"type":24,"value":282}," ├──xxxxxx.png",{"type":18,"tag":42,"props":284,"children":285},{},[286],{"type":24,"value":287}," └──xxxxxx2.jpg",{"type":18,"tag":42,"props":289,"children":290},{},[291],{"type":24,"value":292},"├──train.txt",{"type":18,"tag":42,"props":294,"children":295},{},[296],{"type":24,"value":297},"└──val.txt",{"type":18,"tag":201,"props":299,"children":301},{"id":300},"_42数据加载",[302],{"type":24,"value":303},"4.2数据加载",{"type":18,"tag":26,"props":305,"children":306},{},[307],{"type":24,"value":308},"使用预先处理好的voc格式数据集，将其保存在项目的根目录下。",{"type":18,"tag":201,"props":310,"children":312},{"id":311},"_43-导入python库",[313],{"type":24,"value":314},"4.3 导入python库",{"type":18,"tag":26,"props":316,"children":317},{},[318],{"type":24,"value":319},"在代码最开始集中导入整个实验中所需要使用的库和函数。",{"type":18,"tag":321,"props":322,"children":324},"pre",{"code":323},"# 导入依赖库\n# os库\nimport os\n# 引入numpy\nimport numpy as np\n# 引入读写不同数据文件格式的函数\nimport scipy.io\n# 引入数据序列化和反序列化\nimport pickle\n# 引入操作图像方法\nfrom PIL import Image\n# 引入高级的文件,文件夹,压缩包处理模块\nimport shutil\n# 引入计算机视觉库\nimport cv2\n# 引入归一化提供训练测试所用的数据集\nfrom mindspore.mindrecord import FileWriter\n# 引入数据读取\nimport mindspore.dataset as de\n# 引入MindSpore\nimport mindspore as ms\n# 引入神经网络模块\nimport mindspore.nn as nn\n#导入mindspore中的ops模块\nimport mindspore.ops  as P\n# 引入张量模块\nfrom mindspore import Tensor\nfrom mindspore import load_checkpoint\nfrom mindspore import load_param_into_net\nfrom mindspore import dtype as mstype\n# 引入python解释器和它的环境有关的函数\nimport sys\n# 将字典转为easydict\nfrom easydict import EasyDict as edict\n# 引入模型训练或推理的高阶接口。\n# 引入用于构建Callback函数基类。\nfrom mindspore.train import ModelCheckpoint, CheckpointConfig,LossMonitor, TimeMonitor,Model\n# 引入集合通信接口\nfrom 
mindspore.communication import init, get_rank, get_group_size\n# 引入用于构建Callback函数的基类。\nfrom mindspore import amp\nfrom mindspore import set_seed\nimport PIL\n# 引入绘图模块\nimport matplotlib.pyplot as plt\n# 引入可视化库\nimport matplotlib as mpl\n# 引入可视化库色彩模块\nimport matplotlib.colors as colors\n# import moxing as mox\n",[325],{"type":18,"tag":326,"props":327,"children":328},"code",{"__ignoreMap":7},[329],{"type":24,"value":323},{"type":18,"tag":201,"props":331,"children":333},{"id":332},"_44-数据预处理",[334],{"type":24,"value":335},"4.4 数据预处理",{"type":18,"tag":26,"props":337,"children":338},{},[339],{"type":24,"value":340},"构建一个数据集分割类，通过该类生成Mindrecord文件，实现预处理数据集等操作。",{"type":18,"tag":321,"props":342,"children":344},{"code":343},"# 设置Opencv的线程数量为0\ncv2.setNumThreads(0)\n# 数据集对象，用于载入语义分割数据集\nclass SegDataset:\n    def __init__(self,\n                 image_mean,# 图像像素值平均值\n                 image_std,# 图像像素值标准差\n                 data_file='',# 数据集文件路径\n                 batch_size=32,# 单次训练所使用样本的数量\n                 crop_size=512,# 随机裁剪后的图片大小\n                 max_scale=2.0,# 最大缩放比例\n                 min_scale=0.5,# 最小缩放比例\n                 ignore_label=255,# 忽略标签值\n                 num_classes=21,# 图像和标签中的类别数量\n                 num_readers=2,# 读取数据的IO线程数量\n                 num_parallel_calls=4,# 数据集batch的并行度\n                 shard_id=None, # 数据集分片ID，None表示无分片\n                 shard_num=None# 数据集分片数量，None表示无分片\n                               ):\n        # 定义数据集文件路径\n        self.data_file = data_file\n        # 定义单次训练所使用样本的数量\n        self.batch_size = batch_size\n        # 定义随机裁剪后的图片大小\n        self.crop_size = crop_size\n        # 定义图像像素值平均值\n        self.image_mean = np.array(image_mean, dtype=np.float32)\n        # 定义图像像素值标准差\n        self.image_std = np.array(image_std, dtype=np.float32)\n        # 定义最大缩放比例\n        self.max_scale = max_scale\n        # 定义最小缩放比例\n        self.min_scale = min_scale\n        # 定义忽略标签值\n        self.ignore_label = ignore_label\n        # 
定义图像和标签中的类别数量\n        self.num_classes = num_classes\n        # 定义读取数据的IO线程数量\n        self.num_readers = num_readers\n        # 定义数据集batch的并行度\n        self.num_parallel_calls = num_parallel_calls\n        # 定义数据集分片ID\n        self.shard_id = shard_id\n        # 定义数据集分片数量\n        self.shard_num = shard_num\n        # VOC数据集原始图片文件夹路径\n        self.voc_img_dir = os.path.join(self.data_file,'JPEG')\n        # VOC数据集语义标注图片文件夹路径\n        self.voc_anno_dir = os.path.join(self.data_file,'MASK1')\n        # VOC数据集训练集文件列表路径\n        self.voc_train_lst = os.path.join(self.data_file,'train.txt')\n        # VOC数据集验证集文件列表路径\n        self.voc_val_lst = os.path.join(self.data_file,'val.txt')\n        #  VOC数据集使用的灰度标注图片文件夹路径\n        self.voc_anno_gray_dir = os.path.join(self.data_file,'SegmentationClassGray')\n        # 生成的MindRecord文件保存路径\n        self.mindrecord_save =  os.path.join(self.data_file,'VOC_mindrecord')\n        # 最大缩放比例必须大于最小缩放比例                               \n        assert max_scale > min_scale\n    #数据预处理，包括图像的解码，尺度缩放，随机裁剪等操作\n    def preprocess_(self, image, label):\n        #bgr图像解码\n        image_out = cv2.imdecode(np.frombuffer(image, dtype=np.uint8), cv2.IMREAD_COLOR)\n        #灰度图像解码\n        label_out = cv2.imdecode(np.frombuffer(label, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)\n        #尺度缩放\n        sc = np.random.uniform(self.min_scale, self.max_scale)\n        new_h, new_w = int(sc * image_out.shape[0]), int(sc * image_out.shape[1])\n        image_out = cv2.resize(image_out, (new_w, new_h), interpolation=cv2.INTER_CUBIC)\n        label_out = cv2.resize(label_out, (new_w, new_h), interpolation=cv2.INTER_NEAREST)\n        #图像标准化\n        image_out = (image_out - self.image_mean) / self.image_std\n        #随机裁剪\n        h_, w_ = max(new_h, self.crop_size), max(new_w, self.crop_size)\n        pad_h, pad_w = h_ - new_h, w_ - new_w\n        if pad_h > 0 or pad_w > 0:\n            image_out = cv2.copyMakeBorder(image_out, 0, pad_h, 0, pad_w, 
cv2.BORDER_CONSTANT, value=0)\n            label_out = cv2.copyMakeBorder(label_out, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=self.ignore_label)\n        offset_h = np.random.randint(0, h_ - self.crop_size + 1)\n        offset_w = np.random.randint(0, w_ - self.crop_size + 1)\n        image_out = image_out[offset_h: offset_h + self.crop_size, offset_w: offset_w + self.crop_size, :]\n        label_out = label_out[offset_h: offset_h + self.crop_size, offset_w: offset_w+self.crop_size]\n        #随机水平翻转\n        if np.random.uniform(0.0, 1.0) > 0.5:\n            image_out = image_out[:, ::-1, :]\n            label_out = label_out[:, ::-1]\n        #图像转置以符合模型输入要求\n        image_out = image_out.transpose((2, 0, 1))\n        image_out = image_out.copy()\n        label_out = label_out.copy()\n        return image_out, label_out\n\n    #得到灰度数据集的方法，若文件已存在，则直接返回\n    def get_gray_dataset(self):\n        if os.path.exists(self.voc_anno_gray_dir):\n            print('the gray file is already exists！')\n            return\n        os.makedirs(self.voc_anno_gray_dir)\n\n        #转换彩色图像为灰度图像，并保存到指定路径\n        print('converting voc color png to gray png ...')\n        for ann in os.listdir(self.voc_anno_dir):\n            ann_im = Image.open(os.path.join(self.voc_anno_dir, ann))\n            #将该图像转换为灰度图像\n            ann_im = Image.fromarray(np.array(ann_im))\n            ann_im.save(os.path.join(self.voc_anno_gray_dir, ann))\n        print('converting done')\n    #获取MindRecord格式的数据集，num_shards为生成MindRecord的分片数，shuffle为是否对数据做洗牌处理    \n    def get_mindrecord_dataset(self, is_training,num_shards=1, shuffle=True):\n        datas = []\n        if is_training:\n            data_lst = self.voc_train_lst\n            self.mindrecord_save = os.path.join(self.mindrecord_save,'train')\n        else:\n            data_lst = self.voc_val_lst\n            self.mindrecord_save = os.path.join(self.mindrecord_save,'eval')\n        \n        if os.path.exists(self.mindrecord_save):\n          
  #shutil.rmtree(self.mindrecord_save)\n            print('mindrecord file is already exists！')\n            self.mindrecord_save = os.path.join(self.mindrecord_save,'VOC_mindrecord')\n            return\n        \n        with open(data_lst) as f:\n            lines = f.readlines()\n        if shuffle:\n            np.random.shuffle(lines)\n            \n        print('creating mindrecord dataset...')\n        os.makedirs(self.mindrecord_save)\n        self.mindrecord_save = os.path.join(self.mindrecord_save,'VOC_mindrecord')\n        print('number of samples:', len(lines))\n        #定义MindRecord的schema\n        seg_schema = {\"file_name\": {\"type\": \"string\"}, \"label\": {\"type\": \"bytes\"}, \"data\": {\"type\": \"bytes\"}}\n        writer = FileWriter(file_name=self.mindrecord_save, shard_num=num_shards)\n        writer.add_schema(seg_schema, \"seg_schema\")\n        #将schema写入MindRecord\n        cnt = 0\n        for l in lines:\n            id_ = l.strip()\n            img_path = os.path.join(self.voc_img_dir, id_ + '.jpg')\n            label_path = os.path.join(self.voc_anno_gray_dir, id_ + '.png')\n            \n            sample_ = {\"file_name\": img_path.split('/')[-1]}\n            with open(img_path, 'rb') as f:\n                sample_['data'] = f.read()\n            with open(label_path, 'rb') as f:\n                sample_['label'] = f.read()\n            datas.append(sample_)\n            cnt += 1\n            if cnt % 1000 == 0:\n                writer.write_raw_data(datas)\n                print('number of samples written:', cnt)\n                datas = []\n\n        if datas:\n            writer.write_raw_data(datas)\n        writer.commit()\n        print('number of samples written:', cnt)\n        print('Create Mindrecord Done')\n    #生成文件    \n    def get_dataset(self, repeat=1):\n        data_set = de.MindDataset(dataset_files=self.mindrecord_save, columns_list=[\"data\", \"label\"],\n                                  shuffle=True, 
num_parallel_workers=self.num_readers,\n                                  num_shards=self.shard_num, shard_id=self.shard_id)\n        transforms_list = self.preprocess_\n        data_set = data_set.map(operations=transforms_list, input_columns=[\"data\", \"label\"],\n                                output_columns=[\"data\", \"label\"],\n                                num_parallel_workers=self.num_parallel_calls)\n        data_set = data_set.shuffle(buffer_size=self.batch_size * 10)\n        data_set = data_set.batch(self.batch_size, drop_remainder=True)\n        data_set = data_set.repeat(repeat)\n        return data_set\n",[345],{"type":18,"tag":326,"props":346,"children":347},{"__ignoreMap":7},[348],{"type":24,"value":343},{"type":18,"tag":31,"props":350,"children":352},{"id":351},"_5-实验过程",[353],{"type":24,"value":354},"5. 实验过程",{"type":18,"tag":201,"props":356,"children":358},{"id":357},"_51-模型构建",[359],{"type":24,"value":360},"5.1 模型构建",{"type":18,"tag":38,"props":362,"children":363},{},[364,369,374],{"type":18,"tag":42,"props":365,"children":366},{},[367],{"type":24,"value":368},"利用MindSpore框架提供的相关函数完成Deeplabv3主体网络的构建，",{"type":18,"tag":42,"props":370,"children":371},{},[372],{"type":24,"value":373},"通过定义多个resnet cell结构组成整体的Deeplabv3网络。",{"type":18,"tag":42,"props":375,"children":376},{},[377],{"type":24,"value":378},"最终的网络是以类的方式进行定义，通过实例化即可创建对应的网络对象。",{"type":18,"tag":321,"props":380,"children":382},{"code":381}," #定义1x1卷积层\n  def conv1x1(in_planes, out_planes, stride=1):\n      return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, weight_init='xavier_uniform')\n  \n  # 定义3x3卷积层\n  def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):\n      return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, pad_mode='pad', padding=padding,\n                       dilation=dilation, weight_init='xavier_uniform')\n  \n  #定义Resnet主体网络\n  class Resnet(nn.Cell):\n      def __init__(self, block, block_num, output_stride, 
use_batch_statistics=True):\n          super(Resnet, self).__init__()\n          self.inplanes = 64# 输入通道数\n          # 第一层卷积层\n          self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, pad_mode='pad', padding=3,\n                                 weight_init='xavier_uniform')\n          self.bn1 = nn.BatchNorm2d(self.inplanes, use_batch_statistics=use_batch_statistics)\n          self.relu = nn.ReLU()\n          self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')\n          # 第一层残差块\n          self.layer1 = self._make_layer(block, 64, block_num[0], use_batch_statistics=use_batch_statistics)\n          # 第二层残差块\n          self.layer2 = self._make_layer(block, 128, block_num[1], stride=2, use_batch_statistics=use_batch_statistics)\n          # 根据输出步长选择第三、第四层残差块\n          if output_stride == 16:\n              self.layer3 = self._make_layer(block, 256, block_num[2], stride=2,\n                                             use_batch_statistics=use_batch_statistics)\n              self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, base_dilation=2, grids=[1, 2, 4],\n                                             use_batch_statistics=use_batch_statistics)\n          elif output_stride == 8:\n              self.layer3 = self._make_layer(block, 256, block_num[2], stride=1, base_dilation=2,\n                                             use_batch_statistics=use_batch_statistics)\n              self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, base_dilation=4, grids=[1, 2, 4],\n                                             use_batch_statistics=use_batch_statistics)\n      # 构建残差块\n      def _make_layer(self, block, planes, blocks, stride=1, base_dilation=1, grids=None, use_batch_statistics=True):\n          if stride != 1 or self.inplanes != planes * block.expansion:\n              downsample = nn.SequentialCell([\n                  conv1x1(self.inplanes, planes * block.expansion, stride),\n                  
nn.BatchNorm2d(planes * block.expansion, use_batch_statistics=use_batch_statistics)\n              ])\n  \n          if grids is None:\n              grids = [1] * blocks\n  \n          layers = [\n              block(self.inplanes, planes, stride, downsample, dilation=base_dilation * grids[0],\n                    use_batch_statistics=use_batch_statistics)\n          ]\n          self.inplanes = planes * block.expansion\n          for i in range(1, blocks):\n              layers.append(\n                  block(self.inplanes, planes, dilation=base_dilation * grids[i],\n                        use_batch_statistics=use_batch_statistics))\n  \n          return nn.SequentialCell(layers)\n      # 前向推理\n      def construct(self, x):\n          out = self.conv1(x)# 第一层卷积\n          out = self.bn1(out)\n          out = self.relu(out)\n          out = self.maxpool(out)# 第一层池化\n          out = self.layer1(out)  # 第一层残差块\n          out = self.layer2(out)  # 第二层残差块\n          out = self.layer3(out)  # 第三层残差块\n          out = self.layer4(out)  # 第四层残差块\n  \n          return out\n  \n  #构建Bottleneck，用于ResNeXt中构建残差块\n  class Bottleneck(nn.Cell):\n      # 扩充率为4\n      expansion = 4\n  \n      def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_batch_statistics=True):\n          super(Bottleneck, self).__init__()\n          # 第一个1x1卷积层\n          self.conv1 = conv1x1(inplanes, planes)\n          # 第一个BatchNorm层\n          self.bn1 = nn.BatchNorm2d(planes, use_batch_statistics=use_batch_statistics)\n          # 第二个3x3卷积层\n          self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)\n          # 第二个BatchNorm层\n          self.bn2 = nn.BatchNorm2d(planes, use_batch_statistics=use_batch_statistics)\n          # 第三个1x1卷积层，维度扩充\n          self.conv3 = conv1x1(planes, planes * self.expansion)\n          # 第三个BatchNorm层\n          self.bn3 = nn.BatchNorm2d(planes * self.expansion, use_batch_statistics=use_batch_statistics)\n          # Relu激活函数\n   
       self.relu = nn.ReLU()\n          # 下采样层，使维度匹配\n          self.downsample = downsample\n          # 张量相加操作\n          self.add = P.Add()\n  \n      def construct(self, x):\n          identity = x\n  \n          out = self.conv1(x)\n          out = self.bn1(out)\n          out = self.relu(out)\n  \n          out = self.conv2(out)\n          out = self.bn2(out)\n          out = self.relu(out)\n  \n          out = self.conv3(out)\n          out = self.bn3(out)\n  \n          if self.downsample is not None:\n              identity = self.downsample(x)\n          # 将identity和out相加\n          out = self.add(out, identity)\n          out = self.relu(out)\n          return out\n  \n  #构建ASPP，用于DeepLabV3+中的ASPP模块\n  class ASPP(nn.Cell):\n      def __init__(self, atrous_rates, phase='train', in_channels=2048, num_classes=21,\n                   use_batch_statistics=True):\n          super(ASPP, self).__init__()\n          # 训练或者测试阶段\n          self.phase = phase\n          # 输出通道数\n          out_channels = 256\n          # ASPP卷积层1\n          self.aspp1 = ASPPConv(in_channels, out_channels, atrous_rates[0], use_batch_statistics=use_batch_statistics)\n           # ASPP卷积层2\n          self.aspp2 = ASPPConv(in_channels, out_channels, atrous_rates[1], use_batch_statistics=use_batch_statistics)\n           # ASPP卷积层3\n          self.aspp3 = ASPPConv(in_channels, out_channels, atrous_rates[2], use_batch_statistics=use_batch_statistics)\n           # ASPP卷积层4\n          self.aspp4 = ASPPConv(in_channels, out_channels, atrous_rates[3], use_batch_statistics=use_batch_statistics)\n          # ASPP池化层\n          self.aspp_pooling = ASPPPooling(in_channels, out_channels)\n          # 输出通道数为(out_channels * (len(atrous_rates) + 1))\n          self.conv1 = nn.Conv2d(out_channels * (len(atrous_rates) + 1), out_channels, kernel_size=1,\n                                 weight_init='xavier_uniform')\n          # BatchNorm层\n          self.bn1 = nn.BatchNorm2d(out_channels, 
use_batch_statistics=use_batch_statistics)\n          # Relu激活函数\n          self.relu = nn.ReLU()\n          # 输出通道数为num_classes\n          self.conv2 = nn.Conv2d(out_channels, num_classes, kernel_size=1, weight_init='xavier_uniform', has_bias=True)\n          # 沿着通道维度拼接\n          self.concat = P.Concat(axis=1)\n          # 随机失活，防止过拟合\n          self.drop = nn.Dropout(p=0.3)\n  \n      def construct(self, x):\n          x1 = self.aspp1(x)\n          x2 = self.aspp2(x)\n          x3 = self.aspp3(x)\n          x4 = self.aspp4(x)\n          x5 = self.aspp_pooling(x)\n  \n          x = self.concat((x1, x2))\n          x = self.concat((x, x3))\n          x = self.concat((x, x4))\n          x = self.concat((x, x5))\n  \n          x = self.conv1(x)\n          x = self.bn1(x)\n          x = self.relu(x)\n          # 如果是训练阶段，则进行随机失活\n          if self.phase == 'train':\n              x = self.drop(x)\n          x = self.conv2(x)\n          return x\n  \n  ##定义ASPPPooling类，用于定义ASPP池操作\n  class ASPPPooling(nn.Cell):\n      #定义参数\n      def __init__(self, in_channels, out_channels, use_batch_statistics=True):\n          super(ASPPPooling, self).__init__()\n          #定义卷积层操作\n          self.conv = nn.SequentialCell([\n              #1x1卷积操作\n              nn.Conv2d(in_channels, out_channels, kernel_size=1, weight_init='xavier_uniform'),\n              #批量归一化操作\n              nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics),\n              #激活函数操作\n              nn.ReLU()\n          ])\n          #定义shape操作\n          self.shape = P.Shape()\n  \n      def construct(self, x):\n          #获取输入x的大小\n          size = self.shape(x)\n          #先对输入x进行全局平均池化操作\n          out = nn.AvgPool2d(size[2])(x)\n          #再进行卷积、批量归一化、激活操作\n          out = self.conv(out)\n          #大小变化为输入x的大小\n          out = P.ResizeNearestNeighbor((size[2], size[3]), True)(out)\n          return out\n  \n  #定义ASPPConv类，用于定义ASPP卷积操作\n  class ASPPConv(nn.Cell):\n      #定义参数\n      def 
__init__(self, in_channels, out_channels, atrous_rate=1, use_batch_statistics=True):\n          super(ASPPConv, self).__init__()\n          #根据不同的空洞卷积率定义不同的卷积操作\n          if atrous_rate == 1:\n              conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, has_bias=False, weight_init='xavier_uniform')\n          else:\n              conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, pad_mode='pad', padding=atrous_rate,\n                               dilation=atrous_rate, weight_init='xavier_uniform')\n          #批量归一化操作\n          bn = nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics)\n          #激活函数操作\n          relu = nn.ReLU()\n          #定义卷积层操作\n          self.aspp_conv = nn.SequentialCell([conv, bn, relu])\n  \n      def construct(self, x):\n          #进行卷积、批量归一化、激活操作\n          out = self.aspp_conv(x)\n          return out\n  \n  #定义DeepLabV3类，用于定义整个DeepLabV3网络\n  class DeepLabV3(nn.Cell):\n      def __init__(self, phase='train', num_classes=21, output_stride=16, freeze_bn=False):\n          super(DeepLabV3, self).__init__()\n          #根据输入的参数freeze_bn来判断是否使用批量归一化操作\n          use_batch_statistics = not freeze_bn\n          #调用Resnet类来构建ResNet网络\n          self.resnet = Resnet(Bottleneck, [3, 4, 23, 3], output_stride=output_stride,\n                               use_batch_statistics=use_batch_statistics)\n          #调用ASPP类来构建ASPP网络\n          self.aspp = ASPP([1, 6, 12, 18], phase, 2048, num_classes,\n                           use_batch_statistics=use_batch_statistics)\n          #定义shape操作\n          self.shape = P.Shape()\n  \n      def construct(self, x):\n          #获取输入x的大小\n          size = self.shape(x)\n          #将输入x输入到ResNet网络中，得到输出\n          out = self.resnet(x)\n          #将ResNet的输出输入到ASPP网络中，得到ASPP的输出\n          out = self.aspp(out)\n          #将ASPP的输出进行大小变换，变成与输入x相同的大小\n          out = P.ResizeBilinear((size[2], size[3]), True)(out)\n          return 
out\n",[383],{"type":18,"tag":326,"props":384,"children":385},{"__ignoreMap":7},[386],{"type":24,"value":381},{"type":18,"tag":26,"props":388,"children":389},{},[390],{"type":24,"value":391},"定义不同的学习率函数。",{"type":18,"tag":321,"props":393,"children":395},{"code":394},"#定义不同的学习率\n#生成cosine学习率下降序列\ndef cosine_lr(base_lr, decay_steps, total_steps):\n    for i in range(int(total_steps)):\n        step_ = min(i, decay_steps)\n        yield base_lr * 0.5 * (1 + np.cos(np.pi * step_ / decay_steps))\n\n#生成polynomial学习率下降序列\ndef poly_lr(base_lr, decay_steps, total_steps, end_lr=0.0001, power=0.9):\n    for i in range(int(total_steps)):\n        step_ = min(i, decay_steps)\n        yield (base_lr - end_lr) * ((1.0 - step_ / decay_steps) ** power) + end_lr\n\n#生成exponential学习率下降序列\ndef exponential_lr(base_lr, decay_steps, decay_rate, total_steps, staircase=False):\n    for i in range(total_steps):\n        if staircase:\n            power_ = i // decay_steps\n        else:\n            power_ = float(i) / decay_steps\n        yield base_lr * (decay_rate ** power_)\n",[396],{"type":18,"tag":326,"props":397,"children":398},{"__ignoreMap":7},[399],{"type":24,"value":394},{"type":18,"tag":26,"props":401,"children":402},{},[403],{"type":24,"value":404},"定义损失函数。",{"type":18,"tag":321,"props":406,"children":408},{"code":407},"#定义损失函数\nclass SoftmaxCrossEntropyLoss(nn.Cell):\n    def __init__(self, num_cls=21, ignore_label=255):\n        super(SoftmaxCrossEntropyLoss, self).__init__()\n        #one-hot编码相关操作\n        self.one_hot = P.OneHot(axis=-1)\n        self.on_value = Tensor(1.0, mstype.float32)\n        self.off_value = Tensor(0.0, mstype.float32)\n        #类型转换相关操作\n        self.cast = P.Cast()\n        #softmax交叉熵损失函数及相关操作\n        self.ce = nn.SoftmaxCrossEntropyWithLogits()\n        #类别数及忽略标签\n        self.not_equal = P.NotEqual()\n        self.num_cls = num_cls\n        self.ignore_label = ignore_label\n        #矩阵乘法及求和相关操作\n        self.mul = P.Mul()\n        self.sum = 
P.ReduceSum(False)\n        self.div = P.RealDiv()\n        #转置及形状变换相关操作\n        self.transpose = P.Transpose()\n        self.reshape = P.Reshape()\n    #将标签转换为整形\n    def construct(self, logits, labels):\n        #将标签拉成一维并转换形状\n        labels_int = self.cast(labels, mstype.int32)\n        labels_int = self.reshape(labels_int, (-1,))\n         #将logits转置并转换形状\n        logits_ = self.transpose(logits, (0, 2, 3, 1))\n        logits_ = self.reshape(logits_, (-1, self.num_cls))\n         #生成权重\n        weights = self.not_equal(labels_int, self.ignore_label)\n        weights = self.cast(weights, mstype.float32)\n        #生成one-hot标签\n        one_hot_labels = self.one_hot(labels_int, self.num_cls, self.on_value, self.off_value)\n        #计算softmax交叉熵损失\n        loss = self.ce(logits_, one_hot_labels)\n         #加权\n        loss = self.mul(weights, loss)\n        #求平均损失\n        loss = self.div(self.sum(loss), self.sum(weights))\n        return loss\n",[409],{"type":18,"tag":326,"props":410,"children":411},{"__ignoreMap":7},[412],{"type":24,"value":407},{"type":18,"tag":31,"props":414,"children":416},{"id":415},"_6模型训练",[417],{"type":24,"value":418},"6、模型训练",{"type":18,"tag":201,"props":420,"children":422},{"id":421},"_61-构建训练函数",[423],{"type":24,"value":424},"6.1 构建训练函数",{"type":18,"tag":26,"props":426,"children":427},{},[428],{"type":24,"value":429},"利用MindSpore中相关的函数加载预训练的模型，并构建训练网络的类。",{"type":18,"tag":321,"props":431,"children":433},{"code":432},"# 设置随机种子\nset_seed(1)\n# 设置上下文\nms.set_context(mode=ms.GRAPH_MODE, save_graphs=False,\n                    device_target=\"CPU\")\n# 建立训练网络\nclass BuildTrainNetwork(nn.Cell):\n    def __init__(self, network, criterion):\n        super(BuildTrainNetwork, self).__init__()\n        self.network = network\n        self.criterion = criterion\n\n    def construct(self, input_data, label):\n        output = self.network(input_data)\n        net_loss = self.criterion(output, label)\n        return net_loss\n# 训练函数\ndef 
train(args):\n    # 如果使用分布式训练，则初始化\n    if args.is_distributed:\n        init()\n        args.rank = get_rank()\n        args.group_size = get_group_size()\n\n        parallel_mode = ParallelMode.DATA_PARALLEL\n        ms.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=args.group_size)\n    \n    \n    # 构建数据集\n    dataset = SegDataset(image_mean=args.image_mean,\n                                        image_std=args.image_std,\n                                        data_file=args.data_file,\n                                        batch_size=args.batch_size,\n                                        crop_size=args.crop_size,\n                                        max_scale=args.max_scale,\n                                        min_scale=args.min_scale,\n                                        ignore_label=args.ignore_label,\n                                        num_classes=args.num_classes,\n                                        num_readers=2,\n                                        num_parallel_calls=4,\n                                        shard_id=args.rank,\n                                        shard_num=args.group_size)\n    dataset.get_gray_dataset()\n    dataset.get_mindrecord_dataset(is_training=True)\n    dataset = dataset.get_dataset(repeat=1)\n    \n\n    # 构建相关网络\n    if args.model == 'deeplab_v3_s16':\n        network = DeepLabV3('train', args.num_classes, 16, args.freeze_bn)\n    elif args.model == 'deeplab_v3_s8':\n        network = DeepLabV3('train', args.num_classes, 8, args.freeze_bn)\n    else:\n        raise NotImplementedError('model [{:s}] not recognized'.format(args.model))\n\n    # 构建损失函数\n    loss_ = SoftmaxCrossEntropyLoss(args.num_classes, args.ignore_label)\n    loss_.add_flags_recursive(fp32=True)\n    train_net = BuildTrainNetwork(network, loss_)\n\n    # l加载预训练模型\n    param_dict = ms.load_checkpoint(args.ckpt_file)\n    ms.load_param_into_net(train_net, param_dict)\n\n    # 
优化器\n    iters_per_epoch = dataset.get_dataset_size()\n    total_train_steps = iters_per_epoch * args.train_epochs\n    if args.lr_type == 'cos':\n        lr_iter = cosine_lr(args.base_lr, total_train_steps, total_train_steps)\n    elif args.lr_type == 'poly':\n        lr_iter = poly_lr(args.base_lr, total_train_steps, total_train_steps, end_lr=0.0, power=0.9)\n    elif args.lr_type == 'exp':\n        lr_iter = exponential_lr(args.base_lr, args.lr_decay_step, args.lr_decay_rate,\n                                                total_train_steps, staircase=True)\n    else:\n        raise ValueError('unknown learning rate type')\n    opt = nn.Momentum(params=train_net.trainable_params(), learning_rate=lr_iter, momentum=0.9, weight_decay=0.0001,\n                      loss_scale=args.loss_scale)\n\n    # 损失梯度缩放\n    manager_loss_scale = amp.FixedLossScaleManager(args.loss_scale, drop_overflow_update=False)\n    model = Model(train_net, optimizer=opt, amp_level=\"O3\", loss_scale_manager=manager_loss_scale)\n\n    # 回调函数，用于保存 ckpts\n    time_cb = TimeMonitor(data_size=iters_per_epoch)\n    loss_cb = LossMonitor()\n    cbs = [time_cb, loss_cb]\n\n    if args.rank == 0:\n        config_ck = CheckpointConfig(save_checkpoint_steps=iters_per_epoch,\n                                     keep_checkpoint_max=args.keep_checkpoint_max)\n        ckpoint_cb = ModelCheckpoint(prefix=args.model, directory=args.train_dir, config=config_ck)\n        cbs.append(ckpoint_cb)\n    # 开始训练\n    model.train(args.train_epochs, dataset, 
callbacks=cbs,dataset_sink_mode=True)\n",[434],{"type":18,"tag":326,"props":435,"children":436},{"__ignoreMap":7},[437],{"type":24,"value":432},{"type":18,"tag":26,"props":439,"children":440},{},[441],{"type":18,"tag":442,"props":443,"children":447},"a",{"href":444,"rel":445},"https://www.hiascend.com/forum/interactivity?targetUser=9d1265aa60fe4434aaed595a831dba5b",[446],"nofollow",[448],{"type":24,"value":449},"私信",{"type":18,"tag":19,"props":451,"children":453},{"id":452},"基于mindspore框架的深度学习模型在cv方向的应用-5-基于deeplabv3实现语义分割案例-1",[454],{"type":24,"value":8},{"type":18,"tag":201,"props":456,"children":458},{"id":457},"mindspore",[459],{"type":24,"value":460},"MindSpore",{"type":18,"tag":201,"props":462,"children":464},{"id":463},"ai",[465],{"type":24,"value":466},"AI",{"type":18,"tag":201,"props":468,"children":470},{"id":469},"模型训练",[471],{"type":24,"value":469},{"type":18,"tag":26,"props":473,"children":474},{},[475],{"type":24,"value":476},"发表于 2024-06-06 11:41:0914查看",{"type":18,"tag":26,"props":478,"children":479},{},[480],{"type":24,"value":9},{"type":18,"tag":31,"props":482,"children":484},{"id":483},"_1实验目的-1",[485],{"type":24,"value":36},{"type":18,"tag":38,"props":487,"children":488},{},[489,493,497],{"type":18,"tag":42,"props":490,"children":491},{},[492],{"type":24,"value":46},{"type":18,"tag":42,"props":494,"children":495},{},[496],{"type":24,"value":51},{"type":18,"tag":42,"props":498,"children":499},{},[500],{"type":24,"value":56},{"type":18,"tag":31,"props":502,"children":504},{"id":503},"_2语义分割算法原理介绍-1",[505],{"type":24,"value":62},{"type":18,"tag":26,"props":507,"children":508},{},[509],{"type":24,"value":67},{"type":18,"tag":26,"props":511,"children":512},{},[513],{"type":24,"value":72},{"type":18,"tag":38,"props":515,"children":516},{},[517],{"type":18,"tag":42,"props":518,"children":519},{},[520],{"type":24,"value":80},{"type":18,"tag":26,"props":522,"children":523},{},[524],{"type":24,"value":85},{"type":18,"tag":38,"props":526,"children":527},{},
[528,532],{"type":18,"tag":42,"props":529,"children":530},{},[531],{"type":24,"value":93},{"type":18,"tag":42,"props":533,"children":534},{},[535],{"type":24,"value":98},{"type":18,"tag":26,"props":537,"children":538},{},[539],{"type":24,"value":103},{"type":18,"tag":26,"props":541,"children":542},{},[543],{"type":24,"value":108},{"type":18,"tag":26,"props":545,"children":546},{},[547],{"type":24,"value":113},{"type":18,"tag":26,"props":549,"children":550},{},[551],{"type":24,"value":118},{"type":18,"tag":38,"props":553,"children":554},{},[555],{"type":18,"tag":42,"props":556,"children":557},{},[558],{"type":24,"value":126},{"type":18,"tag":26,"props":560,"children":561},{},[562],{"type":24,"value":131},{"type":18,"tag":26,"props":564,"children":565},{},[566],{"type":24,"value":136},{"type":18,"tag":26,"props":568,"children":569},{},[570],{"type":24,"value":141},{"type":18,"tag":38,"props":572,"children":573},{},[574],{"type":18,"tag":42,"props":575,"children":576},{},[577],{"type":24,"value":149},{"type":18,"tag":26,"props":579,"children":580},{},[581],{"type":24,"value":154},{"type":18,"tag":26,"props":583,"children":584},{},[585],{"type":24,"value":159},{"type":18,"tag":31,"props":587,"children":589},{"id":588},"_3-实验环境-1",[590],{"type":24,"value":165},{"type":18,"tag":26,"props":592,"children":593},{},[594],{"type":24,"value":170},{"type":18,"tag":26,"props":596,"children":597},{},[598],{"type":24,"value":175},{"type":18,"tag":38,"props":600,"children":601},{},[602,606,610],{"type":18,"tag":42,"props":603,"children":604},{},[605],{"type":24,"value":183},{"type":18,"tag":42,"props":607,"children":608},{},[609],{"type":24,"value":188},{"type":18,"tag":42,"props":611,"children":612},{},[613],{"type":24,"value":193},{"type":18,"tag":31,"props":615,"children":617},{"id":616},"_4数据处理-1",[618],{"type":24,"value":199},{"type":18,"tag":201,"props":620,"children":622},{"id":621},"_41数据准备-1",[623],{"type":24,"value":206},{"type":18,"tag":26,"props":625,"children":626},{},[
627],{"type":24,"value":211},{"type":18,"tag":38,"props":629,"children":630},{},[631,635,639,643],{"type":18,"tag":42,"props":632,"children":633},{},[634],{"type":24,"value":219},{"type":18,"tag":42,"props":636,"children":637},{},[638],{"type":24,"value":224},{"type":18,"tag":42,"props":640,"children":641},{},[642],{"type":24,"value":229},{"type":18,"tag":42,"props":644,"children":645},{},[646],{"type":24,"value":234},{"type":18,"tag":26,"props":648,"children":649},{},[650],{"type":24,"value":239},{"type":18,"tag":26,"props":652,"children":653},{},[654],{"type":24,"value":244},{"type":18,"tag":26,"props":656,"children":657},{},[658],{"type":24,"value":249},{"type":18,"tag":38,"props":660,"children":661},{},[662,666,670,674,678,682,686,690,694],{"type":18,"tag":42,"props":663,"children":664},{},[665],{"type":24,"value":257},{"type":18,"tag":42,"props":667,"children":668},{},[669],{"type":24,"value":262},{"type":18,"tag":42,"props":671,"children":672},{},[673],{"type":24,"value":267},{"type":18,"tag":42,"props":675,"children":676},{},[677],{"type":24,"value":272},{"type":18,"tag":42,"props":679,"children":680},{},[681],{"type":24,"value":277},{"type":18,"tag":42,"props":683,"children":684},{},[685],{"type":24,"value":282},{"type":18,"tag":42,"props":687,"children":688},{},[689],{"type":24,"value":287},{"type":18,"tag":42,"props":691,"children":692},{},[693],{"type":24,"value":292},{"type":18,"tag":42,"props":695,"children":696},{},[697],{"type":24,"value":297},{"type":18,"tag":201,"props":699,"children":701},{"id":700},"_42数据加载-1",[702],{"type":24,"value":303},{"type":18,"tag":26,"props":704,"children":705},{},[706],{"type":24,"value":308},{"type":18,"tag":201,"props":708,"children":710},{"id":709},"_43-导入python库-1",[711],{"type":24,"value":314},{"type":18,"tag":26,"props":713,"children":714},{},[715],{"type":24,"value":319},{"type":18,"tag":26,"props":717,"children":718},{},[719,725],{"type":18,"tag":326,"props":720,"children":722},{"className":721},[],[723],{"type":
24,"value":724},"# 导入依赖库 # os库 import os # 引入numpy import numpy as np # 引入读写不同数据文件格式的函数 import scipy.io # 引入数据序列化和反序列化 import pickle # 引入操作图像方法 from PIL import Image # 引入高级的文件,文件夹,压缩包处理模块 import shutil # 引入计算机视觉库 import cv2 # 引入归一化提供训练测试所用的数据集 from mindspore.mindrecord import FileWriter # 引入数据读取 import mindspore.dataset as de # 引入MindSpore import mindspore as ms # 引入神经网络模块 import mindspore.nn as nn #导入mindspore中的ops模块 import mindspore.ops  as P # 引入张量模块 from mindspore import Tensor from mindspore import load_checkpoint from mindspore import load_param_into_net from mindspore import dtype as mstype # 引入python解释器和它的环境有关的函数 import sys # 将字典转为easydict from easydict import EasyDict as edict # 引入模型训练或推理的高阶接口。 # 引入用于构建Callback函数基类。 from mindspore.train import ModelCheckpoint, CheckpointConfig,LossMonitor, TimeMonitor,Model # 引入集合通信接口 from mindspore.communication import init, get_rank, get_group_size # 引入用于构建Callback函数的基类。 from mindspore import amp from mindspore import set_seed import PIL # 引入绘图模块 import matplotlib.pyplot as plt # 引入可视化库 import matplotlib as mpl # 引入可视化库色彩模块 import matplotlib.colors as colors # import moxing as mox",{"type":24,"value":726},"\n复制",{"type":18,"tag":201,"props":728,"children":730},{"id":729},"_44-数据预处理-1",[731],{"type":24,"value":335},{"type":18,"tag":26,"props":733,"children":734},{},[735],{"type":24,"value":340},{"type":18,"tag":26,"props":737,"children":738},{},[739,745],{"type":18,"tag":326,"props":740,"children":742},{"className":741},[],[743],{"type":24,"value":744},"# 设置Opencv的线程数量为0 cv2.setNumThreads(0) # 数据集对象，用于载入语义分割数据集 class SegDataset:     def __init__(self,                  image_mean,# 图像像素值平均值                  image_std,# 图像像素值标准差                  data_file='',# 数据集文件路径                  batch_size=32,# 单次训练所使用样本的数量                  crop_size=512,# 随机裁剪后的图片大小                  max_scale=2.0,# 最大缩放比例                  min_scale=0.5,# 最小缩放比例                  ignore_label=255,# 忽略标签值                  num_classes=21,# 图像和标签中的类别数量    
              num_readers=2,# 读取数据的IO线程数量                  num_parallel_calls=4,# 数据集batch的并行度                  shard_id=None, # 数据集分片ID，None表示无分片                  shard_num=None# 数据集分片数量，None表示无分片                                ):         # 定义数据集文件路径         self.data_file = data_file         # 定义单次训练所使用样本的数量         self.batch_size = batch_size         # 定义随机裁剪后的图片大小         self.crop_size = crop_size         # 定义图像像素值平均值         self.image_mean = np.array(image_mean, dtype=np.float32)         # 定义图像像素值标准差         self.image_std = np.array(image_std, dtype=np.float32)         # 定义最大缩放比例         self.max_scale = max_scale         # 定义最小缩放比例         self.min_scale = min_scale         # 定义忽略标签值         self.ignore_label = ignore_label         # 定义图像和标签中的类别数量         self.num_classes = num_classes         # 定义读取数据的IO线程数量         self.num_readers = num_readers         # 定义数据集batch的并行度         self.num_parallel_calls = num_parallel_calls         # 定义数据集分片ID         self.shard_id = shard_id         # 定义数据集分片数量         self.shard_num = shard_num         # VOC数据集原始图片文件夹路径         self.voc_img_dir = os.path.join(self.data_file,'JPEG')         # VOC数据集语义标注图片文件夹路径         self.voc_anno_dir = os.path.join(self.data_file,'MASK1')         # VOC数据集训练集文件列表路径         self.voc_train_lst = os.path.join(self.data_file,'train.txt')         # VOC数据集验证集文件列表路径         self.voc_val_lst = os.path.join(self.data_file,'val.txt')         #  VOC数据集使用的灰度标注图片文件夹路径         self.voc_anno_gray_dir = os.path.join(self.data_file,'SegmentationClassGray')         # 生成的MindRecord文件保存路径         self.mindrecord_save =  os.path.join(self.data_file,'VOC_mindrecord')         # 最大缩放比例必须大于最小缩放比例                                        assert max_scale > min_scale     #数据预处理，包括图像的解码，尺度缩放，随机裁剪等操作     def preprocess_(self, image, label):         #bgr图像解码         image_out = cv2.imdecode(np.frombuffer(image, dtype=np.uint8), cv2.IMREAD_COLOR)         #灰度图像解码         label_out = cv2.imdecode(np.frombuffer(label, 
dtype=np.uint8), cv2.IMREAD_GRAYSCALE)         #尺度缩放         sc = np.random.uniform(self.min_scale, self.max_scale)         new_h, new_w = int(sc * image_out.shape[0]), int(sc * image_out.shape[1])         image_out = cv2.resize(image_out, (new_w, new_h), interpolation=cv2.INTER_CUBIC)         label_out = cv2.resize(label_out, (new_w, new_h), interpolation=cv2.INTER_NEAREST)         #图像标准化         image_out = (image_out - self.image_mean) / self.image_std         #随机裁剪         h_, w_ = max(new_h, self.crop_size), max(new_w, self.crop_size)         pad_h, pad_w = h_ - new_h, w_ - new_w         if pad_h > 0 or pad_w > 0:             image_out = cv2.copyMakeBorder(image_out, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)             label_out = cv2.copyMakeBorder(label_out, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=self.ignore_label)         offset_h = np.random.randint(0, h_ - self.crop_size + 1)         offset_w = np.random.randint(0, w_ - self.crop_size + 1)         image_out = image_out[offset_h: offset_h + self.crop_size, offset_w: offset_w + self.crop_size, :]         label_out = label_out[offset_h: offset_h + self.crop_size, offset_w: offset_w+self.crop_size]         #随机水平翻转         if np.random.uniform(0.0, 1.0) > 0.5:             image_out = image_out[:, ::-1, :]             label_out = label_out[:, ::-1]         #图像转置以符合模型输入要求         image_out = image_out.transpose((2, 0, 1))         image_out = image_out.copy()         label_out = label_out.copy()         return image_out, label_out      #得到灰度数据集的方法，若文件已存在，则直接返回     def get_gray_dataset(self):         if os.path.exists(self.voc_anno_gray_dir):             print('the gray file is already exists！')             return         os.makedirs(self.voc_anno_gray_dir)          #转换彩色图像为灰度图像，并保存到指定路径         print('converting voc color png to gray png ...')         for ann in os.listdir(self.voc_anno_dir):             ann_im = Image.open(os.path.join(self.voc_anno_dir, ann))             #将该图像转换为灰度图像            
 ann_im = Image.fromarray(np.array(ann_im))             ann_im.save(os.path.join(self.voc_anno_gray_dir, ann))         print('converting done')     #获取MindRecord格式的数据集，num_shards为生成MindRecord的分片数，shuffle为是否对数据做洗牌处理         def get_mindrecord_dataset(self, is_training,num_shards=1, shuffle=True):         datas = []         if is_training:             data_lst = self.voc_train_lst             self.mindrecord_save = os.path.join(self.mindrecord_save,'train')         else:             data_lst = self.voc_val_lst             self.mindrecord_save = os.path.join(self.mindrecord_save,'eval')                  if os.path.exists(self.mindrecord_save):             #shutil.rmtree(self.mindrecord_save)             print('mindrecord file is already exists！')             self.mindrecord_save = os.path.join(self.mindrecord_save,'VOC_mindrecord')             return                  with open(data_lst) as f:             lines = f.readlines()         if shuffle:             np.random.shuffle(lines)                      print('creating mindrecord dataset...')         os.makedirs(self.mindrecord_save)         self.mindrecord_save = os.path.join(self.mindrecord_save,'VOC_mindrecord')         print('number of samples:', len(lines))         #定义MindRecord的schema         seg_schema = {\"file_name\": {\"type\": \"string\"}, \"label\": {\"type\": \"bytes\"}, \"data\": {\"type\": \"bytes\"}}         writer = FileWriter(file_name=self.mindrecord_save, shard_num=num_shards)         writer.add_schema(seg_schema, \"seg_schema\")         #将schema写入MindRecord         cnt = 0         for l in lines:             id_ = l.strip()             img_path = os.path.join(self.voc_img_dir, id_ + '.jpg')             label_path = os.path.join(self.voc_anno_gray_dir, id_ + '.png')                          sample_ = {\"file_name\": img_path.split('/')[-1]}             with open(img_path, 'rb') as f:                 sample_['data'] = f.read()             with open(label_path, 'rb') as f:                 
sample_['label'] = f.read()             datas.append(sample_)             cnt += 1             if cnt % 1000 == 0:                 writer.write_raw_data(datas)                 print('number of samples written:', cnt)                 datas = []          if datas:             writer.write_raw_data(datas)         writer.commit()         print('number of samples written:', cnt)         print('Create Mindrecord Done')     #生成文件         def get_dataset(self, repeat=1):         data_set = de.MindDataset(dataset_files=self.mindrecord_save, columns_list=[\"data\", \"label\"],                                   shuffle=True, num_parallel_workers=self.num_readers,                                   num_shards=self.shard_num, shard_id=self.shard_id)         transforms_list = self.preprocess_         data_set = data_set.map(operations=transforms_list, input_columns=[\"data\", \"label\"],                                 output_columns=[\"data\", \"label\"],                                 num_parallel_workers=self.num_parallel_calls)         data_set = data_set.shuffle(buffer_size=self.batch_size * 10)         data_set = data_set.batch(self.batch_size, drop_remainder=True)         data_set = data_set.repeat(repeat)         return data_set",{"type":24,"value":726},{"type":18,"tag":31,"props":747,"children":749},{"id":748},"_5-实验过程-1",[750],{"type":24,"value":354},{"type":18,"tag":201,"props":752,"children":754},{"id":753},"_51-模型构建-1",[755],{"type":24,"value":360},{"type":18,"tag":38,"props":757,"children":758},{},[759,763,767],{"type":18,"tag":42,"props":760,"children":761},{},[762],{"type":24,"value":368},{"type":18,"tag":42,"props":764,"children":765},{},[766],{"type":24,"value":373},{"type":18,"tag":42,"props":768,"children":769},{},[770,771,775,781],{"type":24,"value":378},{"type":18,"tag":772,"props":773,"children":774},"br",{},[],{"type":18,"tag":326,"props":776,"children":778},{"className":777},[],[779],{"type":24,"value":780},"#定义1x1卷积层   def conv1x1(in_planes, out_planes, 
stride=1):       return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, weight_init='xavier_uniform')      # 定义3x3卷积层   def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):       return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, pad_mode='pad', padding=padding,                        dilation=dilation, weight_init='xavier_uniform')      #定义Resnet主体网络   class Resnet(nn.Cell):       def __init__(self, block, block_num, output_stride, use_batch_statistics=True):           super(Resnet, self).__init__()           self.inplanes = 64# 输入通道数           # 第一层卷积层           self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, pad_mode='pad', padding=3,                                  weight_init='xavier_uniform')           self.bn1 = nn.BatchNorm2d(self.inplanes, use_batch_statistics=use_batch_statistics)           self.relu = nn.ReLU()           self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')           # 第一层残差块           self.layer1 = self._make_layer(block, 64, block_num[0], use_batch_statistics=use_batch_statistics)           # 第二层残差块           self.layer2 = self._make_layer(block, 128, block_num[1], stride=2, use_batch_statistics=use_batch_statistics)           # 根据输出步长选择第三、第四层残差块           if output_stride == 16:               self.layer3 = self._make_layer(block, 256, block_num[2], stride=2,                                              use_batch_statistics=use_batch_statistics)               self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, base_dilation=2, grids=[1, 2, 4],                                              use_batch_statistics=use_batch_statistics)           elif output_stride == 8:               self.layer3 = self._make_layer(block, 256, block_num[2], stride=1, base_dilation=2,                                              use_batch_statistics=use_batch_statistics)               self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, 
base_dilation=4, grids=[1, 2, 4],                                              use_batch_statistics=use_batch_statistics)       # 构建残差块       def _make_layer(self, block, planes, blocks, stride=1, base_dilation=1, grids=None, use_batch_statistics=True):           if stride != 1 or self.inplanes != planes * block.expansion:               downsample = nn.SequentialCell([                   conv1x1(self.inplanes, planes * block.expansion, stride),                   nn.BatchNorm2d(planes * block.expansion, use_batch_statistics=use_batch_statistics)               ])              if grids is None:               grids = [1] * blocks              layers = [               block(self.inplanes, planes, stride, downsample, dilation=base_dilation * grids[0],                     use_batch_statistics=use_batch_statistics)           ]           self.inplanes = planes * block.expansion           for i in range(1, blocks):               layers.append(                   block(self.inplanes, planes, dilation=base_dilation * grids[i],                         use_batch_statistics=use_batch_statistics))              return nn.SequentialCell(layers)       # 前向推理       def construct(self, x):           out = self.conv1(x)# 第一层卷积           out = self.bn1(out)           out = self.relu(out)           out = self.maxpool(out)# 第一层池化           out = self.layer1(out)  # 第一层残差块           out = self.layer2(out)  # 第二层残差块           out = self.layer3(out)  # 第三层残差块           out = self.layer4(out)  # 第四层残差块              return out      #构建Bottleneck，用于ResNeXt中构建残差块   class Bottleneck(nn.Cell):       # 扩充率为4       expansion = 4          def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_batch_statistics=True):           super(Bottleneck, self).__init__()           # 第一个1x1卷积层           self.conv1 = conv1x1(inplanes, planes)           # 第一个BatchNorm层           self.bn1 = nn.BatchNorm2d(planes, use_batch_statistics=use_batch_statistics)           # 第二个3x3卷积层           
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)           # 第二个BatchNorm层           self.bn2 = nn.BatchNorm2d(planes, use_batch_statistics=use_batch_statistics)           # 第三个1x1卷积层，维度扩充           self.conv3 = conv1x1(planes, planes * self.expansion)           # 第三个BatchNorm层           self.bn3 = nn.BatchNorm2d(planes * self.expansion, use_batch_statistics=use_batch_statistics)           # Relu激活函数           self.relu = nn.ReLU()           # 下采样层，使维度匹配           self.downsample = downsample           # 张量相加操作           self.add = P.Add()          def construct(self, x):           identity = x              out = self.conv1(x)           out = self.bn1(out)           out = self.relu(out)              out = self.conv2(out)           out = self.bn2(out)           out = self.relu(out)              out = self.conv3(out)           out = self.bn3(out)              if self.downsample is not None:               identity = self.downsample(x)           # 将identity和out相加           out = self.add(out, identity)           out = self.relu(out)           return out      #构建ASPP，用于DeepLabV3+中的ASPP模块   class ASPP(nn.Cell):       def __init__(self, atrous_rates, phase='train', in_channels=2048, num_classes=21,                    use_batch_statistics=True):           super(ASPP, self).__init__()           # 训练或者测试阶段           self.phase = phase           # 输出通道数           out_channels = 256           # ASPP卷积层1           self.aspp1 = ASPPConv(in_channels, out_channels, atrous_rates[0], use_batch_statistics=use_batch_statistics)            # ASPP卷积层2           self.aspp2 = ASPPConv(in_channels, out_channels, atrous_rates[1], use_batch_statistics=use_batch_statistics)            # ASPP卷积层3           self.aspp3 = ASPPConv(in_channels, out_channels, atrous_rates[2], use_batch_statistics=use_batch_statistics)            # ASPP卷积层4           self.aspp4 = ASPPConv(in_channels, out_channels, atrous_rates[3], use_batch_statistics=use_batch_statistics)           # ASPP池化层        
   self.aspp_pooling = ASPPPooling(in_channels, out_channels)           # 输出通道数为(out_channels * (len(atrous_rates) + 1))           self.conv1 = nn.Conv2d(out_channels * (len(atrous_rates) + 1), out_channels, kernel_size=1,                                  weight_init='xavier_uniform')           # BatchNorm层           self.bn1 = nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics)           # Relu激活函数           self.relu = nn.ReLU()           # 输出通道数为num_classes           self.conv2 = nn.Conv2d(out_channels, num_classes, kernel_size=1, weight_init='xavier_uniform', has_bias=True)           # 沿着通道维度拼接           self.concat = P.Concat(axis=1)           # 随机失活，防止过拟合           self.drop = nn.Dropout(p=0.3)          def construct(self, x):           x1 = self.aspp1(x)           x2 = self.aspp2(x)           x3 = self.aspp3(x)           x4 = self.aspp4(x)           x5 = self.aspp_pooling(x)              x = self.concat((x1, x2))           x = self.concat((x, x3))           x = self.concat((x, x4))           x = self.concat((x, x5))              x = self.conv1(x)           x = self.bn1(x)           x = self.relu(x)           # 如果是训练阶段，则进行随机失活           if self.phase == 'train':               x = self.drop(x)           x = self.conv2(x)           return x      ##定义ASPPPooling类，用于定义ASPP池操作   class ASPPPooling(nn.Cell):       #定义参数       def __init__(self, in_channels, out_channels, use_batch_statistics=True):           super(ASPPPooling, self).__init__()           #定义卷积层操作           self.conv = nn.SequentialCell([               #1x1卷积操作               nn.Conv2d(in_channels, out_channels, kernel_size=1, weight_init='xavier_uniform'),               #批量归一化操作               nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics),               #激活函数操作               nn.ReLU()           ])           #定义shape操作           self.shape = P.Shape()          def construct(self, x):           #获取输入x的大小           size = self.shape(x)           
#先对输入x进行全局平均池化操作           out = nn.AvgPool2d(size[2])(x)           #再进行卷积、批量归一化、激活操作           out = self.conv(out)           #大小变化为输入x的大小           out = P.ResizeNearestNeighbor((size[2], size[3]), True)(out)           return out      #定义ASPPConv类，用于定义ASPP卷积操作   class ASPPConv(nn.Cell):       #定义参数       def __init__(self, in_channels, out_channels, atrous_rate=1, use_batch_statistics=True):           super(ASPPConv, self).__init__()           #根据不同的空洞卷积率定义不同的卷积操作           if atrous_rate == 1:               conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, has_bias=False, weight_init='xavier_uniform')           else:               conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, pad_mode='pad', padding=atrous_rate,                                dilation=atrous_rate, weight_init='xavier_uniform')           #批量归一化操作           bn = nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics)           #激活函数操作           relu = nn.ReLU()           #定义卷积层操作           self.aspp_conv = nn.SequentialCell([conv, bn, relu])          def construct(self, x):           #进行卷积、批量归一化、激活操作           out = self.aspp_conv(x)           return out      #定义DeepLabV3类，用于定义整个DeepLabV3网络   class DeepLabV3(nn.Cell):       def __init__(self, phase='train', num_classes=21, output_stride=16, freeze_bn=False):           super(DeepLabV3, self).__init__()           #根据输入的参数freeze_bn来判断是否使用批量归一化操作           use_batch_statistics = not freeze_bn           #调用Resnet类来构建ResNet网络           self.resnet = Resnet(Bottleneck, [3, 4, 23, 3], output_stride=output_stride,                                use_batch_statistics=use_batch_statistics)           #调用ASPP类来构建ASPP网络           self.aspp = ASPP([1, 6, 12, 18], phase, 2048, num_classes,                            use_batch_statistics=use_batch_statistics)           #定义shape操作           self.shape = P.Shape()          def construct(self, x):           #获取输入x的大小           size = self.shape(x)           #将输入x输入到ResNet网络中，得到输出  
         out = self.resnet(x)           #将ResNet的输出输入到ASPP网络中，得到ASPP的输出           out = self.aspp(out)           #将ASPP的输出进行大小变换，变成与输入x相同的大小           out = P.ResizeBilinear((size[2], size[3]), True)(out)           return out",{"type":24,"value":726},{"type":18,"tag":26,"props":783,"children":784},{},[785],{"type":24,"value":391},{"type":18,"tag":26,"props":787,"children":788},{},[789,795],{"type":18,"tag":326,"props":790,"children":792},{"className":791},[],[793],{"type":24,"value":794},"#定义不同的学习率 #生成cosine学习率下降序列 def cosine_lr(base_lr, decay_steps, total_steps):     for i in range(int(total_steps)):         step_ = min(i, decay_steps)         yield base_lr * 0.5 * (1 + np.cos(np.pi * step_ / decay_steps))  #生成polynomial学习率下降序列 def poly_lr(base_lr, decay_steps, total_steps, end_lr=0.0001, power=0.9):     for i in range(int(total_steps)):         step_ = min(i, decay_steps)         yield (base_lr - end_lr) * ((1.0 - step_ / decay_steps) ** power) + end_lr  #生成exponential学习率下降序列 def exponential_lr(base_lr, decay_steps, decay_rate, total_steps, staircase=False):     for i in range(total_steps):         if staircase:             power_ = i // decay_steps         else:             power_ = float(i) / decay_steps         yield base_lr * (decay_rate ** power_)",{"type":24,"value":726},{"type":18,"tag":26,"props":797,"children":798},{},[799],{"type":24,"value":404},{"type":18,"tag":26,"props":801,"children":802},{},[803,809],{"type":18,"tag":326,"props":804,"children":806},{"className":805},[],[807],{"type":24,"value":808},"#定义损失函数 class SoftmaxCrossEntropyLoss(nn.Cell):     def __init__(self, num_cls=21, ignore_label=255):         super(SoftmaxCrossEntropyLoss, self).__init__()         #one-hot编码相关操作         self.one_hot = P.OneHot(axis=-1)         self.on_value = Tensor(1.0, mstype.float32)         self.off_value = Tensor(0.0, mstype.float32)         #类型转换相关操作         self.cast = P.Cast()         #softmax交叉熵损失函数及相关操作         self.ce = nn.SoftmaxCrossEntropyWithLogits()  
       #类别数及忽略标签         self.not_equal = P.NotEqual()         self.num_cls = num_cls         self.ignore_label = ignore_label         #矩阵乘法及求和相关操作         self.mul = P.Mul()         self.sum = P.ReduceSum(False)         self.div = P.RealDiv()         #转置及形状变换相关操作         self.transpose = P.Transpose()         self.reshape = P.Reshape()     #将标签转换为整形     def construct(self, logits, labels):         #将标签拉成一维并转换形状         labels_int = self.cast(labels, mstype.int32)         labels_int = self.reshape(labels_int, (-1,))          #将logits转置并转换形状         logits_ = self.transpose(logits, (0, 2, 3, 1))         logits_ = self.reshape(logits_, (-1, self.num_cls))          #生成权重         weights = self.not_equal(labels_int, self.ignore_label)         weights = self.cast(weights, mstype.float32)         #生成one-hot标签         one_hot_labels = self.one_hot(labels_int, self.num_cls, self.on_value, self.off_value)         #计算softmax交叉熵损失         loss = self.ce(logits_, one_hot_labels)          #加权         loss = self.mul(weights, loss)         #求平均损失         loss = self.div(self.sum(loss), self.sum(weights))         return loss",{"type":24,"value":726},{"type":18,"tag":31,"props":811,"children":813},{"id":812},"_6模型训练-1",[814],{"type":24,"value":418},{"type":18,"tag":201,"props":816,"children":818},{"id":817},"_61-构建训练函数-1",[819],{"type":24,"value":424},{"type":18,"tag":26,"props":821,"children":822},{},[823],{"type":24,"value":429},{"type":18,"tag":26,"props":825,"children":826},{},[827,833],{"type":18,"tag":326,"props":828,"children":830},{"className":829},[],[831],{"type":24,"value":832},"# 设置随机种子 set_seed(1) # 设置上下文 ms.set_context(mode=ms.GRAPH_MODE, save_graphs=False,                     device_target=\"CPU\") # 建立训练网络 class BuildTrainNetwork(nn.Cell):     def __init__(self, network, criterion):         super(BuildTrainNetwork, self).__init__()         self.network = network         self.criterion = criterion      def construct(self, input_data, label):         output = 
self.network(input_data)         net_loss = self.criterion(output, label)         return net_loss # 训练函数 def train(args):     # 如果使用分布式训练，则初始化     if args.is_distributed:         init()         args.rank = get_rank()         args.group_size = get_group_size()          parallel_mode = ParallelMode.DATA_PARALLEL         ms.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=args.group_size)               # 构建数据集     dataset = SegDataset(image_mean=args.image_mean,                                         image_std=args.image_std,                                         data_file=args.data_file,                                         batch_size=args.batch_size,                                         crop_size=args.crop_size,                                         max_scale=args.max_scale,                                         min_scale=args.min_scale,                                         ignore_label=args.ignore_label,                                         num_classes=args.num_classes,                                         num_readers=2,                                         num_parallel_calls=4,                                         shard_id=args.rank,                                         shard_num=args.group_size)     dataset.get_gray_dataset()     dataset.get_mindrecord_dataset(is_training=True)     dataset = dataset.get_dataset(repeat=1)           # 构建相关网络     if args.model == 'deeplab_v3_s16':         network = DeepLabV3('train', args.num_classes, 16, args.freeze_bn)     elif args.model == 'deeplab_v3_s8':         network = DeepLabV3('train', args.num_classes, 8, args.freeze_bn)     else:         raise NotImplementedError('model [{:s}] not recognized'.format(args.model))      # 构建损失函数     loss_ = SoftmaxCrossEntropyLoss(args.num_classes, args.ignore_label)     loss_.add_flags_recursive(fp32=True)     train_net = BuildTrainNetwork(network, loss_)      # 加载预训练模型     param_dict = 
ms.load_checkpoint(args.ckpt_file)     ms.load_param_into_net(train_net, param_dict)      # 优化器     iters_per_epoch = dataset.get_dataset_size()     total_train_steps = iters_per_epoch * args.train_epochs     if args.lr_type == 'cos':         lr_iter = cosine_lr(args.base_lr, total_train_steps, total_train_steps)     elif args.lr_type == 'poly':         lr_iter = poly_lr(args.base_lr, total_train_steps, total_train_steps, end_lr=0.0, power=0.9)     elif args.lr_type == 'exp':         lr_iter = exponential_lr(args.base_lr, args.lr_decay_step, args.lr_decay_rate,                                                 total_train_steps, staircase=True)     else:         raise ValueError('unknown learning rate type')     opt = nn.Momentum(params=train_net.trainable_params(), learning_rate=lr_iter, momentum=0.9, weight_decay=0.0001,                       loss_scale=args.loss_scale)      # 损失梯度缩放     manager_loss_scale = amp.FixedLossScaleManager(args.loss_scale, drop_overflow_update=False)     model = Model(train_net, optimizer=opt, amp_level=\"O3\", loss_scale_manager=manager_loss_scale)      # 回调函数，用于保存 ckpts     time_cb = TimeMonitor(data_size=iters_per_epoch)     loss_cb = LossMonitor()     cbs = [time_cb, loss_cb]      if args.rank == 0:         config_ck = CheckpointConfig(save_checkpoint_steps=iters_per_epoch,                                      keep_checkpoint_max=args.keep_checkpoint_max)         ckpoint_cb = ModelCheckpoint(prefix=args.model, directory=args.train_dir, config=config_ck)         cbs.append(ckpoint_cb)     # 开始训练     model.train(args.train_epochs, dataset, callbacks=cbs,dataset_sink_mode=True)",{"type":24,"value":726},{"type":18,"tag":201,"props":835,"children":837},{"id":836},"_62-模型训练",[838],{"type":24,"value":839},"6.2 
模型训练",{"type":18,"tag":26,"props":841,"children":842},{},[843],{"type":24,"value":844},"使用之前构建好的训练函数，完成对模型的训练。",{"type":18,"tag":26,"props":846,"children":847},{},[848],{"type":24,"value":849},"其中预加载的模型是使用voc2012完整数据集训练完成的，为方便演示，本层所使用的数据集为voc裁剪以后的小型数据集，约60张图片。",{"type":18,"tag":26,"props":851,"children":852},{},[853],{"type":18,"tag":326,"props":854,"children":856},{"className":855},[],[857],{"type":24,"value":858},"##设定相关参数并转为edict对象 cfg = edict({     \"batch_size\": 16,     \"crop_size\": 513,     \"image_mean\": [103.53, 116.28, 123.675],#图片均值     \"image_std\": [57.375, 57.120, 58.395],#图片标准差     \"min_scale\": 0.5, #最小缩放比例     \"max_scale\": 2.0, #最大缩放比例     \"ignore_label\": 255, #忽略标签     \"num_classes\": 21, #分类数     \"train_epochs\" : 1, #训练轮数     \"lr_type\": 'cos', #学习率变化方式     \"base_lr\": 0.0, #基础学习率     \"lr_decay_step\": 3*91, #学习率递减步数     \"lr_decay_rate\" :0.1, #学习率递减率     \"loss_scale\": 2048, #损失函数缩放比例     \"model\": 'deeplab_v3_s8', #模型类型     'rank': 0, #排名     'group_size':1, #组大小     'keep_checkpoint_max':1, #最大保存点数     'train_dir': 'model', #训练目录     'is_distributed':False, #是否分布式训练     'freeze_bn':True #是否冻结BN层 })  #如果训练目录存在，则删除 if os.path.exists(cfg.train_dir):     shutil.rmtree(cfg.train_dir) #数据路径和checkpoint路径     data_path = './seg2'  cfg.data_file = data_path  ckpt_path = './ckpt/deeplab_v3_s8-300_11.ckpt'  cfg.ckpt_file = ckpt_path #开始训练模型 train(cfg)",{"type":18,"tag":31,"props":860,"children":862},{"id":861},"_7模型预测",[863],{"type":24,"value":864},"7、模型预测",{"type":18,"tag":201,"props":866,"children":868},{"id":867},"_71构建预测模块",[869],{"type":24,"value":870},"7.1、构建预测模块",{"type":18,"tag":26,"props":872,"children":873},{},[874],{"type":24,"value":875},"利用MindSpore库以及之前定义好的函数，构建用于模型预测相关的功能。",{"type":18,"tag":26,"props":877,"children":878},{},[879],{"type":18,"tag":326,"props":880,"children":882},{"className":881},[],[883],{"type":24,"value":884},"#设置MindSpore的模式为图模式，设备类型为CPU ms.set_context(mode=ms.GRAPH_MODE, device_target=\"CPU\", 
save_graphs=False)  #计算直方图 def cal_hist(a, b, n):     k = (a >= 0) & (a \u003C n)     return np.bincount(n * a[k].astype(np.int32) + b[k], minlength=n ** 2).reshape(n, n)  #长边缩放函数 def resize_long(img, long_size=513):     h, w, _ = img.shape     if h > w:         new_h = long_size         new_w = int(1.0 * long_size * w / h)     else:         new_w = long_size         new_h = int(1.0 * long_size * h / w)     imo = cv2.resize(img, (new_w, new_h))     return imo  #构建评估网络 class BuildEvalNetwork(nn.Cell):     def __init__(self, network):         super(BuildEvalNetwork, self).__init__()         self.network = network         self.softmax = nn.Softmax(axis=1)      def construct(self, input_data):         output = self.network(input_data)         output = self.softmax(output)         return output  #预处理函数 def pre_process(args, img_, crop_size=513):     # resize     img_ = resize_long(img_, crop_size)     resize_h, resize_w, _ = img_.shape      # mean, std     image_mean = np.array(args.image_mean)     image_std = np.array(args.image_std)     img_ = (img_ - image_mean) / image_std      # pad to crop_size     pad_h = crop_size - img_.shape[0]     pad_w = crop_size - img_.shape[1]     if pad_h > 0 or pad_w > 0:         img_ = cv2.copyMakeBorder(img_, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)      # hwc to chw     img_ = img_.transpose((2, 0, 1))     return img_, resize_h, resize_w  # 定义了一个函数eval_batch，用于在输入一张或多张图像时，评估模型的输出值参数args为命令行参数，eval_net为评估用网络，img_lst为输入图像的列表，crop_size为裁剪后的大小，默认为513flip为是否对图像进行镜像翻转，默认为True def eval_batch(args, eval_net, img_lst, crop_size=513, flip=True):     # 初始化结果列表     result_lst = []     # 获取batch_size     batch_size = len(img_lst)     # 初始化batch_img矩阵，尺寸为(batch_size, 3, crop_size, crop_size)     batch_img = np.zeros((args.batch_size, 3, crop_size, crop_size), dtype=np.float32)     # 初始化resize_hw列表，用于记录每张图片经过预处理后的尺寸     resize_hw = []     # 循环处理每张图片     for l in range(batch_size):         # 获取当前图片         img_ = img_lst[l]         # 
对当前图片进行预处理，返回预处理后的图片，以及裁剪后的高度和宽度         img_, resize_h, resize_w = pre_process(args, img_, crop_size)         # 将预处理后的图片加入batch_img矩阵中         batch_img[l] = img_         # 将裁剪后的高度和宽度加入resize_hw列表中         resize_hw.append([resize_h, resize_w])     # 将batch_img矩阵以连续的方式存储     batch_img = np.ascontiguousarray(batch_img)     # 通过评估用网络（eval_net）对batch_img矩阵进行评估，得到输出结果net_out     net_out = eval_net(Tensor(batch_img, mstype.float32))     # 将输出结果转换为numpy数组     net_out = net_out.asnumpy()     # 如果flip为True，则对batch_img矩阵进行镜像翻转，并再次对翻转后的batch_img矩阵进行评估，将得到的结果加到net_out中       if flip:         batch_img = batch_img[:, :, :, ::-1]         net_out_flip = eval_net(Tensor(batch_img, mstype.float32))         net_out += net_out_flip.asnumpy()[:, :, :, ::-1]     # 循环处理每个batch     for bs in range(batch_size):         # 获取输出结果的概率值         probs_ = net_out[bs][:, :resize_hw[bs][0], :resize_hw[bs][1]].transpose((1, 2, 0))         # 获取原始图像的高度和宽度         ori_h, ori_w = img_lst[bs].shape[0], img_lst[bs].shape[1]         # 将概率值的尺寸调整为原始图像的尺寸         probs_ = cv2.resize(probs_, (ori_w, ori_h))         # 将处理后的结果加到结果列表中         result_lst.append(probs_)     # 返回结果列表     return result_lst  #定义了一个函数eval_batch_scales，用于在输入一张或多张图像时，按照不同比例分别进行评估，并将结果加起来参数args为命令行参数，eval_net为评估用网络，img_lst为输入图像的列表，scales为不同比例的列表base_crop_size为基准裁剪尺寸，默认为513，flip为是否对图像进行镜像翻转，默认为True def eval_batch_scales(args, eval_net, img_lst, scales,                       base_crop_size=513, flip=True):     # 根据比例列表计算不同尺寸的裁剪尺寸     sizes_ = [int((base_crop_size - 1) * sc) + 1 for sc in scales]     # 对第一个尺寸进行评估，并将结果加入probs_lst列表     probs_lst = eval_batch(args, eval_net, img_lst, crop_size=sizes_[0], flip=flip)     # 对其他尺寸进行评估，并将结果加到probs_lst中     for crop_size_ in sizes_[1:]:         probs_lst_tmp = eval_batch(args, eval_net, img_lst, crop_size=crop_size_, flip=flip)         for pl, _ in enumerate(probs_lst):             probs_lst[pl] += probs_lst_tmp[pl]      result_msk = []     for i in probs_lst:         # 
将评估得到的概率值转换为类别号，加入result_msk列表中         result_msk.append(i.argmax(axis=2))     # 返回结果列表     return result_msk  # The color source: print(list(colors.cnames.keys())) #print(list(colors.cnames.keys())) num_class = {0: 'background', 1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat',              9: 'chair', 10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse', 14: 'motorbike', 15: 'person', 16: 'pottedplant',              17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor', 21: 'edge'}  num_color = {0:'aliceblue', 1:'grey', 2:'red', 3:'green', 4:'darkorange', 5:'lime', 6:'bisque',      7:'black', 8:'blanchedalmond', 9:'blue', 10:'blueviolet', 11:'brown', 12:'burlywood', 13:'cadetblue',       14:'darkorange', 15:'tan', 16:'darkviolet', 17:'cornflowerblue', 18:'yellow', 19:'crimson', 20:'darkcyan'}  color_dic = [num_color[k] for k in sorted(num_color.keys())] bounds = list(range(21)) cmap = mpl.colors.ListedColormap(color_dic) norm = mpl.colors.BoundaryNorm(bounds, cmap.N) # 定义一个函数num_to_ClassAndColor，用于将类别号转换为颜色和类别名称 def num_to_ClassAndColor(num_list):     # 初始化颜色列表和类别列表     color_ = []     class_ = []     # 循环处理每个类别号     for num in num_list:         # 将类别号对应的颜色加入颜色列表中         color_.append(num_class[num])         # 将类别号对应的类别名称加入类别列表中         class_.append(num_color[num])     # 返回颜色列表和类别列表     return color_,class_",{"type":18,"tag":26,"props":886,"children":887},{},[888],{"type":24,"value":889},"定义函数net_eval，用于评估模型的输出值",{"type":18,"tag":26,"props":891,"children":892},{},[893],{"type":18,"tag":326,"props":894,"children":896},{"className":895},[],[897],{"type":24,"value":898},"def net_eval(args):     # 根据命令行参数和模型类型（args.model），创建评估用网络     if args.model == 'deeplab_v3_s16':         network = DeepLabV3('eval', args.num_classes, 16, args.freeze_bn)     elif args.model == 'deeplab_v3_s8':         network = DeepLabV3('eval', args.num_classes, 8, args.freeze_bn)     else:         raise NotImplementedError('model [{:s}] not 
recognized'.format(args.model))      eval_net = BuildEvalNetwork(network)      # 加载训练好的模型参数     param_dict = load_checkpoint(args.ckpt_file)     load_param_into_net(eval_net, param_dict)     # 设置评估用网络为测试状态     eval_net.set_train(False)         # 读取数据列表     with open(args.data_lst) as f:         img_lst = f.readlines()              # evaluate函数,初始化hist矩阵，大小为(args.num_classes, args.num_classes)初始化batch_img_lst和batch_msk_lstbi表示batch中的图片数量，image_num表示总共处理的图片数量      hist = np.zeros((args.num_classes, args.num_classes))     batch_img_lst = []     batch_msk_lst = []     bi = 0     image_num = 0     # 遍历img_lst中的每个id     for i, line in enumerate(img_lst):         id_ = line.strip()         img_path = os.path.join(cfg.voc_img_dir, id_ + '.jpg')         msk_path = os.path.join(cfg.voc_anno_gray_dir, id_ + '.png')         # 读取图像和标签             img_ = cv2.imread(img_path)         msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)         batch_img_lst.append(img_)         batch_msk_lst.append(msk_)         # 判断是否需要输出图片评估结果         if args.if_png:             # 对当前batch中的所有图片进行评估             batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,                                           base_crop_size=args.crop_size, flip=args.flip)              # 获取图像大小             height ,weight = batch_res[0].shape             # 将标签中的ignore_label设置为0             batch_msk_lst[0][batch_msk_lst[0]==args.ignore_label] = 0             # 绘制三张显示图像、预测结果和标签的子图             plt.figure(figsize=(3 * weight/1024*10, 2 * height/1024*10))             plt.subplot(1,3,1)             image = Image.open(img_path)             plt.imshow(image)                          plt.subplot(1,3,2)             plt.imshow(image)             plt.imshow(batch_res[0],alpha=0.8,interpolation='none', cmap=cmap, norm=norm)                                       plt.subplot(1,3,3)             plt.imshow(image)             plt.imshow(batch_msk_lst[0],alpha=0.8,interpolation='none', cmap=cmap, norm=norm)     
        plt.show()             # 获取预测结果和真实标签中的唯一值             prediction_num = np.unique(batch_res[0])             real_num = np.unique(batch_msk_lst[0])             # 获取预测结果和真实标签对应的颜色值和类别名             prediction_color,prediction_class = num_to_ClassAndColor(prediction_num)             print('prediction num:',prediction_num)             print('prediction color:',prediction_color)             print('prediction class:',prediction_class)             real_color,real_class = num_to_ClassAndColor(real_num)             print('groundtruth num:',real_num)             print('groundtruth color:',real_color)             print('groundtruth class:',real_class)             # 清空batch_img_lst和batch_msk_lst，用于存储下一个batch的图像和标签             batch_img_lst = []             batch_msk_lst = []             # 判断当前处理的图片数是否小于args.num_png             if i \u003C args.num_png-1:                 continue             else:                 return         # 如果当前batch已经存满，则对这些图片进行评估             bi += 1         if bi == args.batch_size:             batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,                                           base_crop_size=args.crop_size, flip=args.flip)             # 计算当前batch的混淆矩阵，并将其累加到hist中             for mi in range(args.batch_size):                 hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)              bi = 0             batch_img_lst = []             batch_msk_lst = []             # 每处理100张图像，输出处理进度             if (i+1)%100 == 0:                 print('processed {} images'.format(i+1))         image_num = i     # 如果batch中还有剩余的图片，则对其进行评估     if bi > 0:         batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,                                       base_crop_size=args.crop_size, flip=args.flip)         # 计算当前batch的混淆矩阵，并将其累加到hist中         for mi in range(bi):             hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)  
       if (i+1) % 100 == 0:             print('processed {} images'.format(image_num + 1))     # 计算每个类别的IoU，求其平均值作为结果输出     np.seterr(divide=\"ignore\", invalid=\"ignore\")     iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))     print('mean IoU', np.nanmean(iu))",{"type":18,"tag":201,"props":900,"children":902},{"id":901},"_72验证模型",[903],{"type":24,"value":904},"7.2、验证模型",{"type":18,"tag":26,"props":906,"children":907},{},[908],{"type":24,"value":909},"利用刚才定义好的模型，从数据集读取图片进行网络验证，不显示图片。",{"type":18,"tag":26,"props":911,"children":912},{},[913],{"type":18,"tag":326,"props":914,"children":916},{"className":915},[],[917],{"type":24,"value":918},"# test  1 # 定义配置文件 cfg = edict({     \"batch_size\": 1, # 批大小     \"crop_size\": 513, # 图像裁剪大小     \"image_mean\": [103.53, 116.28, 123.675], # RGB图像为3通道，定义图像RGB均值     \"image_std\": [57.375, 57.120, 58.395], # RGB图像为3通道，定义图像RGB标准差     \"scales\": [1.0], # 数据增强的尺度范围     # [0.5,0.75,1.0,1.25,1.75]     'flip': True, # 是否进行翻转     'ignore_label': 255, # 忽略标签的像素值     'num_classes':21, # 类别数，此处为PASCAL VOC数据集     'model': 'deeplab_v3_s8', # 模型名称     'freeze_bn': True, # BN冻结，是否使用BN层      'if_png':False,        # 数据是否使用png格式     'num_png':10           # 数据集中的png图像数量 })  # 数据集路径 data_path = './seg2' # if not os.path.exists(data_path):      #mox.file.copy_parallel(src_url=\"s3://share-course/dataset/voc2012_raw/\", dst_url=data_path) cfg.data_file = data_path  # 定义数据集 dataset = SegDataset(image_mean=cfg.image_mean,                                     image_std=cfg.image_std,                                     data_file=cfg.data_file) # 获取灰度图像数据集 dataset.get_gray_dataset() # 数据集的txt标签路径 cfg.data_lst = os.path.join(cfg.data_file,'val.txt') # VOC数据集中的图像路径 cfg.voc_img_dir = os.path.join(cfg.data_file,'JPEG') # VOC数据集中的标签图像路径 cfg.voc_anno_gray_dir = os.path.join(cfg.data_file,'SegmentationClassGray')  ckpt_path = './ckpt' # if not os.path.exists(ckpt_path): #     
mox.file.copy_parallel(src_url=\"s3://yyq-3/DATA/code/deeplabv3/model\", dst_url=ckpt_path)   #if yours model had saved # 加载模型文件路径 cfg.ckpt_file = os.path.join(ckpt_path,'deeplab_v3_s8-300_11.ckpt')   print('loading checkpoint:',cfg.ckpt_file) # 模型验证评估 net_eval(cfg)",{"type":18,"tag":26,"props":920,"children":921},{},[922],{"type":24,"value":923},"利用刚才定义好的模型，从数据集读取图片进行网络验证，显示图片及结果。",{"type":18,"tag":26,"props":925,"children":926},{},[927],{"type":18,"tag":326,"props":928,"children":930},{"className":929},[],[931],{"type":24,"value":932},"# test 2 cfg = edict({     \"batch_size\": 1, # 批大小     \"crop_size\": 513, # 图像裁剪大小     \"image_mean\": [103.53, 116.28, 123.675], # RGB图像为3通道，定义图像RGB均值     \"image_std\": [57.375, 57.120, 58.395], # RGB图像为3通道，定义图像RGB标准差     \"scales\": [1.0], # 数据增强的尺度范围     # [0.5,0.75,1.0,1.25,1.75]     'flip': True, # 是否进行翻转     'ignore_label': 255, # 忽略标签的像素值     'num_classes':21, # 类别数，此处为PASCAL VOC数据集     'model': 'deeplab_v3_s8', # 模型名称     'freeze_bn': True, # BN冻结，是否使用BN层      'if_png':True,         # 图像数据是否使用png格式     'num_png':3            # png图像数量 })    # import moxing as mox data_path = './seg2' # if not os.path.exists(data_path): #     mox.file.copy_parallel(src_url=\"s3://share-course/dataset/voc2012_raw/\", dst_url=data_path) # 数据集路径 cfg.data_file = data_path  # 定义数据集 dataset = SegDataset(image_mean=cfg.image_mean,                                     image_std=cfg.image_std,                                     data_file=cfg.data_file) dataset.get_gray_dataset() # 数据集的txt标签路径 cfg.data_lst = os.path.join(cfg.data_file,'val.txt') # VOC数据集中的图像路径 cfg.voc_img_dir = os.path.join(cfg.data_file,'JPEG') # VOC数据集中的标签图像路径 cfg.voc_anno_gray_dir = os.path.join(cfg.data_file,'SegmentationClassGray')  ckpt_path = './ckpt' # if not os.path.exists(ckpt_path): #     mox.file.copy_parallel(src_url=\"s3://yyq-3/DATA/code/deeplabv3/model\", dst_url=ckpt_path)     #if yours model had saved cfg.ckpt_file = 
os.path.join(ckpt_path,'deeplab_v3_s8-300_11.ckpt') # 加载模型文件路径  print('loading checkpoint:',cfg.ckpt_file) # 模型验证评估 net_eval(cfg)",{"title":7,"searchDepth":934,"depth":934,"links":935},4,[936,938,939,940,947,950,956,957,958,959,965,968,972],{"id":33,"depth":937,"text":36},2,{"id":59,"depth":937,"text":62},{"id":162,"depth":937,"text":165},{"id":196,"depth":937,"text":199,"children":941},[942,944,945,946],{"id":203,"depth":943,"text":206},3,{"id":300,"depth":943,"text":303},{"id":311,"depth":943,"text":314},{"id":332,"depth":943,"text":335},{"id":351,"depth":937,"text":354,"children":948},[949],{"id":357,"depth":943,"text":360},{"id":415,"depth":937,"text":418,"children":951},[952,953,954,955],{"id":421,"depth":943,"text":424},{"id":457,"depth":943,"text":460},{"id":463,"depth":943,"text":466},{"id":469,"depth":943,"text":469},{"id":483,"depth":937,"text":36},{"id":503,"depth":937,"text":62},{"id":588,"depth":937,"text":165},{"id":616,"depth":937,"text":199,"children":960},[961,962,963,964],{"id":621,"depth":943,"text":206},{"id":700,"depth":943,"text":303},{"id":709,"depth":943,"text":314},{"id":729,"depth":943,"text":335},{"id":748,"depth":937,"text":354,"children":966},[967],{"id":753,"depth":943,"text":360},{"id":812,"depth":937,"text":418,"children":969},[970,971],{"id":817,"depth":943,"text":424},{"id":836,"depth":943,"text":839},{"id":861,"depth":937,"text":864,"children":973},[974,975],{"id":867,"depth":943,"text":870},{"id":901,"depth":943,"text":904},"markdown","content:technology-blogs:zh:3145.md","content","technology-blogs/zh/3145.md","technology-blogs/zh/3145","md",1776506126622]