[{"data":1,"prerenderedAt":1135},["ShallowReactive",2],{"content-query-zxZAhCBCZg":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":1129,"_id":1130,"_source":1131,"_file":1132,"_stem":1133,"_extension":1134},"/news/zh/2096","zh",false,"","震撼来袭！基于昇思MindSpore的同元软控AI系列工具箱正式发布，大幅度降低产品研发成本","基于昇思MindSpore的MWORKS AI工具箱将原有的仿真建模和AI模型有机融合，可以大幅度减少产品的研发成本。未来该工具箱可以应用到航空、航天、船舶、能源等复杂装备系统的工程建模，实现更高、更智能的数字孪生系统。","2023-01-28","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2023/01/30/a0b54938e87c413891ba51dfadb12f55.png","news",{"type":14,"children":15,"toc":1112},"root",[16,24,35,43,51,56,65,73,81,86,94,102,107,112,117,122,127,132,137,142,147,152,157,162,167,172,177,182,187,192,197,202,207,212,217,222,227,232,237,242,247,252,257,262,267,272,277,282,287,292,297,302,307,312,317,322,327,332,337,342,347,352,357,362,367,372,377,382,387,392,397,402,407,412,417,422,427,432,437,442,447,452,457,462,467,472,477,482,487,492,497,502,507,512,517,521,529,537,544,553,558,565,570,582,589,594,601,606,611,618,623,628,635,640,648,659,664,669,674,681,686,691,701,708,713,722,727,732,739,744,756,763,768,773,780,785,790,797,802,811,823,828,836,841,849,856,861,870,881,886,891,896,901,906,911,916,921,928,933,938,943,951,956,963,968,973,980,985,992,997,1005,1013,1018,1023,1028,1033,1042,1048,1056,1062,1070,1076,1084,1090,1098,1104],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"震撼来袭基于昇思mindspore的同元软控ai系列工具箱正式发布大幅度降低产品研发成本",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28,30],{"type":23,"value":29},"随着智能时代的到来，同元软控与华为携手合作，以昇思MindSpore为框架底座，打造了MWORKS 
AI工具箱，并于2023年1月8号正式对外发布。",{"type":17,"tag":31,"props":32,"children":33},"strong",{},[34],{"type":23,"value":9},{"type":17,"tag":25,"props":36,"children":37},{},[38],{"type":17,"tag":31,"props":39,"children":40},{},[41],{"type":23,"value":42},"01",{"type":17,"tag":25,"props":44,"children":45},{},[46],{"type":17,"tag":31,"props":47,"children":48},{},[49],{"type":23,"value":50},"MWORKS AI工具箱基础架构",{"type":17,"tag":25,"props":52,"children":53},{},[54],{"type":23,"value":55},"MWORKS AI工具箱主要包含深度学习与机器学习工具箱，深度学习工具箱支持前馈神经网络、卷积神经网络、循环神经网络等深度学习网络的设计、模型构建、训练以及深度学习应用。机器学习工具箱支持聚类分析、主成分分析、降维、分类、回归等数据的描述、分析及数据建模。工具架构如图示，主要分为底层、框架核心及前端。",{"type":17,"tag":25,"props":57,"children":58},{},[59],{"type":17,"tag":60,"props":61,"children":64},"img",{"alt":62,"src":63},"image.png","https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129064757.06118272938695393375740387281086:50540129085838:2400:61FBF37689C43966712AC89FADBD320915CE1D99BC683A0934F9C40F5AB05658.png",[],{"type":17,"tag":25,"props":66,"children":67},{},[68],{"type":17,"tag":31,"props":69,"children":70},{},[71],{"type":23,"value":72},"02",{"type":17,"tag":25,"props":74,"children":75},{},[76],{"type":17,"tag":31,"props":77,"children":78},{},[79],{"type":23,"value":80},"模块及功能",{"type":17,"tag":25,"props":82,"children":83},{},[84],{"type":23,"value":85},"本节主要介绍深度学习工具箱的主要模块及功能。深度学习工具箱5大模块，110个基础函数，分别是图像深度学习41个，预训练网络19个，网络训练组件2个，时序、序列和文本深度学习33个，函数逼近与聚类15个。",{"type":17,"tag":25,"props":87,"children":88},{},[89],{"type":17,"tag":31,"props":90,"children":91},{},[92],{"type":23,"value":93},"函数",{"type":17,"tag":25,"props":95,"children":96},{},[97],{"type":17,"tag":31,"props":98,"children":99},{},[100],{"type":23,"value":101},"说明（中文）",{"type":17,"tag":25,"props":103,"children":104},{},[105],{"type":23,"value":106},"trainingOptions",{"type":17,"tag":25,"props":108,"children":109},{},[110],{"type":23,"value":111},"训练深度学习神经网络的选项",{"type":17,"tag":25,"props":113,"children":11
4},{},[115],{"type":23,"value":116},"trainNetwork",{"type":17,"tag":25,"props":118,"children":119},{},[120],{"type":23,"value":121},"训练深度学习神经网络",{"type":17,"tag":25,"props":123,"children":124},{},[125],{"type":23,"value":126},"squeezenet",{"type":17,"tag":25,"props":128,"children":129},{},[130],{"type":23,"value":131},"SqueezeNet卷积神经网络",{"type":17,"tag":25,"props":133,"children":134},{},[135],{"type":23,"value":136},"googlenet",{"type":17,"tag":25,"props":138,"children":139},{},[140],{"type":23,"value":141},"GoogLeNet卷积神经网络",{"type":17,"tag":25,"props":143,"children":144},{},[145],{"type":23,"value":146},"inceptionv3",{"type":17,"tag":25,"props":148,"children":149},{},[150],{"type":23,"value":151},"Inception-v3卷积神经网络",{"type":17,"tag":25,"props":153,"children":154},{},[155],{"type":23,"value":156},"convolution1dLayer",{"type":17,"tag":25,"props":158,"children":159},{},[160],{"type":23,"value":161},"一维卷积层",{"type":17,"tag":25,"props":163,"children":164},{},[165],{"type":23,"value":166},"mobilenetv2",{"type":17,"tag":25,"props":168,"children":169},{},[170],{"type":23,"value":171},"MobileNet-v2卷积神经网络",{"type":17,"tag":25,"props":173,"children":174},{},[175],{"type":23,"value":176},"resnet18",{"type":17,"tag":25,"props":178,"children":179},{},[180],{"type":23,"value":181},"ResNet-18卷积神经网络",{"type":17,"tag":25,"props":183,"children":184},{},[185],{"type":23,"value":186},"resnet50",{"type":17,"tag":25,"props":188,"children":189},{},[190],{"type":23,"value":191},"ResNet-50卷积神经网络",{"type":17,"tag":25,"props":193,"children":194},{},[195],{"type":23,"value":196},"resnet101",{"type":17,"tag":25,"props":198,"children":199},{},[200],{"type":23,"value":201},"ResNet-101卷积神经网络",{"type":17,"tag":25,"props":203,"children":204},{},[205],{"type":23,"value":206},"xception",{"type":17,"tag":25,"props":208,"children":209},{},[210],{"type":23,"value":211},"Xception卷积神经网络",{"type":17,"tag":25,"props":213,"children":214},{},[215],{"type":23,"value":216},"transposedConv1dLayer",{"type":17,"ta
g":25,"props":218,"children":219},{},[220],{"type":23,"value":221},"转置一维卷积层",{"type":17,"tag":25,"props":223,"children":224},{},[225],{"type":23,"value":226},"batchNormalization1dLayer",{"type":17,"tag":25,"props":228,"children":229},{},[230],{"type":23,"value":231},"一维批处理规范化层",{"type":17,"tag":25,"props":233,"children":234},{},[235],{"type":23,"value":236},"batchNormalization3dLayer",{"type":17,"tag":25,"props":238,"children":239},{},[240],{"type":23,"value":241},"三维批处理规范化层",{"type":17,"tag":25,"props":243,"children":244},{},[245],{"type":23,"value":246},"shufflenet",{"type":17,"tag":25,"props":248,"children":249},{},[250],{"type":23,"value":251},"预训练的ShuffleNet卷积神经网络",{"type":17,"tag":25,"props":253,"children":254},{},[255],{"type":23,"value":256},"instanceNormalization1dLayer",{"type":17,"tag":25,"props":258,"children":259},{},[260],{"type":23,"value":261},"一维实例规范化层",{"type":17,"tag":25,"props":263,"children":264},{},[265],{"type":23,"value":266},"instanceNormalization3dLayer",{"type":17,"tag":25,"props":268,"children":269},{},[270],{"type":23,"value":271},"三维实例规范化层",{"type":17,"tag":25,"props":273,"children":274},{},[275],{"type":23,"value":276},"averagePooling1dLayer",{"type":17,"tag":25,"props":278,"children":279},{},[280],{"type":23,"value":281},"一维平均池化层",{"type":17,"tag":25,"props":283,"children":284},{},[285],{"type":23,"value":286},"alexnet",{"type":17,"tag":25,"props":288,"children":289},{},[290],{"type":23,"value":291},"AlexNet卷积神经网络",{"type":17,"tag":25,"props":293,"children":294},{},[295],{"type":23,"value":296},"vgg16",{"type":17,"tag":25,"props":298,"children":299},{},[300],{"type":23,"value":301},"VGG-16卷积神经网络",{"type":17,"tag":25,"props":303,"children":304},{},[305],{"type":23,"value":306},"vgg19",{"type":17,"tag":25,"props":308,"children":309},{},[310],{"type":23,"value":311},"VGG-19卷积神经网络",{"type":17,"tag":25,"props":313,"children":314},{},[315],{"type":23,"value":316},"convolution2dLayer",{"type":17,"tag":25,"props":318,"children":319},{},[320],
{"type":23,"value":321},"二维卷积层",{"type":17,"tag":25,"props":323,"children":324},{},[325],{"type":23,"value":326},"convolution3dLayer",{"type":17,"tag":25,"props":328,"children":329},{},[330],{"type":23,"value":331},"3D卷积层",{"type":17,"tag":25,"props":333,"children":334},{},[335],{"type":23,"value":336},"groupedConvolution2dLayer",{"type":17,"tag":25,"props":338,"children":339},{},[340],{"type":23,"value":341},"二维分组卷积层",{"type":17,"tag":25,"props":343,"children":344},{},[345],{"type":23,"value":346},"transposedConv2dLayer",{"type":17,"tag":25,"props":348,"children":349},{},[350],{"type":23,"value":351},"转置二维卷积层",{"type":17,"tag":25,"props":353,"children":354},{},[355],{"type":23,"value":356},"transposedConv3dLayer",{"type":17,"tag":25,"props":358,"children":359},{},[360],{"type":23,"value":361},"转置的3D卷积层",{"type":17,"tag":25,"props":363,"children":364},{},[365],{"type":23,"value":366},"fullyConnectedLayer",{"type":17,"tag":25,"props":368,"children":369},{},[370],{"type":23,"value":371},"全连接层",{"type":17,"tag":25,"props":373,"children":374},{},[375],{"type":23,"value":376},"reluLayer",{"type":17,"tag":25,"props":378,"children":379},{},[380],{"type":23,"value":381},"整流线性单元（ReLU）层",{"type":17,"tag":25,"props":383,"children":384},{},[385],{"type":23,"value":386},"leakyReluLayer",{"type":17,"tag":25,"props":388,"children":389},{},[390],{"type":23,"value":391},"泄漏整流线性单元（ReLU）层",{"type":17,"tag":25,"props":393,"children":394},{},[395],{"type":23,"value":396},"clippedReluLayer",{"type":17,"tag":25,"props":398,"children":399},{},[400],{"type":23,"value":401},"修剪整流线性单元（ReLU）层",{"type":17,"tag":25,"props":403,"children":404},{},[405],{"type":23,"value":406},"eluLayer",{"type":17,"tag":25,"props":408,"children":409},{},[410],{"type":23,"value":411},"指数线性单位（ELU）层",{"type":17,"tag":25,"props":413,"children":414},{},[415],{"type":23,"value":416},"tanhLayer",{"type":17,"tag":25,"props":418,"children":419},{},[420],{"type":23,"value":421},"双曲正切（tanh）层",{"type":17,"tag":25,"props":423
,"children":424},{},[425],{"type":23,"value":426},"swishLayer",{"type":17,"tag":25,"props":428,"children":429},{},[430],{"type":23,"value":431},"Swish层",{"type":17,"tag":25,"props":433,"children":434},{},[435],{"type":23,"value":436},"batchNormalizationLayer",{"type":17,"tag":25,"props":438,"children":439},{},[440],{"type":23,"value":441},"批处理规范化层",{"type":17,"tag":25,"props":443,"children":444},{},[445],{"type":23,"value":446},"groupNormalizationLayer",{"type":17,"tag":25,"props":448,"children":449},{},[450],{"type":23,"value":451},"组归一化层",{"type":17,"tag":25,"props":453,"children":454},{},[455],{"type":23,"value":456},"instanceNormalizationLayer",{"type":17,"tag":25,"props":458,"children":459},{},[460],{"type":23,"value":461},"实例规范化层",{"type":17,"tag":25,"props":463,"children":464},{},[465],{"type":23,"value":466},"layerNormalizationLayer",{"type":17,"tag":25,"props":468,"children":469},{},[470],{"type":23,"value":471},"层归一化层",{"type":17,"tag":25,"props":473,"children":474},{},[475],{"type":23,"value":476},"crossChannelNormalizationLayer",{"type":17,"tag":25,"props":478,"children":479},{},[480],{"type":23,"value":481},"通道局部响应标准化层",{"type":17,"tag":25,"props":483,"children":484},{},[485],{"type":23,"value":486},"dropoutLayer",{"type":17,"tag":25,"props":488,"children":489},{},[490],{"type":23,"value":491},"丢弃层",{"type":17,"tag":25,"props":493,"children":494},{},[495],{"type":23,"value":496},"crop2dLayer",{"type":17,"tag":25,"props":498,"children":499},{},[500],{"type":23,"value":501},"二维裁剪图层",{"type":17,"tag":25,"props":503,"children":504},{},[505],{"type":23,"value":506},"crop3dLayer",{"type":17,"tag":25,"props":508,"children":509},{},[510],{"type":23,"value":511},"3-D裁剪层",{"type":17,"tag":25,"props":513,"children":514},{},[515],{"type":23,"value":516},"…",{"type":17,"tag":25,"props":518,"children":519},{},[520],{"type":23,"value":516},{"type":17,"tag":25,"props":522,"children":523},{},[524],{"type":17,"tag":31,"props":525,"children":526},{},[527],{"type":23,"valu
e":528},"03",{"type":17,"tag":25,"props":530,"children":531},{},[532],{"type":17,"tag":31,"props":533,"children":534},{},[535],{"type":23,"value":536},"应用案例",{"type":17,"tag":538,"props":539,"children":541},"h3",{"id":540},"我们基于同元-昇思mindspore-ai工具箱在图像文字和声音等领域展开了研究",[542],{"type":23,"value":543},"我们基于同元-昇思MindSpore AI工具箱，在图像、文字和声音等领域展开了研究。",{"type":17,"tag":538,"props":545,"children":547},{"id":546},"_31-手写数字识别与倾斜角度预测",[548],{"type":17,"tag":31,"props":549,"children":550},{},[551],{"type":23,"value":552},"3.1 手写数字识别与倾斜角度预测",{"type":17,"tag":25,"props":554,"children":555},{},[556],{"type":23,"value":557},"手写数字识别（MNIST）数据集是一个大型的手写体数字数据库，通常用于训练各种图像处理系统，也被广泛用于机器学习领域的训练和测试。手写数字识别数据集包含手写数字的合成图像、每张图像对应的数字以及倾斜的角度。该数据集共有60000张训练图片和10000张测试图片，每张图片上数字为0-9中的一种，总共分为9类，其中每张图片的大小为28*28（像素）。",{"type":17,"tag":25,"props":559,"children":560},{},[561],{"type":17,"tag":60,"props":562,"children":564},{"alt":62,"src":563},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129064905.11417662805906063360842366415988:50540129085838:2400:EE16FC52A05CEF4759995301C0655DAAA8767C3647CBEF4D60F5560D28B02999.png",[],{"type":17,"tag":25,"props":566,"children":567},{},[568],{"type":23,"value":569},"图1：手写数字识别数据集",{"type":17,"tag":25,"props":571,"children":572},{},[573,575,580],{"type":23,"value":574},"我们主要利用同元-昇思MindSpore AI工具箱提供的卷积算子（Conv2d）、最大池化算子（MaxPool）等构建一个卷积神经网络，并将图像及其对应的数字作为输入和输出，训练得到一个可识别手写数字的网络（",{"type":17,"tag":31,"props":576,"children":577},{},[578],{"type":23,"value":579},"基于同元-昇思MindSpore 
AI工具箱实现的详细代码见附录（1）",{"type":23,"value":581},"）。",{"type":17,"tag":25,"props":583,"children":584},{},[585],{"type":17,"tag":60,"props":586,"children":588},{"alt":62,"src":587},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129064928.42277710098287935821890765027180:50540129085838:2400:9CCDE4EB68F2AE8EA9EB80E1D2C5136CA8D6E8A99E0FA91636B06CC1D91E61C5.png",[],{"type":17,"tag":25,"props":590,"children":591},{},[592],{"type":23,"value":593},"图2：手写数字识别训练过程 在测试集中随机抽取6张图片展示，并使用训练好的模型进行识别，将结果打印在命令行窗口：",{"type":17,"tag":25,"props":595,"children":596},{},[597],{"type":17,"tag":60,"props":598,"children":600},{"alt":62,"src":599},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129064956.57727116568074303701194434240470:50540129085838:2400:826F168A0B5F6968DCC165F37BFF4F6DF07CB62A0F1E43F0323523FA88A726D5.png",[],{"type":17,"tag":25,"props":602,"children":603},{},[604],{"type":23,"value":605},"图3：手写数字识别结果",{"type":17,"tag":25,"props":607,"children":608},{},[609],{"type":23,"value":610},"该网络模型的卷积模块可以提取图像的特征，去掉Softmax层并在最后添加新的全连接层，得到一个新的网络模型，随后用数字倾斜角度作为预测目标来训练该网络。",{"type":17,"tag":25,"props":612,"children":613},{},[614],{"type":17,"tag":60,"props":615,"children":617},{"alt":62,"src":616},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065013.47473995654659199249519797635943:50540129085838:2400:684D76B1F22FD77444F7C37BE97CF806D921E394687DB0D31519B40BC858F9A3.png",[],{"type":17,"tag":25,"props":619,"children":620},{},[621],{"type":23,"value":622},"图4:手写数字倾斜角度预测训练过程",{"type":17,"tag":25,"props":624,"children":625},{},[626],{"type":23,"value":627},"随机挑选测试集中的图像进行预测，橙色线条为预测角度，可见与实际数字的倾斜角度基本相符。",{"type":17,"tag":25,"props":629,"children":630},{},[631],{"type":17,"tag":60,"props":632,"children":634},{"alt":62,"src":633},"https://fileserver.developer.huaweicloud.com/F
ileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065039.44196985627598151536718134702053:50540129085838:2400:6267BD2B134D5AF502DF67F9C1F79CF950B5AB9DF0D66516C3AB3E752603F942.png",[],{"type":17,"tag":25,"props":636,"children":637},{},[638],{"type":23,"value":639},"图5：手写数字倾斜角度预测结果",{"type":17,"tag":25,"props":641,"children":642},{},[643],{"type":17,"tag":31,"props":644,"children":645},{},[646],{"type":23,"value":647},"3.2 日语元音字符分类",{"type":17,"tag":25,"props":649,"children":650},{},[651,653,658],{"type":23,"value":652},"本示例说明如何使用长短期记忆 (LSTM) 网络对序列数据进行分类（",{"type":17,"tag":31,"props":654,"children":655},{},[656],{"type":23,"value":657},"基于同元-昇思MindSpore AI工具箱实现的详细代码见附录（2）",{"type":23,"value":581},{"type":17,"tag":25,"props":660,"children":661},{},[662],{"type":23,"value":663},"要训练深度神经网络以对序列数据进行分类，可以使用 LSTM 网络。LSTM 网络允许您将序列数据输入网络，并根据序列数据的各个时间步进行预测。",{"type":17,"tag":25,"props":665,"children":666},{},[667],{"type":23,"value":668},"本示例使用日语元音数据集。此示例训练一个 LSTM 网络，旨在根据表示连续说出的两个日语元音的时间序列数据来识别说话者。训练数据包含九个说话者的时间序列数据。每个序列有12个特征，且长度不同。该数据集包含270个训练观测值和370个测试观测值。",{"type":17,"tag":25,"props":670,"children":671},{},[672],{"type":23,"value":673},"加载日语元音训练数据。可视化第一个时间序列。每条线对应一个特征。",{"type":17,"tag":25,"props":675,"children":676},{},[677],{"type":17,"tag":60,"props":678,"children":680},{"alt":62,"src":679},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065117.58913588311230853800078193476940:50540129085838:2400:DAC7A06DE01BDD13A52C14CD8434FACB88F7FD9C1AF2E5F66E10B6E6B48673CC.png",[],{"type":17,"tag":25,"props":682,"children":683},{},[684],{"type":23,"value":685},"图6：数据集展示",{"type":17,"tag":25,"props":687,"children":688},{},[689],{"type":23,"value":690},"基于同元-昇思MindSpore AI工具箱提供的LSTM算子、全连接算子、Softmax激活函数等， 我们定义了LSTM网络架构，然后导入日语元音数据集进行训练，并打印Loss曲线。",{"type":17,"tag":692,"props":693,"children":695},"pre",{"code":694},"# 构建网络\nlayers = SequentialCell([\n    bilstmLayer(12, 100; 
NumLayers=3, Batch_First=true),\n    flattenLayer(),\n    fullyConnectedLayer(truncation_number * 200, 9),\n    softmaxLayer(),\n])\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Adam\", \"Accuracy\", 27, 200, 0.001; Shuffle=true, Plots=true\n)\nnet = trainNetwork(train_data, train_label, layers, options)\nYPred = TyDeepLearning.classify(net, test_data)\naccuracy = Accuracy(YPred, test_label)\nprint(accuracy)\n",[696],{"type":17,"tag":697,"props":698,"children":699},"code",{"__ignoreMap":7},[700],{"type":23,"value":694},{"type":17,"tag":25,"props":702,"children":703},{},[704],{"type":17,"tag":60,"props":705,"children":707},{"alt":62,"src":706},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065214.77767967587776948800264703950432:50540129085838:2400:C8719DE81A94FE0A76117BF4D94F67EFFBDC42A10FED112F53A9634A393C7C1D.png",[],{"type":17,"tag":25,"props":709,"children":710},{},[711],{"type":23,"value":712},"图7：Loss曲线",{"type":17,"tag":538,"props":714,"children":716},{"id":715},"_33-基于lstm的人类活动分类任务",[717],{"type":17,"tag":31,"props":718,"children":719},{},[720],{"type":23,"value":721},"3.3 
基于LSTM的人类活动分类任务",{"type":17,"tag":25,"props":723,"children":724},{},[725],{"type":23,"value":726},"该案例所采用的数据集来源于佩戴在人身上的传感器的数据序列。每个序列有三个特征，分别对应三个不同坐标方向下的加速度数据。该数据集包括七位志愿者的加速度数据，其中六组的数据作为训练集，另外一组作为测试集。",{"type":17,"tag":25,"props":728,"children":729},{},[730],{"type":23,"value":731},"某序列的其中一维坐标数据：",{"type":17,"tag":25,"props":733,"children":734},{},[735],{"type":17,"tag":60,"props":736,"children":738},{"alt":62,"src":737},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065245.98475131604090252366480742803966:50540129085838:2400:7EEC115B281A5204A0A1DA45192986A181E1BED976C542109494C6DF93074364.png",[],{"type":17,"tag":25,"props":740,"children":741},{},[742],{"type":23,"value":743},"图8：训练序列",{"type":17,"tag":25,"props":745,"children":746},{},[747,749,754],{"type":23,"value":748},"我们基于同元-昇思MindSpore AI工具箱中的网络层构建一个LSTM神经网络，训练后对测试数据进行测试。训练过程如下（",{"type":17,"tag":31,"props":750,"children":751},{},[752],{"type":23,"value":753},"基于同元-昇思MindSpore 
AI工具箱实现的详细代码见附录（3）",{"type":23,"value":755},"）：",{"type":17,"tag":25,"props":757,"children":758},{},[759],{"type":17,"tag":60,"props":760,"children":762},{"alt":62,"src":761},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065306.48223910018222896995928475344730:50540129085838:2400:98AC720DAA11AF09F01210676688D3C644B8F30D65FE8972844826CE80C1F7BC.png",[],{"type":17,"tag":25,"props":764,"children":765},{},[766],{"type":23,"value":767},"图9：LSTM网络训练过程",{"type":17,"tag":25,"props":769,"children":770},{},[771],{"type":23,"value":772},"训练完成后，利用已训练的网络对测试集进行测试。下图展示了测试数据集中的三类坐标数据：",{"type":17,"tag":25,"props":774,"children":775},{},[776],{"type":17,"tag":60,"props":777,"children":779},{"alt":62,"src":778},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065323.96866630727384649416455597528485:50540129085838:2400:EEA696B3CF28D8513617F82FC13C0F46427D4006CA4AB00C15E345150D4C116F.png",[],{"type":17,"tag":25,"props":781,"children":782},{},[783],{"type":23,"value":784},"图10：测试序列",{"type":17,"tag":25,"props":786,"children":787},{},[788],{"type":23,"value":789},"对以上数据进行预测得到结果：",{"type":17,"tag":25,"props":791,"children":792},{},[793],{"type":17,"tag":60,"props":794,"children":796},{"alt":62,"src":795},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065343.38787576248665947952774703192647:50540129085838:2400:2C6AAA2A6AD9E897DA8EEA254C742344AD249315FD7734ED0395C443F8AAC86D.png",[],{"type":17,"tag":25,"props":798,"children":799},{},[800],{"type":23,"value":801},"图11：测试结果",{"type":17,"tag":538,"props":803,"children":805},{"id":804},"_34-声纹识别",[806],{"type":17,"tag":31,"props":807,"children":808},{},[809],{"type":23,"value":810},"3.4 
声纹识别",{"type":17,"tag":25,"props":812,"children":813},{},[814,816,821],{"type":23,"value":815},"声纹识别是一个突出的研究领域，具有多种应用，包括取证和生物识别认证。许多声纹识别系统依赖于预先计算的特征，如i向量或MFCC，然后将其输入机器学习或深度学习网络进行分类。其他深度学习语音系统绕过特征提取阶段，将音频信号直接馈送到网络。在这样的端到端系统中，网络直接学习低级音频信号特性（",{"type":17,"tag":31,"props":817,"children":818},{},[819],{"type":23,"value":820},"基于同元-昇思MindSpore AI工具箱实现的详细代码见附录（4））",{"type":23,"value":822},"。",{"type":17,"tag":25,"props":824,"children":825},{},[826],{"type":23,"value":827},"导入数据集，数据集来源中华语料库。可通过采样率调整数据集大小。",{"type":17,"tag":692,"props":829,"children":831},{"code":830},"train_data, train_label, test_data, test_label = VPR_dataset( 1500)\n",[832],{"type":17,"tag":697,"props":833,"children":834},{"__ignoreMap":7},[835],{"type":23,"value":830},{"type":17,"tag":25,"props":837,"children":838},{},[839],{"type":23,"value":840},"类似地，基于同元-昇思MindSpore AI工具箱提供的卷积算子（Conv2d）、批标准化算子（BN）、最大池化算子（MaxPool）等定义了如下的网络结构，导入数据集进行训练。并绘制Loss曲线",{"type":17,"tag":692,"props":842,"children":844},{"code":843},"net = SequentialCell([\n    convolution2dLayer(1, 80, (1, 251); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(80),\n    leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    convolution2dLayer(80, 60, (1, 251); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(60),\n    leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    convolution2dLayer(60, 60, (1, 5); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(60),\n    leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    flattenLayer(),\n    fullyConnectedLayer(60 * 2490, 2048),\n    batchNormalization1dLayer(2048),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(2048, 1024),\n    batchNormalization1dLayer(1024),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(1024, 256),\n    batchNormalization1dLayer(256),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(256, 20),\n    softmaxLayer()\n])\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Adam\", \"Accuracy\", 100, 150, 0.0005; Plots=true\n)\nnet = 
trainNetwork(train_data, train_label, net, options)\n",[845],{"type":17,"tag":697,"props":846,"children":847},{"__ignoreMap":7},[848],{"type":23,"value":843},{"type":17,"tag":25,"props":850,"children":851},{},[852],{"type":17,"tag":60,"props":853,"children":855},{"alt":62,"src":854},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065451.27467787926214388748473629585225:50540129085838:2400:CAD662ECCEB912DA1E11B43CE247B7E5545AF0974E4B277296F197706F825D15.png",[],{"type":17,"tag":25,"props":857,"children":858},{},[859],{"type":23,"value":860},"图12：Loss曲线",{"type":17,"tag":538,"props":862,"children":864},{"id":863},"_35-发动机剩余使用寿命预测",[865],{"type":17,"tag":31,"props":866,"children":867},{},[868],{"type":23,"value":869},"3.5 发动机剩余使用寿命预测",{"type":17,"tag":25,"props":871,"children":872},{},[873,875,880],{"type":23,"value":874},"本示例说明如何使用深度学习预测发动机的剩余使用寿命 (RUL) （",{"type":17,"tag":31,"props":876,"children":877},{},[878],{"type":23,"value":879},"基于同元-昇思MindSpore AI工具箱实现的详细代码见附录（5）",{"type":23,"value":581},{"type":17,"tag":25,"props":882,"children":883},{},[884],{"type":23,"value":885},"本示例使用涡轮风扇发动机退化仿真数据集。该示例训练一个CNN 网络，旨在根据表示发动机中各种传感器的时间序列数据来预测发动机的剩余使用寿命（预测性维护，以周期为单位度量）。训练数据包含 100 台发动机的仿真时间序列数据。每个序列的长度各不相同，对应于完整的运行至故障 (RTF) 实例。测试数据包含 100 个不完整序列，每个序列的末尾为相应的剩余使用寿命值。该数据集包含 100 个训练观测值和 100 个测试观测值。",{"type":17,"tag":25,"props":887,"children":888},{},[889],{"type":23,"value":890},"涡轮风扇发动机退化仿真数据集的每个时间序列表示一个发动机。每台发动机启动时的初始磨损程度和制造变差均未知。发动机在每个时间序列开始时运转正常，在到达序列中的某一时刻时出现故障。在训练集中，故障的规模不断增大，直到出现系统故障。",{"type":17,"tag":25,"props":892,"children":893},{},[894],{"type":23,"value":895},"数据集包含26列数值。每一行是在一个运转周期中截取的数据快照，每一列代表一个不同的变量。这些列分别对应于以下数据：",{"type":17,"tag":25,"props":897,"children":898},{},[899],{"type":23,"value":900},"- 第 1 列 - 单元编号",{"type":17,"tag":25,"props":902,"children":903},{},[904],{"type":23,"value":905},"- 第 2 列 - 
周期时间",{"type":17,"tag":25,"props":907,"children":908},{},[909],{"type":23,"value":910},"- 第 3-5 列 - 操作设置",{"type":17,"tag":25,"props":912,"children":913},{},[914],{"type":23,"value":915},"- 第 6-26 列 - 传感器测量值 1-21",{"type":17,"tag":25,"props":917,"children":918},{},[919],{"type":23,"value":920},"将3-26列作为特征数据，绘制展示每个特征前100条数据。",{"type":17,"tag":25,"props":922,"children":923},{},[924],{"type":17,"tag":60,"props":925,"children":927},{"alt":62,"src":926},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065528.90650226096141746683076366211114:50540129085838:2400:C1A57B04ADD093D4CA8A077711CCB91ED5A435C9FECB4411466684E51E1920CA.png",[],{"type":17,"tag":25,"props":929,"children":930},{},[931],{"type":23,"value":932},"图13：特征曲线",{"type":17,"tag":25,"props":934,"children":935},{},[936],{"type":23,"value":937},"由图可知，特征\"op_setting_3\", \"Sensor1\", \"Sensor5\", \"Sensor10\", \"Sensor16\", \"Sensor18\", \"Sensor19\"保持不变，可将其删除。将删除后的剩余17个特征数据进行标准化预处理。",{"type":17,"tag":25,"props":939,"children":940},{},[941],{"type":23,"value":942},"基于同元-昇思MindSpore AI工具箱提供的卷积算子（Conv1d）、Relu激活函数等定义了如下的神经网络结构。",{"type":17,"tag":692,"props":944,"children":946},{"code":945},"# 网络构建\nlayers = SequentialCell([\n    convolution1dLayer(17, 32, 5),\n    reluLayer(),\n    convolution1dLayer(32, 64, 7),\n    reluLayer(),\n    convolution1dLayer(64, 128, 11),\n    reluLayer(),\n    convolution1dLayer(128, 256, 13),\n    reluLayer(),\n    convolution1dLayer(256, 512, 15),\n    reluLayer(),\n    flattenLayer(),\n    fullyConnectedLayer(512 * sequence_length, 100),\n    reluLayer(),\n    dropoutLayer(0.5),\n    fullyConnectedLayer(100, 1),\n])\noptions = trainingOptions(\"RMSELoss\", \"Adam\", \"MSE\", 512, 200, 0.001; Plots=true)\nnet = trainNetwork(XTrain, YTrain, layers, 
options)\n",[947],{"type":17,"tag":697,"props":948,"children":949},{"__ignoreMap":7},[950],{"type":23,"value":945},{"type":17,"tag":25,"props":952,"children":953},{},[954],{"type":23,"value":955},"绘制RMSE损失曲线。",{"type":17,"tag":25,"props":957,"children":958},{},[959],{"type":17,"tag":60,"props":960,"children":962},{"alt":62,"src":961},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065605.42681818845114897095379117317536:50540129085838:2400:41082730D3049EF6690FDA334A0FB9D4B723137FDFE60C94DA2779FC4C8ECA14.png",[],{"type":17,"tag":25,"props":964,"children":965},{},[966],{"type":23,"value":967},"图14：RMSE Loss曲线",{"type":17,"tag":25,"props":969,"children":970},{},[971],{"type":23,"value":972},"将训练集和测试集导入训练好的神经网络中进行推理预测，将预测值与真实标签进行对比。",{"type":17,"tag":25,"props":974,"children":975},{},[976],{"type":17,"tag":60,"props":977,"children":979},{"alt":62,"src":978},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065623.40550293210037710991184245893752:50540129085838:2400:E1117FFC36625243590E848230976EAC23ECCFE6470EA9F8B3939C3ADA95F0EA.png",[],{"type":17,"tag":25,"props":981,"children":982},{},[983],{"type":23,"value":984},"图15：预测值与真实标签对比",{"type":17,"tag":25,"props":986,"children":987},{},[988],{"type":17,"tag":60,"props":989,"children":991},{"alt":62,"src":990},"https://fileserver.developer.huaweicloud.com/FileServer/getFile/cmtybbs/e64/154/b38/90a1d5d431e64154b387b3660e356ff5.20230129065638.63678593715862458543591987741637:50540129085838:2400:B8A2D8C449A325A8DA101C616EF3DF69A4F5EE6B954239842236593F6E4A38B0.png",[],{"type":17,"tag":25,"props":993,"children":994},{},[995],{"type":23,"value":996},"图16：测试集效果对比",{"type":17,"tag":25,"props":998,"children":999},{},[1000],{"type":17,"tag":31,"props":1001,"children":1002},{},[1003],{"type":23,"value":1004},"04",{"type":17,"tag":25,"props":1006,"children":1007},{},[1008],{"type":17,
"tag":31,"props":1009,"children":1010},{},[1011],{"type":23,"value":1012},"总结与展望",{"type":17,"tag":25,"props":1014,"children":1015},{},[1016],{"type":23,"value":1017},"同元软控与华为携手合作，打造了以高性能科学计算语言Julia为用户语言、以华为昇思MindSpore为底层的AI系列工具箱。如深度学习工具箱内含丰富的函数库、应用案例及完善的帮助文档。基于深度学习工具箱，用户可以采用Julia构建卷积神经网络、循环神经网络等各种类型深度学习网络模型，并进行训练计算，深度学习工具箱已在图像识别、语言识别、故障预测等方向开展了应用验证，取得了良好效果。",{"type":17,"tag":25,"props":1019,"children":1020},{},[1021],{"type":23,"value":1022},"同元软控与华为的合作开辟了国产软件携手发展的示范应用，未来，我们还会在AI领域继续展开深入合作，全新推出强化学习工具箱，并推进AI系列工具箱在航空、航天、车辆、能源等复杂系统多领域工程建模中的应用，实现机理-数据融合建模，探索智能建模仿真技术，打造新一代智能建模仿真软件平台。我们也希望能够有更多的企业、科研院所参与进来，共同打造国产智能软硬件平台。",{"type":17,"tag":25,"props":1024,"children":1025},{},[1026],{"type":23,"value":1027},"苏州同元软控信息技术有限公司：",{"type":17,"tag":25,"props":1029,"children":1030},{},[1031],{"type":23,"value":1032},"苏州同元软控信息技术有限公司（简称“同元软控”）是专业从事新一代系统级设计与仿真工业软件产品研发、工程服务及系统工程解决方案的高科技企业。团队历经二十多年技术积累与公司十多年持续研发，研制了国际先进、完全自主的科学计算与系统建模仿真平台MWORKS。",{"type":17,"tag":1034,"props":1035,"children":1037},"h2",{"id":1036},"代码附录",[1038],{"type":17,"tag":31,"props":1039,"children":1040},{},[1041],{"type":23,"value":1036},{"type":17,"tag":538,"props":1043,"children":1045},{"id":1044},"_1手写数字识别与倾斜角度预测",[1046],{"type":23,"value":1047},"（1）手写数字识别与倾斜角度预测",{"type":17,"tag":692,"props":1049,"children":1051},{"code":1050},"using TyDeepLearning\nusing TyPlot\nusing TyImages\n# 训练卷积神经网络用于图像分类\nXTrain, YTrain = DigitDatasetTrainData()\np = randperm(5000)\nindex = p[1:20]\nfigure(1)\nfor i in eachindex(range(1, 20))\n    subplot(4, 5, i)\n    imshow(XTrain[index[i], 1, :, :])\nend\nindex1 = p[1:750]\nindex2 = p[751:end]\nX_train = XTrain[index2, :, :, :]\nY_train = YTrain[index2]\nX_Test = XTrain[index1, :, :, :]\nY_Test = YTrain[index1]\n\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Momentum\", \"Accuracy\", 128, 50, 0.001; Plots=true\n)\n\nlayers = SequentialCell([\n    convolution2dLayer(1, 20, 5),\n    reluLayer(),\n    maxPooling2dLayer(2; Stride=2),\n    flattenLayer(),\n    
fullyConnectedLayer(20 * 14 * 14, 10),\n    softmaxLayer(),\n])\nnet = trainNetwork(X_train, Y_train, layers, options)\nfunction preclasses(prob, classes)\n    ypredclasses = []\n    for i in eachindex(range(1, size(prob)[1]))\n        maxindex = 0\n        maxnum = 0\n        for k in eachindex(classes)\n            if prob[i, :][k] > maxnum\n                maxnum = prob[i, :][k]\n                maxindex = k\n            end\n        end\n        ypredclasses = append!(ypredclasses, [unique(classes)[maxindex]])\n    end\n    return ypredclasses\nend\nYPred = TyDeepLearning.predict(net, X_Test)\nclasses = [i - 1 for i in range(1, 10)]\nYPred1 = preclasses(YPred, classes)\naccuracy = Accuracy(YPred, Y_Test)\n\nfigure(3)\np2 = randperm(750)\nindex = p2[1:9]\nfor i in eachindex(range(1, 9))\n    TyPlot.subplot(3, 3, i)\n    TyImages.imshow(X_Test[index[i], 1, :, :])\n    title1 = \"Prediction Label\"\n    title2 = string(YPred1[index[i]])\n    title(string(title1, \": \", title2))\nend\n# 手写数字识别角度预测\nusing TyDeepLearning\nusing TyImages\nusing TyPlot\n\nXTrain, YTrain = DigitTrain4DArrayData()\nindex = randperm(5000)[1:20]\nfigure(1)\nfor i in eachindex(range(1, 20))\n    subplot(4, 5, i)\n    imshow(XTrain[index[i], 1, :, :])\nend\nlayers = SequentialCell([\n    convolution2dLayer(1, 25, 12),\n    reluLayer(),\n    flattenLayer(),\n    fullyConnectedLayer(25 * 28 * 28, 1),\n])\noptions = trainingOptions(\"RMSELoss\", \"Adam\", \"MSE\", 10, 50, 0.0001; Plots=true)\nnet = trainNetwork(XTrain, YTrain, layers, options)\nXTest, YTest = DigitTest4DArrayData()\nYPred = TyDeepLearning.predict(net, XTest)\nrmse = sqrt(mse(YTest, YPred))\n\nindex = randperm(5000)[1:9]\nfigure(2)\nfor i in range(1, 9)\n    subplot(3, 3, i)\n    hold(\"on\")\n    imshow(XTest[index[i], 1, :, :])\n    x = [7:21...]\n    plot(x, tan((90 + YPred[index[i]]) / 180 * pi) * (x .- 14) .+ 14, \"r\")\n    ax = gca()\n    ax.set_ylim(28,0)\n    ax.set_xlim(0, 28)\n    
hold(\"off\")\nend\n",[1052],{"type":17,"tag":697,"props":1053,"children":1054},{"__ignoreMap":7},[1055],{"type":23,"value":1050},{"type":17,"tag":538,"props":1057,"children":1059},{"id":1058},"_2日语元音字符分类",[1060],{"type":23,"value":1061},"（2）日语元音字符分类",{"type":17,"tag":692,"props":1063,"children":1065},{"code":1064},"using TyDeepLearning\nusing TyPlot\n# 数据导入\npath1 = \"data/JapaneseVowels/train.csv\"\npath2 = \"data/JapaneseVowels/trainlabels.csv\"\npath3 = \"data/JapaneseVowels/test.csv\"\npath4 = \"data/JapaneseVowels/testlabels.csv\"\ntrain_data = Array(CSV.read(path1, DataFrame; header=false))\ntrain_label = Array(CSV.read(path2, DataFrame; header=false))[:,1]\ntest_data = Array(CSV.read(path3, DataFrame; header=false))\ntest_label = Array(CSV.read(path4, DataFrame; header=false))[:,1]\n\nfigure(1)\nplot(train_data[1:12,1:20]')\ntitle(\"Training Observation 1\")\ntruncation_number = 29\ntrain_data = Array(train_data[:, 1:truncation_number])\nfor i in range(1, 270 * 12)\n    for j in range(1, truncation_number)\n        if train_data[i, j] === missing\n            train_data[i, j] = 0\n        end\n    end\nend\n\ntrain_data = reshape(train_data, (12, 270, truncation_number))\ntrain_data = permutedims(train_data, (2, 3, 1))\ntrain_label = Array(train_label)[:, 1]\ntest_data = Array(test_data[:, 1:truncation_number])\nfor i in range(1, 370 * 12)\n    for j in range(1, truncation_number)\n        if test_data[i, j] === missing\n            test_data[i, j] = 0\n        end\n    end\nend\ntest_data = reshape(test_data, (12, 370, truncation_number))\ntest_data = permutedims(test_data, (2, 3, 1))\ntest_label = Array(test_label)[:, 1]\n\n# 构建网络\nlayers = SequentialCell([\n    bilstmLayer(12, 100; NumLayers=3, Batch_First=true),\n    flattenLayer(),\n    fullyConnectedLayer(truncation_number * 200, 9),\n    softmaxLayer(),\n])\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Adam\", \"Accuracy\", 27, 200, 0.001; Shuffle=true, Plots=true\n)\nnet = 
trainNetwork(train_data, train_label, layers, options)\nYPred = TyDeepLearning.classify(net, test_data)\naccuracy = Accuracy(YPred, test_label)\nprint(accuracy)\n",[1066],{"type":17,"tag":697,"props":1067,"children":1068},{"__ignoreMap":7},[1069],{"type":23,"value":1064},{"type":17,"tag":538,"props":1071,"children":1073},{"id":1072},"_3基于lstm的人类活动分类任务",[1074],{"type":23,"value":1075},"（3）基于LSTM的人类活动分类任务",{"type":17,"tag":692,"props":1077,"children":1079},{"code":1078},"using TyDeepLearning\nusing TyPlot\n\ntrain_data_label = CSV.read(\n    \"data/HumanActivity/HumanActivityTrain.csv\", DataFrame; header=false\n)\ntest_data_label = CSV.read(\n    \"data/HumanActivity/HumanActivityTest.csv\", DataFrame; header=false\n)\ntrain_data_label = Array(train_data_label)\ntest_data_label = Array(test_data_label)\n# 可视化\n\nfigure(2)\nline_color = [\"#0072BD\", \"#D95319\", \"#EDB120\", \"#7E2F8E\", \"#77AC30\"]\nfor i in range(1, 5)\n    classes = [i - 1 for i in range(1, 5)]\n    idx = TyBase.find(train_data_label[1:64480, 4] .== classes[i])\n    hold(\"on\")\n    plot(idx, train_data_label[idx, 1], line_color[i])\nend\nhold(\"off\")\nxlabel(\"Time Step\")\nylabel(\"Acceleration\")\ntitle(\"Training Sequence 1, Feature 1\")\nclasses = [\"Dancing\", \"Running\", \"Sitting\", \"Standing\", \"Walking\"]\nlegend(classes)\n\nfigure(3)\nplot(test_data_label[:, 1:3])\ntitle(\"Test Data\")\nxlabel(\"Time Step\")\nlegend([\"Feature 1\", \"Feature 2\", \"Feature 3\"])\n\nfunction create_datasets(data, t_window)\n    out_seq = reshape(data[1:(1 + t_window - 1), :], (1, t_window, size(data)[2]))\n    L = size(data)[1]\n    for i in range(2, L - t_window + 1)\n        train_seq = data[i:(i + t_window - 1), :]\n        train_seq = reshape(train_seq, (1, t_window, size(data)[2]))\n        out_seq = cat(out_seq, train_seq; dims=1)\n    end\n    return out_seq\nend\nt_window = 6\ntrain_data = train_data_label[:, 1:3]\ntrain_data = create_datasets(train_data, t_window)\ntrain_label = 
train_data_label[t_window:end, 4]\n\ntest_data = test_data_label[:, 1:3]\ntest_data = create_datasets(test_data, t_window)\ntest_label = test_data_label[t_window:end, 4]\n\nnumFeatures = 3\nnumHiddenUnits = 200\nnumClasses = 5\nlayers = SequentialCell([\n    lstmLayer(numFeatures, numHiddenUnits; NumLayers=1),\n    flattenLayer(),\n    fullyConnectedLayer(numHiddenUnits * t_window, numClasses),\n    softmaxLayer(),\n])\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Adam\", \"Accuracy\", 512, 200, 0.005; Plots=true\n)\nnet = trainNetwork(train_data, train_label, layers, options)\npreruslt = TyDeepLearning.classify(net, test_data)\naccuracy = Accuracy(preruslt, test_label)\nprelabel = Array{Int}(undef, 53883)\nfor i in 1:53883\n    prelabel_item = findmax(preruslt[i, :])[2] - 1\n    prelabel[i] = prelabel_item\nend\nfigure(4)\nidx = [1:53883...]\nhold(\"on\")\nplot(idx, prelabel, \".\", idx, test_label, \"-\")\nlegend(\"Predicted\")\n# plot(idx, prelabel, 0.1, \"y\", idx, test_label, \"-\")\nhold(\"off\")\nxlabel(\"Time Step\")\nylabel(\"Activity\")\ntitle(\"Predicted Activities\")\nlegend([\"Predicted\", \"Test Data\"])\n",[1080],{"type":17,"tag":697,"props":1081,"children":1082},{"__ignoreMap":7},[1083],{"type":23,"value":1078},{"type":17,"tag":538,"props":1085,"children":1087},{"id":1086},"_4声纹识别",[1088],{"type":23,"value":1089},"（4）声纹识别",{"type":17,"tag":692,"props":1091,"children":1093},{"code":1092},"using TyDeepLearning\n\ntrain_data, train_label, test_data, test_label = VPR_dataset( 1500)\n\nnet = SequentialCell([\n    convolution2dLayer(1, 80, (1, 251); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(80),\n    leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    convolution2dLayer(80, 60, (1, 251); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(60),\n    leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    convolution2dLayer(60, 60, (1, 5); PaddingMode=\"valid\"),\n    batchNormalization2dLayer(60),\n    
leakyReluLayer(0.2),\n    maxPooling2dLayer((1, 3)),\n    flattenLayer(),\n    fullyConnectedLayer(60 * 2490, 2048),\n    batchNormalization1dLayer(2048),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(2048, 1024),\n    batchNormalization1dLayer(1024),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(1024, 256),\n    batchNormalization1dLayer(256),\n    leakyReluLayer(0.2),\n    fullyConnectedLayer(256, 20),\n    softmaxLayer()\n])\noptions = trainingOptions(\n    \"CrossEntropyLoss\", \"Adam\", \"Accuracy\", 100, 150, 0.0005; Plots=true\n)\nnet = trainNetwork(train_data, train_label, net, options)\n\ntest_pred = TyDeepLearning.predict(net, test_data)\ntrain_pred = TyDeepLearning.predict(net, train_data)\n\ntrain_acc = Accuracy(train_pred, train_label)\ntest_acc = Accuracy(test_pred, test_label)\n",[1094],{"type":17,"tag":697,"props":1095,"children":1096},{"__ignoreMap":7},[1097],{"type":23,"value":1092},{"type":17,"tag":538,"props":1099,"children":1101},{"id":1100},"_5发动机剩余寿命预测",[1102],{"type":23,"value":1103},"（5）发动机剩余寿命预测",{"type":17,"tag":692,"props":1105,"children":1107},{"code":1106},"using TyDeepLearning\nusing DataFrames\nusing TyPlot\ndir = \"data/RUL/\"\n# 训练集数据处理\npath1 = dir * \"train_FD001.csv\"\ntrain_FD001 = CSV.read(path1, DataFrame; header=false)\n\nf1 = figure(\"Feature Data\"; figsize=[7, 8])\ncolor_list = [\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\"]\nlegend_list = [\n    \"op_setting_1\",\n    \"op_setting_2\",\n    \"op_setting_3\",\n    \"Sensor1\",\n    \"Sensor2\",\n    \"Sensor3\",\n    \"Sensor4\",\n    \"Sensor5\",\n    \"Sensor6\",\n    \"Sensor7\",\n    \"Sensor8\",\n    \"Sensor9\",\n    \"Sensor10\",\n    \"Sensor11\",\n    \"Sensor12\",\n    \"Sensor13\",\n    \"Sensor14\",\n    \"Sensor15\",\n    \"Sensor16\",\n    \"Sensor17\",\n    \"Sensor18\",\n    \"Sensor19\",\n    \"Sensor20\",\n    \"Sensor21\",\n]\nfor i in range(3, 26)\n    subplot(24, 1, i - 2)\n    plot(train_FD001[1:100, i], color_list[i % 7 + 1])\n    
yticklabels([])\n    legend([legend_list[i - 2]]; loc=\"northeast\")\nend\n\n# 寻找每个单元的最大cycle\ntrain_count = zeros(Int64, (100))\nfor i in range(1, size(train_FD001)[1])\n    for j in range(1, 100)\n        if train_FD001[i, 1] == j\n            train_count[j] = train_count[j] + 1\n        else\n            continue\n        end\n    end\nend\nRUL = zeros(Int64, (size(train_FD001)[1]))\nfor i in range(1, size(train_FD001)[1])\n    for j in range(1, 100)\n        if train_FD001[i, 1] == j\n            RUL[i] = train_count[j] - train_FD001[i, 2]\n        else\n            continue\n        end\n    end\nend\n# 删除某些在所有步长中保持不变的特征[\"op_setting_3\", Sensor1\", \"Sensor5\", \"Sensor10\", \"Sensor16\", \"Sensor18\", \"Sensor19\"]\nselect!(\n    train_FD001,\n    Not([:Column5, :Column6, :Column10, :Column15, :Column21, :Column23, :Column24]),\n)\nf1 = figure(\"Feature Data\"; figsize=[6, 6])\ncolor_list = [\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\"]\nlegend_list = [\n    \"op_setting_1\",\n    \"op_setting_2\",\n    \"Sensor2\",\n    \"Sensor3\",\n    \"Sensor4\",\n    \"Sensor6\",\n    \"Sensor7\",\n    \"Sensor8\",\n    \"Sensor9\",\n    \"Sensor11\",\n    \"Sensor12\",\n    \"Sensor13\",\n    \"Sensor14\",\n    \"Sensor15\",\n    \"Sensor17\",\n    \"Sensor20\",\n    \"Sensor21\",\n]\nfor i in range(3, 19)\n    subplot(17, 1, i - 2)\n    plot(train_FD001[:, i], color_list[i % 7 + 1])\n    yticklabels([])\n    legend([legend_list[i - 2]]; loc=\"northeast\")\nend\n\n# 添加一列名为RUL\ntrain_FD001 = insertcols!(train_FD001, ncol(train_FD001) + 1, :RUL => RUL)\n\n# 标准化\nfeats = Array(train_FD001[!, 3:19])\ndata_max = maximum(feats; dims=1)\ndata_min = minimum(feats; dims=1)\nfeats_norm = (feats .- data_min) ./ (data_max .- data_min)\n# 滑动窗口为31\nsequence_length = 31\nfunction gen_sequence(data_array, data_label, seq_length)\n    num_elements = size(data_array)[1]\n    label = data_label[seq_length:num_elements]\n    data = reshape(data_array[1:seq_length, :], (1, seq_length, 
17))\n    dict = zip(range(2, num_elements - seq_length + 1), range(seq_length + 1, num_elements))\n    for (start, stop) in dict\n        data = vcat(data, reshape(data_array[start:stop, :], (1, seq_length, 17)))\n    end\n    return data, label\nend\ncount_sum = zeros(Int64, (101))\nfor i in range(1, 100)\n    count_sum[i + 1] = train_count[i] + count_sum[i]\nend\ndata_label = RUL[1:192]\ndata_array = feats_norm[1:192, :]\ndata, label = gen_sequence(data_array, data_label, sequence_length)\nfor j in range(2, 100)\n    data_array = feats_norm[(count_sum[j] + 1):count_sum[j + 1], :]\n    data_label = RUL[(count_sum[j] + 1):count_sum[j + 1]]\n    data_id, data_id_label = gen_sequence(data_array, data_label, sequence_length)\n    data = cat(data, data_id; dims=1)\n    label = cat(label, data_id_label; dims=1)\nend\n# 裁剪响应RUL阈值为150\nRUL_Threshold = 150\nfor i in range(1, size(label)[1])\n    if label[i] > 150\n        label[i] = 150\n    end\nend\nXTrain = permutedims(data, (1, 3, 2))\nYTrain = label\n# 网络构建\nlayers = SequentialCell([\n    convolution1dLayer(17, 32, 5),\n    reluLayer(),\n    convolution1dLayer(32, 64, 7),\n    reluLayer(),\n    convolution1dLayer(64, 128, 11),\n    reluLayer(),\n    convolution1dLayer(128, 256, 13),\n    reluLayer(),\n    convolution1dLayer(256, 512, 15),\n    reluLayer(),\n    flattenLayer(),\n    fullyConnectedLayer(512 * sequence_length, 100),\n    reluLayer(),\n    dropoutLayer(0.5),\n    fullyConnectedLayer(100, 1),\n])\noptions = trainingOptions(\"RMSELoss\", \"Adam\", \"MSE\", 512, 200, 0.001; Plots=true)\nnet = trainNetwork(XTrain, YTrain, layers, options)\n# 测试集数据处理\npath2 = dir * \"test_FD001.csv\"\npath3 = dir * \"RUL_FD001.csv\"\ntest_FD001 = CSV.read(path2, DataFrame; header=false)\nRUL_FD001 = CSV.read(path3, DataFrame; header=false)\nselect!(\n    test_FD001,\n    Not([:Column5, :Column6, :Column10, :Column15, :Column21, :Column23, :Column24]),\n)\n\n# 寻找每个单元的最大cycle\ntest_count = zeros(Int64, (100))\nfor i in range(1, 
size(test_FD001)[1])\n    for j in range(1, 100)\n        if test_FD001[i, 1] == j\n            test_count[j] = test_count[j] + 1\n        else\n            continue\n        end\n    end\nend\n\ntest_count_sum = zeros(Int64, (101))\nfor i in range(1, 100)\n    test_count_sum[i + 1] = test_count[i] + test_count_sum[i]\nend\n\ntest_RUL = zeros(Int64, (size(test_FD001)[1]))\nfor i in range(1, size(test_FD001)[1])\n    for j in range(1, 100)\n        if test_FD001[i, 1] == j\n            test_RUL[i] = test_count[j] - test_FD001[i, 2] + RUL_FD001[j, 1]\n        else\n            continue\n        end\n    end\nend\n\n# 标准化处理\ndata_test = Array(test_FD001[!, 3:19])\ndata_max = maximum(data_test; dims=1)\ndata_min = minimum(data_test; dims=1)\ntest_norm = (data_test .- data_min) ./ (data_max .- data_min)\n\ntest_data_array = test_norm[(test_count_sum[1] + 1):test_count_sum[1 + 1], :]\ntest_data = reshape(\n    test_data_array[(test_count[1] - sequence_length + 1):end, :], (1, sequence_length, 17)\n)\nfor j in range(2, 100)\n    test_data_array = test_norm[(test_count_sum[j] + 1):test_count_sum[j + 1], :]\n    datadata = test_data_array[(test_count[j] - sequence_length + 1):end, :]\n    data_reshape = reshape(datadata, (1, sequence_length, 17))\n    test_data = cat(test_data, data_reshape; dims=1)\nend\n\n# 裁剪响应RUL阈值为150\nYTest = Array(RUL_FD001)\nRUL_Threshold = 150\nfor i in range(1, size(YTest)[1])\n    if YTest[i] > 150\n        YTest[i] = 150\n    end\nend\nXTest = permutedims(test_data, (1, 3, 2))\nY = TyDeepLearning.predict(net, XTest)\nerror = sqrt(mse(YTest, Y))\n\nhold(\"on\")\nplot(Y, \"-o\")\nplot(YTest, \"-v\")\nlegend([\"Prediction value\", \"True value\"])\nhold(\"off\")\n\nsequence_length = 31\nfunction gen_sequence(data_array, data_label, seq_length)\n    num_elements = size(data_array)[1]\n    label = data_label[seq_length:num_elements]\n    data = reshape(data_array[1:seq_length, :], (1, seq_length, 17))\n    dict = zip(range(2, num_elements - 
seq_length + 1), range(seq_length + 1, num_elements))\n    for (start, stop) in dict\n        data = vcat(data, reshape(data_array[start:stop, :], (1, seq_length, 17)))\n    end\n    return data, label\nend\ncount_sum = zeros(Int64, (101))\nfor i in range(1, 100)\n    count_sum[i + 1] = train_count[i] + count_sum[i]\nend\ndata_label = RUL[848:1116]\ndata_array = feats_norm[848:1116, :]\ndata, label = gen_sequence(data_array, data_label, sequence_length)\n# 裁剪响应RUL阈值为150\nRUL_Threshold = 150\nfor i in range(1, size(label)[1])\n    if label[i] > 150\n        label[i] = 150\n    end\nend\nXTrain1 = permutedims(data, (1, 3, 2))\nYTrain1 = label\nY1 = TyDeepLearning.predict(net, XTrain1)\nerror = sqrt(mse(YTrain1, Y1))\nhold(\"on\")\nplot(Y1)\nplot(YTrain1,)\nlegend([\"Prediction value\", \"True value\"])\nhold(\"off\")\n",[1108],{"type":17,"tag":697,"props":1109,"children":1110},{"__ignoreMap":7},[1111],{"type":23,"value":1106},{"title":7,"searchDepth":1113,"depth":1113,"links":1114},4,[1115,1117,1118,1119,1120,1121],{"id":540,"depth":1116,"text":543},3,{"id":546,"depth":1116,"text":552},{"id":715,"depth":1116,"text":721},{"id":804,"depth":1116,"text":810},{"id":863,"depth":1116,"text":869},{"id":1036,"depth":1122,"text":1036,"children":1123},2,[1124,1125,1126,1127,1128],{"id":1044,"depth":1116,"text":1047},{"id":1058,"depth":1116,"text":1061},{"id":1072,"depth":1116,"text":1075},{"id":1086,"depth":1116,"text":1089},{"id":1100,"depth":1116,"text":1103},"markdown","content:news:zh:2096.md","content","news/zh/2096.md","news/zh/2096","md",1776506062793]