# 训练和评估

## 构建训练和评估

### 定义数据集

[5]:

import mindspore.dataset as ds
import numpy as np

def get_data(num, w=2.0, b=3.0):
    """Yield `num` (feature, label) pairs sampled from y = w*x + b plus unit Gaussian noise.

    Each yielded element is a pair of shape-(1,) float32 arrays.
    """
    for _ in range(num):
        # Draw the feature first, then the noise — the draw order fixes the RNG stream.
        feature = np.random.uniform(-10.0, 10.0)
        target = feature * w + b + np.random.normal(0, 1)
        yield (np.array([feature], dtype=np.float32),
               np.array([target], dtype=np.float32))

def create_dataset(num_data, batch_size=16):
    """Build a batched MindSpore dataset from the sample generator.

    The generator is materialized into a list so the dataset is re-iterable.
    """
    samples = list(get_data(num_data))
    dataset = ds.GeneratorDataset(samples, column_names=['data', 'label'])
    return dataset.batch(batch_size)


### 构建前向网络

[6]:

import numpy as np
import mindspore.nn as nn
from mindspore.common.initializer import Normal

class LinearNet(nn.Cell):
    """Single-layer linear model: one Dense(1 -> 1) unit."""

    def __init__(self):
        super().__init__()
        # Both the weight and the bias are initialized from Normal(sigma=0.02).
        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))

    def construct(self, x):
        """Forward pass: apply the fully-connected layer to `x`."""
        return self.fc(x)


### 构建训练流程

MindSpore的nn模块提供了训练网络封装函数TrainOneStepCell，用来封装网络和优化器。其参数如下：

• network：训练网络，只支持单输出网络。

• optimizer： 用于更新网络参数的优化器。

• sens：反向传播的输入，缩放系数，默认值为1.0。

[7]:

# Build the training dataset.
train_dataset = create_dataset(num_data=160, batch_size=16)

net = LinearNet()
loss = nn.MSELoss()

# Attach the loss function to the forward network.
net_with_loss = nn.WithLossCell(net, loss)
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)

# Wrap network and optimizer into a single training cell.
train_net = nn.TrainOneStepCell(net_with_loss, opt)
# Switch the network to training mode.
train_net.set_train()

# The actual training loop.
# NOTE: `step` is cumulative across epochs (it is never reset), which is why the
# printed step can exceed `steps` in later epochs — matches the sample output.
step = 0
epochs = 2
steps = train_dataset.get_dataset_size()

for epoch in range(epochs):
    for batch in train_dataset.create_dict_iterator():
        result = train_net(batch["data"], batch["label"])
        print(f"Epoch: [{epoch} / {epochs}], "
              f"step: [{step} / {steps}], "
              f"loss: {result}")
        step += 1

Epoch: [0 / 2], step: [0 / 10], loss: 139.95065
Epoch: [0 / 2], step: [1 / 10], loss: 77.1288
Epoch: [0 / 2], step: [2 / 10], loss: 23.511435
Epoch: [0 / 2], step: [3 / 10], loss: 15.275428
Epoch: [0 / 2], step: [4 / 10], loss: 80.57905
Epoch: [0 / 2], step: [5 / 10], loss: 86.396
Epoch: [0 / 2], step: [6 / 10], loss: 78.92796
Epoch: [0 / 2], step: [7 / 10], loss: 16.025606
Epoch: [0 / 2], step: [8 / 10], loss: 2.996492
Epoch: [0 / 2], step: [9 / 10], loss: 9.382026
Epoch: [1 / 2], step: [10 / 10], loss: 46.85878
Epoch: [1 / 2], step: [11 / 10], loss: 78.591515
Epoch: [1 / 2], step: [12 / 10], loss: 39.523586
Epoch: [1 / 2], step: [13 / 10], loss: 3.0048246
Epoch: [1 / 2], step: [14 / 10], loss: 7.835808
Epoch: [1 / 2], step: [15 / 10], loss: 27.37307
Epoch: [1 / 2], step: [16 / 10], loss: 34.076313
Epoch: [1 / 2], step: [17 / 10], loss: 54.53374
Epoch: [1 / 2], step: [18 / 10], loss: 19.80341
Epoch: [1 / 2], step: [19 / 10], loss: 1.8542566


### 构建评估流程

MindSpore的nn模块提供了评估网络封装函数WithEvalCell，用来在验证集上评估模型训练的效果。其参数如下：

• network：前向网络。

• loss_fn：损失函数。

• add_cast_fp32：是否将数据类型调整为float32。

nn.WithEvalCell只接受两个输入，分别为数据data及其对应的标签label，用前面定义的前向网络和损失函数构建一个评估网络，示例如下：

[8]:

eval_dataset = create_dataset(num_data=160, batch_size=16)

# Build the evaluation network from the trained forward net and its loss function.
eval_net = nn.WithEvalCell(net, loss)
eval_net.set_train(False)

# Metrics.
# FIX: the original rebound the name `loss` to the metric, shadowing the loss
# function used two lines above — use a distinct name for the metric instead.
loss_metric = nn.Loss()
mae = nn.MAE()

mae.clear()
loss_metric.clear()

# Evaluation loop. WithEvalCell outputs (loss_value, logits, label).
for data in eval_dataset.create_dict_iterator():
    outputs = eval_net(data["data"], data["label"])
    mae.update(outputs[1], outputs[2])
    loss_metric.update(outputs[0])

# Final metric values.
mae_result = mae.eval()
loss_result = loss_metric.eval()

print("mae: ", mae_result)
print("loss: ", loss_result)

mae:  2.9597126245498657
loss:  11.539738941192628


## 自定义训练和评估

### 自定义训练网络

[10]:

import mindspore.ops as ops

class CustomTrainOneStepCell(nn.Cell):
    """Custom one-step training cell: forward pass, gradient computation, weight update.

    FIX: the original `construct` only computed the loss and never computed
    gradients or invoked the optimizer, so the "training" cell never updated any
    weights. Restored the standard GradOperation-based update step.
    """

    def __init__(self, network, optimizer):
        """Two arguments: the network-with-loss and the optimizer."""
        super(CustomTrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network                    # forward network (with loss attached)
        self.optimizer = optimizer                # optimizer that updates the weights
        self.weights = self.optimizer.parameters  # parameters to be updated
        # Gradient operator returning gradients w.r.t. the weight list.
        self.grad = ops.GradOperation(get_by_list=True)

    def construct(self, *inputs):
        loss = self.network(*inputs)                             # forward: loss value
        grads = self.grad(self.network, self.weights)(*inputs)   # backward: gradients
        self.optimizer(grads)                                    # apply the update
        return loss

net1 = LinearNet()   # forward network
loss = nn.MSELoss()  # loss function

# Attach the loss function to the forward network.
net_with_loss = nn.WithLossCell(net1, loss)
opt = nn.Momentum(net1.trainable_params(), learning_rate=0.005, momentum=0.9)

# Wrap network and optimizer into the custom training cell.
train_net = CustomTrainOneStepCell(net_with_loss, opt)
# Switch the network to training mode.
train_net.set_train()

# The actual training loop (reuses `train_dataset` built earlier).
step = 0
epochs = 2
steps = train_dataset.get_dataset_size()
for epoch in range(epochs):
    for batch in train_dataset.create_dict_iterator():
        result = train_net(batch["data"], batch["label"])
        print(f"Epoch: [{epoch} / {epochs}], "
              f"step: [{step} / {steps}], "
              f"loss: {result}")
        step += 1

Epoch: [0 / 2], step: [0 / 10], loss: 70.774574
Epoch: [0 / 2], step: [1 / 10], loss: 71.33737
Epoch: [0 / 2], step: [2 / 10], loss: 63.126896
Epoch: [0 / 2], step: [3 / 10], loss: 8.946123
Epoch: [0 / 2], step: [4 / 10], loss: 32.131054
Epoch: [0 / 2], step: [5 / 10], loss: 38.90644
Epoch: [0 / 2], step: [6 / 10], loss: 126.410255
Epoch: [0 / 2], step: [7 / 10], loss: 41.496185
Epoch: [0 / 2], step: [8 / 10], loss: 5.7309575
Epoch: [0 / 2], step: [9 / 10], loss: 16.104172
Epoch: [1 / 2], step: [10 / 10], loss: 26.39038
Epoch: [1 / 2], step: [11 / 10], loss: 52.73621
Epoch: [1 / 2], step: [12 / 10], loss: 38.053413
Epoch: [1 / 2], step: [13 / 10], loss: 4.555399
Epoch: [1 / 2], step: [14 / 10], loss: 1.8704597
Epoch: [1 / 2], step: [15 / 10], loss: 11.614007
Epoch: [1 / 2], step: [16 / 10], loss: 25.868422
Epoch: [1 / 2], step: [17 / 10], loss: 26.153322
Epoch: [1 / 2], step: [18 / 10], loss: 9.847598
Epoch: [1 / 2], step: [19 / 10], loss: 2.0711172


### 自定义评估网络

[11]:

class CustomWithEvalCell(nn.Cell):
    """Evaluation wrapper for a network whose samples carry two labels."""

    def __init__(self, network):
        super(CustomWithEvalCell, self).__init__(auto_prefix=False)
        self.network = network

    def construct(self, data, label1, label2):
        """Three inputs: one data tensor and its two corresponding labels."""
        predictions = self.network(data)
        return predictions, label1, label2

custom_eval_net = CustomWithEvalCell(net)
custom_eval_net.set_train(False)

[11]:

CustomWithEvalCell<
(network): LinearNet<
(fc): Dense<input_channels=1, output_channels=1, has_bias=True>
>
>


## 网络的权重共享

[12]:

# Instantiate the forward network.
net = LinearNet()
# Define the loss function and attach it to the forward network.
loss = nn.MSELoss()
net_with_loss = nn.WithLossCell(net, loss)
# Define the optimizer.
# FIX: the original had only the comment with no optimizer definition, leaving a
# stale `opt` built over net1.trainable_params() — which would not update this new
# `net`'s weights. Build the optimizer over the parameters actually being trained.
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)

# Build the training network.
train_net = nn.TrainOneStepCell(net_with_loss, opt)
train_net.set_train()

# Build the evaluation network; it shares weights with train_net through `net`.
eval_net = nn.WithEvalCell(net, loss)
eval_net.set_train(False)

[12]:

WithEvalCell<
(_network): LinearNet<
(fc): Dense<input_channels=1, output_channels=1, has_bias=True>
>
(_loss_fn): MSELoss<>
>


train_net和eval_net均在net实例的基础上封装，因此在进行模型评估时，不需要加载train_net的权重。

[13]:

# Build the training network.
train_net = nn.TrainOneStepCell(net_with_loss, opt)
train_net.set_train()

# Instantiate the forward network a second time.
net2 = LinearNet()
# Build the evaluation network on top of net2; since net2 is a fresh instance,
# its parameters are independent of the ones train_net updates.
eval_net = nn.WithEvalCell(net2, loss)
eval_net.set_train(False)

[13]:

WithEvalCell<
(_network): LinearNet<
(fc): Dense<input_channels=1, output_channels=1, has_bias=True>
>
(_loss_fn): MSELoss<>
>