Building a model with PyTorch
The model-building process
- Define your own model class
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # two conv layers: 3 input channels -> 6 -> 16, 5x5 kernels
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # three fully connected layers; 16*5*5 matches a 32x32 input after conv/pool
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv -> ReLU -> 2x2 max pooling
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # flatten everything except the batch dimension
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
```
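A quick way to sanity-check the layer sizes is a forward pass on random input. This is a minimal sketch assuming 32x32 RGB images (e.g. CIFAR-10-sized input), which is what the `16*5*5` flatten size implies; the batch size of 4 is arbitrary:

```python
# hypothetical smoke test: a batch of 4 random 3x32x32 images
dummy = torch.randn(4, 3, 32, 32)
out = net(dummy)
print(out.shape)  # expected: torch.Size([4, 10]), one score per class
```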
- Build the loss function and optimizer
```python
import torch.optim as optim

criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # SGD with momentum
```
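If you want the learning rate to decay as training progresses, a scheduler can be attached to the same optimizer. The original notes do not use one; this is just a sketch of the common pattern:

```python
from torch.optim.lr_scheduler import StepLR

# hypothetical schedule: multiply the learning rate by 0.1 every 10 epochs
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
# call scheduler.step() once per epoch, after that epoch's optimizer.step() calls
```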
- Training loop (forward, backward, update)
```python
def train(epoch):
    net.train()  # switch to training mode
    running_loss = 0.0
    for batch_idx, (data, target) in enumerate(trainloader):
        optimizer.zero_grad()             # clear accumulated gradients
        output = net(data)                # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()                   # backward pass
        optimizer.step()                  # update parameters
        running_loss += loss.item()
        if batch_idx % 2000 == 1999:      # print the running average every 2000 mini-batches
            print('Train Epoch: {} [{}]\tLoss: {:.3f}'.format(
                epoch, batch_idx + 1, running_loss / 2000))
            running_loss = 0.0
```
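The loop above assumes a `trainloader` already exists. Here is a minimal sketch of building one with torchvision's CIFAR-10; the dataset is an assumption (the original does not name it), but the 3-channel 32x32 input and 10 output classes match it:

```python
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

# hypothetical data pipeline: convert to tensors and normalize to [-1, 1]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
```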
- Evaluation
```python
def evaluate(data_loader):
    net.eval()  # switch to evaluation mode
    loss = 0.0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in data_loader:
            if torch.cuda.is_available():
                # assumes net has also been moved to the GPU, e.g. net.cuda()
                data = data.cuda()
                target = target.cuda()
            output = net(data)
            # sum the per-sample losses over the batch, average over the dataset at the end
            loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # index of the max score = predicted class
            correct += pred.eq(target.view_as(pred)).sum().item()
    loss /= len(data_loader.dataset)
    print('\nAverage loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        loss, correct, len(data_loader.dataset),
        100. * correct / len(data_loader.dataset)))
```
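Putting the pieces together, a driver loop can alternate training and evaluation and save the weights at the end. This is a sketch assuming the hypothetical `testloader` defined above; the epoch count is arbitrary:

```python
num_epochs = 2  # hypothetical budget

for epoch in range(1, num_epochs + 1):
    train(epoch)
    evaluate(testloader)

# persist only the learned parameters (state_dict), the usual PyTorch convention
torch.save(net.state_dict(), 'net.pth')

# later: rebuild the architecture and load the weights back
net2 = Net()
net2.load_state_dict(torch.load('net.pth'))
net2.eval()
```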