        out = self.max_pooling2(out)
        out = out.view(batchsize, -1)          # flatten the feature maps for the fully connected layer
        out = self.fc(out)
        out = F.log_softmax(out, dim=1)        # log-probabilities over the classes
        return out
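Note that forward() ends with F.log_softmax, so the network returns log-probabilities rather than raw logits. In PyTorch, nn.CrossEntropyLoss applies log_softmax internally and expects raw logits, while nn.NLLLoss is the criterion that expects log-probabilities. A minimal, self-contained sketch of that equivalence (the shapes and values below are illustrative only, not taken from this code):

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 3)                   # 4 samples, 3 classes (illustrative)
targets = torch.tensor([0, 2, 1, 0])

# CrossEntropyLoss expects raw logits and applies log_softmax internally.
ce = nn.CrossEntropyLoss()(logits, targets)

# NLLLoss expects log-probabilities, i.e. the output of log_softmax.
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)

print(torch.isclose(ce, nll))                # tensor(True): the two pipelines match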

class TrainingDataSet(Dataset):
    def __init__(self):
        super(TrainingDataSet, self).__init__()
        self.data_dict_X = X_train
        self.data_dict_y = y_train

    def __getitem__(self, index):
        t = self.data_dict_X[index, 0:36]      # first 36 feature columns of one sample
        t = torch.tensor(t).view(6, 6)         # reshape the flat features into a 6x6 grid
        return t, self.data_dict_y[index]

    def __len__(self):
        return len(self.data_dict_y)

class TestDataSet(Dataset):
    def __init__(self):
        super(TestDataSet, self).__init__()
        self.data_dict_X = X_validate
        self.data_dict_y = y_validate

    def __getitem__(self, index):
        t = self.data_dict_X[index, 0:36]
        t = torch.tensor(t).view(6, 6)
        return t, self.data_dict_y[index]

    def __len__(self):
        return len(self.data_dict_y)
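Both Dataset classes assume that the global arrays (X_train / y_train and X_validate / y_validate) can be indexed as [index, 0:36], i.e. each row carries at least 36 feature columns that are reshaped into a 6x6 grid, with labels stored as integers. A small sketch with dummy NumPy data shows how one sample comes out of the class defined above (the array sizes and class count are assumptions for illustration, not values from the original code):

import numpy as np

# Dummy stand-ins for the real training split (shapes are illustrative).
X_train = np.random.rand(100, 36).astype(np.float32)   # 100 samples, 36 features each
y_train = np.random.randint(0, 3, size=100)            # 100 integer class labels

dataset = TrainingDataSet()
sample, label = dataset[0]
print(sample.shape)    # torch.Size([6, 6]) -- the 36 features reshaped to 6x6
print(len(dataset))    # 100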

def cnn_classification():
    batch_size = 256
    trainDataLoader = DataLoader(TrainingDataSet(), batch_size=batch_size, shuffle=False)
    testDataLoader = DataLoader(TestDataSet(), batch_size=batch_size, shuffle=False)

    epoch_num = 200
    lr = 0.001

    net = VGGBaseSimpleS2().to(device)
    print(net)

    # loss
    loss_func = nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
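The function is truncated at this point in the excerpt. A typical continuation of cnn_classification would loop over epoch_num epochs, train on trainDataLoader, and evaluate on testDataLoader. The code below is a generic sketch of that pattern using the variables defined above, not the author's actual continuation; in particular, the unsqueeze(1) channel dimension and the float/long casts are assumptions about the input format.

    # --- generic sketch of a typical training/evaluation loop (not the original code) ---
    for epoch in range(epoch_num):
        net.train()
        for batch_x, batch_y in trainDataLoader:
            batch_x = batch_x.float().unsqueeze(1).to(device)   # assumed: add a channel dim for Conv2d
            batch_y = batch_y.long().to(device)

            optimizer.zero_grad()
            output = net(batch_x)
            loss = loss_func(output, batch_y)
            loss.backward()
            optimizer.step()

        # evaluate on the validation split
        net.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for batch_x, batch_y in testDataLoader:
                batch_x = batch_x.float().unsqueeze(1).to(device)
                batch_y = batch_y.long().to(device)
                pred = net(batch_x).argmax(dim=1)
                correct += (pred == batch_y).sum().item()
                total += batch_y.size(0)
        print('epoch %d, loss %.4f, val acc %.4f' % (epoch, loss.item(), correct / total))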
