# Data processing (TensorFlow version)
import tensorflow as tf

# Load the MNIST handwritten-digit dataset (60k train / 10k test images).
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Scale pixel intensities from [0, 255] down to [0, 1] for stable training.
x_train, x_test = x_train / 255.0, x_test / 255.0

# One-hot encode the integer labels (0-9) to match categorical_crossentropy.
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
# Build the model
# Fully-connected classifier: flatten 28x28 images, then 784 -> 128 -> 64 -> 10.
# The final softmax layer yields per-class probabilities.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])
# Train the model
# Adam optimizer with categorical cross-entropy (labels are one-hot encoded).
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Fit on the full training set for 10 epochs.
model.fit(x_train, y_train, epochs=10)
# Test the model
# Report held-out accuracy on the test split.
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)
import matplotlib.pyplot as plt
import numpy as np

# Predict class probabilities for the whole test set once
# (the original called model.predict twice redundantly).
predictions = model.predict(x_test)

# Pick 10 random test images to display.
indices = np.random.choice(range(len(x_test)), 10)

# Show each image with its predicted class (argmax over the 10 scores).
fig, axs = plt.subplots(2, 5, figsize=(20, 8))
for i, ax in zip(indices, axs.flatten()):
    ax.imshow(x_test[i], cmap='gray')
    ax.set_title(f"Predicted label: {np.argmax(predictions[i])}")
plt.show()
# PyTorch version
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# Convert images to tensors and normalize to mean 0.5 / std 0.5,
# mapping pixel values into [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# Download (if needed) and wrap the MNIST train/test splits in DataLoaders.
# Shuffle only the training data; keep test order deterministic.
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
class Net(nn.Module):
    """Fully-connected MNIST classifier: 784 -> 128 -> 64 -> 10.

    forward() returns raw class scores (logits) of shape (N, 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten (N, 1, 28, 28) image batches to (N, 784) vectors.
        x = x.view(-1, 28 * 28)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax
        # internally, so the original's torch.softmax here effectively
        # applied softmax twice and degraded training gradients.
        # torch.max/argmax over logits picks the same class as over
        # softmax outputs, so downstream prediction code is unaffected.
        return self.fc3(x)


net = Net()
# Cross-entropy over raw scores; Adam with the default betas and lr=0.001.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

# Train for 10 epochs, reporting the mean minibatch loss per epoch.
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()          # clear gradients accumulated last step
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f'Epoch [{epoch+1}/10], Loss: {running_loss/len(trainloader):.4f}')
# Evaluate classification accuracy on the test set; no gradients needed.
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        # Predicted class = index of the highest score in each row.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Accuracy of the network on the 10000 test images: {100 * correct / total:.2f}%')
# Predict labels for one batch of test images (no gradients needed).
predictions = []
test_images, test_labels = next(iter(testloader))
with torch.no_grad():
    test_outputs = net(test_images)
    _, predicted_labels = torch.max(test_outputs, 1)
    predictions.append(predicted_labels.numpy())
predictions = np.concatenate(predictions)

# Show 10 random images from the batch with their predicted labels.
# squeeze() drops the channel dimension: (1, 28, 28) -> (28, 28).
indices = np.random.choice(range(len(test_images)), 10)
fig, axs = plt.subplots(2, 5, figsize=(20, 8))
for i, ax in zip(indices, axs.flatten()):
    ax.imshow(test_images[i].squeeze().numpy(), cmap='gray')
    ax.set_title(f"Predicted label: {predictions[i]}")
plt.show()