其他
PyTorch 教程-卷积神经网络模型测试
在上一节中,我们实现了一个神经网络或创建了一个模型,用于对手写数字进行分类。现在,我们通过从网络中获取一张图像来测试我们的模型。我们使用了以下图像:
在看到这个图像后,我们会意识到这是数字5。现在,我们将尝试让我们的网络对其进行预测。
我们有以下步骤来预测数字图像:
步骤1:
import requests
# URL of the handwritten-digit photo. The original string carried a leading and a
# trailing space inside the quotes, which can break the HTTP request; the
# complete-code section (same URL) has none, so they are removed here too.
url = 'http://calstormbasketball.com/wp-content/uploads/2018/08/5020657994731_01c.jpeg'
步骤2:
# Fetch the image over HTTP; stream=True defers the body download so PIL can
# read the raw bytes later via response.raw.
response=requests.get(url,stream=True)
步骤3:
from PIL import Image
# Decode the streamed HTTP body directly into a PIL image.
img=Image.open(response.raw)
# NOTE(review): plt is assumed to be matplotlib.pyplot imported elsewhere in the
# tutorial — confirm before running this fragment standalone.
plt.imshow(img)
plt.show()
当我们运行它时,它将生成一个错误,因为尚未安装PIL(Pillow)库。我们必须首先安装pillow才能运行此代码。我们必须在anaconda命令提示符上运行conda install -c anaconda pillow命令来安装pillow。
运行代码后,将得到预期的输出。
步骤4:
我们需要确保图像与神经网络训练学习的内容相对应。我们的图像是1000 * 1000像素,因此我们需要将其转换为28 * 28的灰度图像,就像训练数据中的图像一样。在我们训练的图像数据集中,图像具有黑色背景和白色前景,在上面的图像中,有一个白色背景和黑色前景。现在,我们的第一个任务是对这个图像进行预处理。
# The import must come BEFORE PIL.ImageOps.invert is used; in the original
# snippet it appeared after, which raises NameError when run top to bottom.
import PIL.ImageOps

# MNIST training digits are white-on-black while the downloaded photo is
# black-on-white, so invert the colors, convert to 1-bit grayscale, then apply
# the same resize/normalize pipeline used for the training data.
img = PIL.ImageOps.invert(img)
img = img.convert('1')
transform1 = transforms.Compose([transforms.Resize((28, 28)),
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.5,), (0.5,))])
img = transform1(img)
plt.imshow(im_convert(img))
步骤5:
# Run the single preprocessed image through the network and print the predicted digit.
image = img.to(device)
# Flatten (1, 28, 28) -> (1, 784): the fully connected model takes flat vectors.
# The original `image[0].unsqueeze().unsqueeze(0)` raised a TypeError
# (unsqueeze requires a dim argument) and would have produced a 4-D tensor
# that nn.Linear cannot consume anyway.
image = image.view(image.shape[0], -1)
output = model(image)
_, pred = torch.max(output, 1)
print(pred.item())
这将给出我们期望的预测。
步骤6:
# Grab one validation batch and predict every image in it.
dataiter = iter(validation_loader)
# Use the builtin next(); the iterator's .next() method was removed in modern PyTorch.
images, labels = next(dataiter)
# Flatten each (1, 28, 28) image to 784 features for the fully connected model,
# then move the batch to the same device as the labels/model.
images_ = images.view(images.shape[0], -1).to(device)
labels = labels.to(device)
output = model(images_)
_, preds = torch.max(output, 1)
步骤7:
# Plot the first 20 validation images, titled "prediction(label)"; the title is
# green when the prediction matches the label and red otherwise.
# The `for` header was lost in the original snippet (the complete code below
# confirms it), and a stray line copied from im_convert's body is dropped.
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 10, idx + 1)
    plt.imshow(im_convert(images[idx]))
    ax.set_title("{}({})".format(str(preds[idx].item()), str(labels[idx].item())),
                 color=("green" if preds[idx] == labels[idx] else "red"))
现在,我们将重新调用plt.show(),它将给出我们期望的输出。
完整代码:
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as func
import PIL.ImageOps
from torch import nn
from torchvision import datasets,transforms
import requests
from PIL import Image
# Shared preprocessing: 28x28 grayscale digits normalized to the [-1, 1] range.
transform1 = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# MNIST train/validation splits, batched 100 at a time; only training is shuffled.
training_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform1)
training_loader = torch.utils.data.DataLoader(dataset=training_dataset, batch_size=100, shuffle=True)

validation_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform1)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=100, shuffle=False)
def im_convert(tensor):
    """Convert a normalized CHW image tensor into an HWC numpy array for plt.imshow.

    Undoes transforms.Normalize((0.5,), (0.5,)) — x * std + mean — and clips the
    result to the valid [0, 1] display range.
    """
    image = tensor.cpu().clone().detach().numpy()
    image = image.transpose(1, 2, 0)  # CHW -> HWC
    # BUG FIX: the original computed image * (np.array(...) + np.array(...)),
    # i.e. multiplied every pixel by (0.5 + 0.5) == 1.0, so the image was never
    # de-normalized. The inverse of Normalize is multiply-by-std THEN add-mean.
    image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))
    image = image.clip(0, 1)
    return image
# Preview the first 20 training images with their ground-truth labels.
dataiter = iter(training_loader)
# builtin next(); the iterator's .next() method was removed in modern PyTorch.
images, labels = next(dataiter)
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 10, idx + 1)
    plt.imshow(im_convert(images[idx]))
    # NOTE: the original source fused this statement and the class header onto
    # one line — they are separated here so the file parses.
    ax.set_title([labels[idx].item()])


class classification1(nn.Module):
    """Fully connected classifier for flattened 28x28 MNIST digits.

    Architecture: input_layer -> hidden_layer1 -> hidden_layer2 -> output_layer,
    with ReLU between layers; raw logits are returned (CrossEntropyLoss applies
    the softmax).
    """

    def __init__(self, input_layer, hidden_layer1, hidden_layer2, output_layer):
        super().__init__()
        self.linear1 = nn.Linear(input_layer, hidden_layer1)
        self.linear2 = nn.Linear(hidden_layer1, hidden_layer2)
        self.linear3 = nn.Linear(hidden_layer2, output_layer)

    def forward(self, x):
        x = func.relu(self.linear1(x))
        x = func.relu(self.linear2(x))
        x = self.linear3(x)  # logits, no activation
        return x
# Network: 784 pixel inputs -> 125 -> 65 -> 10 digit classes.
model = classification1(784, 125, 65, 10)

# Loss and optimizer (the misspelled name `criteron` is kept — the training
# loop below refers to it).
criteron = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

# Per-epoch metric history for training and validation.
epochs = 12
loss_history, correct_history = [], []
val_loss_history, val_correct_history = [], []
# Train for `epochs` passes; after each pass, evaluate on the validation set.
# Indentation below is reconstructed — the original source lost all leading
# whitespace — following the for/else structure implied by the `else:` at the
# loop's end.
for e in range(epochs):
    # Running totals for this epoch.
    loss = 0.0
    correct = 0.0
    val_loss = 0.0
    val_correct = 0.0
    for input, labels in training_loader:
        # Flatten (batch, 1, 28, 28) -> (batch, 784) for the fully connected net.
        inputs = input.view(input.shape[0], -1)
        outputs = model(inputs)
        loss1 = criteron(outputs, labels)
        optimizer.zero_grad()
        loss1.backward()
        optimizer.step()
        _, preds = torch.max(outputs, 1)
        loss += loss1.item()
        correct += torch.sum(preds == labels.data)
    else:
        # for/else: runs once the training loop finishes; validate without gradients.
        with torch.no_grad():
            for val_input, val_labels in validation_loader:
                val_inputs = val_input.view(val_input.shape[0], -1)
                val_outputs = model(val_inputs)
                val_loss1 = criteron(val_outputs, val_labels)
                _, val_preds = torch.max(val_outputs, 1)
                val_loss += val_loss1.item()
                val_correct += torch.sum(val_preds == val_labels.data)
        # NOTE(review): per-batch losses are divided by the dataset size (not the
        # batch count), as in the original — kept for fidelity; confirm intent.
        epoch_loss = loss / len(training_loader.dataset)
        epoch_acc = correct.float() / len(training_dataset)
        loss_history.append(epoch_loss)
        correct_history.append(epoch_acc)
        val_epoch_loss = val_loss / len(validation_loader.dataset)
        val_epoch_acc = val_correct.float() / len(validation_dataset)
        val_loss_history.append(val_epoch_loss)
        val_correct_history.append(val_epoch_acc)
        print('training_loss:{:.4f},{:.4f}'.format(epoch_loss, epoch_acc.item()))
        print('validation_loss:{:.4f},{:.4f}'.format(val_epoch_loss, val_epoch_acc.item()))
# Download a photo of a handwritten "5" and predict its class.
url = 'http://calstormbasketball.com/wp-content/uploads/2018/08/5020657994731_01c.jpeg'
response = requests.get(url, stream=True)
img = Image.open(response.raw)
# Training digits are white-on-black: invert, binarize, then apply the same
# resize/normalize pipeline used for MNIST.
img = PIL.ImageOps.invert(img)
img = img.convert('1')
img = transform1(img)
plt.imshow(im_convert(img))
# BUG FIX: `device` was never defined anywhere in this script (NameError);
# derive it from the model so the image always lands where the model lives.
device = next(model.parameters()).device
images = img.to(device)
# BUG FIX: the original unsqueeze(0).unsqueeze(0) produced a (1, 1, 28, 28)
# tensor, which the fully connected model cannot consume; flatten to (1, 784).
images = images.view(images.shape[0], -1)
output = model(images)
_, pred = torch.max(output, 1)
print(pred.item())
# Show model predictions on one validation batch: titles read "prediction(label)",
# green when correct, red when wrong.
device = next(model.parameters()).device  # `device` was never defined in the original
dataiter = iter(validation_loader)
images, labels = next(dataiter)  # builtin next(); .next() was removed in modern PyTorch
# BUG FIX: flatten (batch, 1, 28, 28) -> (batch, 784) — the fully connected
# model cannot take the raw 4-D batch the original passed in.
images_ = images.view(images.shape[0], -1).to(device)
labels = labels.to(device)
output = model(images_)
_, preds = torch.max(output, 1)
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 10, idx + 1, xticks=[], yticks=[])
    plt.imshow(im_convert(images[idx]))
    ax.set_title("{}({})".format(str(preds[idx].item()), str(labels[idx].item())),
                 color=("green" if preds[idx] == labels[idx] else "red"))
plt.show()