Could someone tell me where this code goes wrong?



A function to demonstrate the effect of image augmentation
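The cells below use plt, gdata, gloss, nd, etc. without showing the import cell; assume the usual d2l-zh MXNet/Gluon setup, roughly:

#%%
# Assumed imports (not part of the original post) -- the standard d2l-zh MXNet setup
import time
import d2lzh as d2l
import mxnet as mx
from matplotlib import pyplot as plt
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss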
#%%
def show_imgs(imgs, num_rows, num_cols, scale=2):
    figsize = (num_rows * scale, num_cols * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    for i in range(num_rows):
        for j in range(num_cols):
            axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())
            axes[i][j].axes.get_xaxis().set_visible(False)
            axes[i][j].axes.get_yaxis().set_visible(False)
    return axes
#%%
def apply(img, aug, num_rows=2, num_cols=4, scale=2):
    Y = [aug(img) for _ in range(num_rows * num_cols)]
    show_imgs(Y, num_rows, num_cols, scale)
#%% md
Left-right and top-bottom flips
#%%
apply(img,gdata.vision.transforms.RandomFlipLeftRight())
#%%
apply(img,gdata.vision.transforms.RandomFlipTopBottom())
#%%
shape_aug=gdata.vision.transforms.RandomResizedCrop(size=(250,180),scale=(0.2,0.8),ratio=(0.3,2))
apply(img,shape_aug)
#%%
show_imgs(gdata.vision.CIFAR10(train=True)[0:32][0],4,8,scale=0.8)
#%%
flip_aug = gdata.vision.transforms.Compose([gdata.vision.transforms.RandomFlipLeftRight(),
                                            gdata.vision.transforms.RandomFlipTopBottom(),
                                            gdata.vision.transforms.ToTensor()])
no_aug = gdata.vision.transforms.Compose([gdata.vision.transforms.ToTensor()])
#%%
def load_cifar10(is_train, augs, batch_size):
    return gdata.DataLoader(gdata.vision.CIFAR10(train=is_train).transform_first(augs),
                            batch_size=batch_size, shuffle=is_train)
#%%

def train(net, train_data, test_data, learning_rate, num_epochs=10):
    net.initialize(init.Xavier(), ctx=mx.gpu())
    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "Adam", {"learning_rate": learning_rate})

    for i in range(num_epochs):
        ac, n, my_l, b, start = 0, 0, 0, 0, time.time()
        # training loop
        for X, y in train_data:
            if X.dtype != y.dtype:
                y = y.astype(X.dtype)
            X = X.copyto(mx.gpu())
            y = y.copyto(mx.gpu())
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(X.shape[0])
            accuracy = mx.metric.Accuracy()
            accuracy.update(labels=y, preds=nd.softmax(net(X), axis=1))
            ac += accuracy.get()[1]
            my_l += l.sum().asscalar()
            b += y.size
            n += 1

        train_ac = ac / n
        train_loss = my_l / b
        # evaluation loop
        for X, y in test_data:
            if X.dtype != y.dtype:
                y = y.astype(X.dtype)
            X = X.copyto(mx.gpu())
            y = y.copyto(mx.gpu())
            accuracy = mx.metric.Accuracy()
            accuracy.update(labels=y, preds=nd.softmax(net(X), axis=1))
            test_acc = accuracy.get()[1]
        if (i + 1) % 5 == 0:
            print("epoch:{0},loss{1:4f},train acc:{2:4f},test_acc:{3:4f},time:{4:4f}".format(
                i + 1, train_loss, train_ac, test_acc, time.time() - start))
#%%
def train_with_data_aug(train_augs, test_augs, lr=0.001):
    batchsize, net = 256, d2l.resnet18(10)

    train_iter = load_cifar10(True, train_augs, batchsize)
    test_iter = load_cifar10(False, test_augs, batch_size=1)
    train(net, train_iter, test_iter, lr)

#%%
train_with_data_aug(flip_aug, no_aug)

The final training output is:
epoch:5,loss0.619622,train acc:0.749310,test_acc:1.000000,time:90.563104
epoch:10,loss0.355833,train acc:0.868495,test_acc:1.000000,time:90.222275
Why is the test accuracy always 100%?
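For reference, here is a minimal sketch (my own, assuming the same mx/nd names and data loaders as above, not part of the original code) of accumulating a single Accuracy metric over the entire test set, for comparison with the per-batch version inside train:

#%%
# Sketch only: one Accuracy metric accumulated across all test batches
def evaluate_accuracy(net, test_data, ctx=mx.gpu()):
    metric = mx.metric.Accuracy()
    for X, y in test_data:
        X = X.copyto(ctx)
        y = y.astype('float32').copyto(ctx)
        metric.update(labels=y, preds=nd.softmax(net(X), axis=1))
    return metric.get()[1]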