请教一下,为什么必须调用 train_iter.reset() 重置迭代器,才能在执行完一次 for 循环后继续读到数据?书上的例子里并没有调用 train_iter.reset()。

def train(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs):
    """Train ``net`` for ``num_epochs`` epochs, printing per-epoch stats.

    Parameters
    ----------
    net : Gluon model, called as ``net(X)``.
    train_iter : training iterator in ``mx.io.DataIter`` style — batches
        expose ``.data``/``.label`` lists and the iterator supports
        ``.reset()``.  (A Gluon ``DataLoader`` is different: it builds a
        fresh iterator on every ``for`` loop, which is why book examples
        using ``DataLoader`` never call ``reset()``.)
    test_iter : iterator handed to ``evaluate_accuracy`` after each epoch.
    batch_size : samples per batch; used to scale ``trainer.step``.
    trainer : ``gluon.Trainer`` that updates the model parameters.
    ctx : device context the batch data is copied onto.
    num_epochs : number of full passes over the training data.
    """
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        # A DataIter stays exhausted after one pass; reset() rewinds it so
        # the next epoch's for-loop yields batches again.
        train_iter.reset()
        for batch in train_iter:
            X = batch.data[0].as_in_context(ctx)
            y = batch.label[0].as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            # Labels may arrive as int types; cast so the == comparison
            # below yields a float mask we can sum.
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec' % (epoch + 1, train_l_sum / n,
                                 train_acc_sum / n, test_acc,
                                 time.time() - start))