一定不要对 loss 重新变形;最好的做法是:loss 从损失函数出来后,直接进入反向传播。

# Failing example (kept verbatim on purpose): torch.Tensor has no `.float32()`
# method — the correct cast is `.float()`, as shown in the fixed version below.
loss=criterion(outputs.float32(),targets.float32())
print("loss:",loss)

# Backprop + parameter update on the (invalid) loss above.
loss.backward()
optimizer.step()

RuntimeError: expected dtype Float but got dtype Long (validate_dtype at ..\aten\src\ATen\native\TensorIterator.cpp:143)

(no backtrace available)

 loss=criterion(outputs.float(),targets.float())

.long() .float() .double()

# Training loop: one optimizer step per training sample, then a validation
# pass at the end of each epoch. Relies on names defined elsewhere in the
# file: model, criterion, optimizer, device, epochs, train_x, train_y,
# test(), tqdm, np, torch.
val_mape_loss = []
val_mse_loss = []
train_loss = []
for epoch in tqdm(range(epochs)):
    train_epoch_loss = []
    # NOTE(review): loop is pinned to a single sample for debugging;
    # restore range(len(train_x)) to train on the full set.
    for i in range(0, 1, 1):
        # Clear gradients from the previous step — without this they
        # accumulate across iterations and corrupt every update.
        optimizer.zero_grad()
        inputs = torch.tensor(train_x[i]).to(device)
        targets = torch.tensor(train_y[i]).to(device)
        # Add a leading dim then swap axes 0/1 — presumably to give the
        # model a (seq, batch) layout; TODO confirm against model's input.
        inputs = inputs.unsqueeze(0).transpose(0, 1)
        outputs = model(inputs)
        # Cast both sides to float32 so the criterion never sees Long
        # targets (avoids "expected dtype Float but got dtype Long").
        loss = criterion(outputs.float(), targets.float())
        print("loss:", loss)
        loss.backward()
        optimizer.step()
        train_epoch_loss.append(loss.item())
    # Per-epoch validation; test() returns (mse, mape) — defined elsewhere.
    test_mse_loss, test_mape_loss = test(model)

    val_mse_loss.append(test_mse_loss)
    val_mape_loss.append(test_mape_loss)
    train_loss.append(np.mean(np.array(train_epoch_loss)))
    print("np.mean(np.array(train_epoch_loss))", np.mean(np.array(train_epoch_loss)), " test_mse_loss", test_mse_loss, "----------")

loss 出现 NaN 时,一定要先对输入数据进行归一化。

>>> from sklearn import preprocessing
>>> import numpy as np
>>> X = np.array([[ 1., -1.,  2.],
...               [ 2.,  0.,  0.],
...               [ 0.,  1., -1.]])
>>> X_scaled = preprocessing.scale(X)

>>> X_scaled
array([[ 0.  ..., -1.22...,  1.33...],
       [ 1.22...,  0.  ..., -0.26...],
       [-1.22...,  1.22..., -1.06...]])