# 1. Manual implementation (手工实现)

import torch
from matplotlib import pyplot as plt

# Learning rate for gradient descent (the original comment mislabeled
# this as "loss rate").
learn_rate = 0.1

# Training data: 500 samples in [0, 1), labelled by the ground-truth
# line y = 3x + 0.8 that the model should recover.
x = torch.rand([500, 1])
y = 3 * x + 0.8

# Parameters to learn, tracked by autograd.
w = torch.rand([1, 1], requires_grad=True)
b = torch.tensor(0, requires_grad=True, dtype=torch.float32)

for i in range(500):
    # Forward pass: predictions for the current parameters.
    y_predict = torch.matmul(x, w) + b
    # Mean squared error loss (not the standard deviation, as the
    # original comment claimed).
    loss = (y - y_predict).pow(2).mean()
    # Zero gradients before backward(); PyTorch otherwise accumulates
    # gradients from previous iterations.
    if w.grad is not None:
        w.grad.zero_()
    if b.grad is not None:
        b.grad.zero_()
    # Backpropagate, then take one gradient-descent step.
    loss.backward()
    w.data = w.data - learn_rate * w.grad
    b.data = b.data - learn_rate * b.grad

# Plot the training points and the fitted line.
plt.figure(figsize=(20, 8))
plt.scatter(x.numpy().reshape(-1), y.numpy().reshape(-1))
plt.plot(x.numpy().reshape(-1), y_predict.detach().numpy().reshape(-1), c="red")

# 2. Implementation with the torch API (torch api 实现)

import torch
from torch import nn
from torch import optim
from matplotlib import pyplot as plt
import numpy as np

# Training data: 50 samples drawn uniformly from [0, 1),
# labelled by the ground-truth line y = 3x + 0.8.
x = torch.rand([50, 1])
y = x * 3 + 0.8

# Define the model
class Lr(nn.Module):
    """One-feature linear regression model: y = w * x + b."""

    def __init__(self):
        super(Lr, self).__init__()
        # Single fully-connected layer mapping 1 input to 1 output.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Apply the affine transform and return the prediction.
        return self.linear(x)
# Instantiate the model, loss function, and optimizer.
model = Lr()
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=1e-3)

# Training loop.
for i in range(30000):
    out = model(x)         # forward pass: predictions
    # MSELoss's documented signature is (input, target): predictions
    # first, ground truth second (the original passed them reversed).
    loss = criterion(out, y)
    optimizer.zero_grad()  # clear accumulated gradients
    loss.backward()        # compute gradients
    optimizer.step()       # update parameters

# Switch to evaluation mode and predict without tracking gradients,
# so the result can be converted to NumPy directly (avoids the
# deprecated .data attribute).
model.eval()
with torch.no_grad():
    predict = model(x)
predict = predict.numpy()
plt.scatter(x.numpy(), y.numpy(), c="r")
plt.plot(x.numpy(), predict)
plt.show()