import numpy as np
import matplotlib.pyplot as plt
from torch import nn, optim
from torch.autograd import Variable
import torch
# generate 200 samples of y = x^2 plus Gaussian noise
x_data = np.linspace(-2, 2, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.2, x_data.shape)
y_data = np.square(x_data) + noise
# plt.scatter(x_data, y_data)
# plt.show()
x_data = x_data.reshape(-1, 1)  # already (200, 1) thanks to np.newaxis; kept as a no-op
y_data = y_data.reshape(-1, 1)
# convert the numpy arrays to tensors
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
# Variable is a no-op wrapper since PyTorch 0.4; the tensors could be used directly
inputs = Variable(x_data)
target = Variable(y_data)
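# The numpy-to-tensor step above could equally be written with torch.from_numpy
# (an equivalent sketch, not from the original script); astype makes the
# float32 copy in numpy, and from_numpy then shares that array's memory:
# x_data = torch.from_numpy(x_data.astype(np.float32))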
# build the neural-network model
# layers with learnable parameters usually go in __init__()
class LinearRegression(nn.Module):
    # define the network structure (despite the name, this is a nonlinear model)
    def __init__(self):
        # initialize nn.Module
        super(LinearRegression, self).__init__()
        # 1 - 10 - 1 architecture
        self.fc1 = nn.Linear(1, 10)  # 1 input feature, 10 hidden units
        self.tanh = nn.Tanh()
        self.fc2 = nn.Linear(10, 1)

    # define the forward computation
    def forward(self, x):
        x = self.fc1(x)
        x = self.tanh(x)
        x = self.fc2(x)
        return x

# instantiate the model
model = LinearRegression()
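# The same 1-10-1 network could also be expressed with nn.Sequential
# (an equivalent sketch, not part of the original script):
# model = nn.Sequential(nn.Linear(1, 10), nn.Tanh(), nn.Linear(10, 1))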
# define the loss function
mse_loss = nn.MSELoss()  # mean squared error
# define the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.3)
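# lr=0.3 is aggressive for SGD but works on this tiny full-batch problem; a
# common alternative (an assumption, not from the original) is Adam with a
# smaller learning rate:
# optimizer = optim.Adam(model.parameters(), lr=0.01)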
# print the initial (randomly initialized) parameters
for name, parameters in model.named_parameters():
    print('name: {}, param: {}'.format(name, parameters))
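# The network has 10 weights + 10 biases in fc1 and 10 weights + 1 bias in
# fc2, 31 parameters in total; a quick check (a small helper, not in the
# original):
# print(sum(p.numel() for p in model.parameters()))  # 31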
# train for 2001 iterations (full-batch gradient descent)
for i in range(2001):
    out = model(inputs)
    # compute the loss
    loss = mse_loss(out, target)
    # zero the gradients
    optimizer.zero_grad()
    # backpropagate to compute the gradients
    loss.backward()
    # update the weights
    optimizer.step()
    if i % 200 == 0:
        print(i, loss.item())
# plot the fitted curve against the noisy data
y_pred = model(inputs)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred.detach().numpy(), 'r-', lw=3)
plt.show()
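For inference-only evaluation, the forward pass can be wrapped in torch.no_grad() so that no autograd graph is built (a minimal sketch, not part of the original script):

with torch.no_grad():
    y_pred = model(inputs)
    print('final MSE:', mse_loss(y_pred, target).item())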
Output:
name: fc1.weight, param: Parameter containing:
tensor([[-0.8655],
        [ 0.5277],
        [ 0.4221],
        [-0.2834],
        [ 0.9853],
        [-0.0767],
        [-0.8628],
        [-0.0492],
        [ 0.6841],
        [ 0.2099]], requires_grad=True)
name: fc1.bias, param: Parameter containing:
tensor([-0.7933,  0.5301,  0.0711, -0.8077,  0.5706, -0.4823,  0.9381,  0.7663,
        -0.3518, -0.7270], requires_grad=True)
name: fc2.weight, param: Parameter containing:
tensor([[-0.1524, -0.0411,  0.0731, -0.3065, -0.1577, -0.0627, -0.0596, -0.2775,
          0.0461,  0.1572]], requires_grad=True)
name: fc2.bias, param: Parameter containing:
tensor([-0.1086], requires_grad=True)
0 3.856107711791992
200 0.2011813372373581
400 0.09159507602453232
600 0.06196943297982216
800 0.05489378049969673
1000 0.05240625888109207
1200 0.05162370577454567
1400 0.05135105550289154
1600 0.05146040394902229
1800 0.051464591175317764
2000 0.05113458260893822