基本原理
降维基本原理
备注:读文档和基本架构能力很重要,具备此技能能够拥有强的泛化能力
上课代码
import torch
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as plt
from sklearn import datasets
# Load the training set: comma-separated values, float32 (np.loadtxt
# decompresses the .gz transparently).
xy = np.loadtxt('diabetes.csv.gz',delimiter = ',',dtype = np.float32)
x_data = torch.from_numpy(xy[:,:-1]) # every row, all columns but the last -> the 8 feature columns
y_data = torch.from_numpy(xy[:,[-1]]) # last column kept 2-D, shape (N, 1), so it matches the model output
# Load the test set.
# NOTE(review): path style differs from the training file ('./data/...'
# vs a bare filename) -- confirm both locations are intentional.
test = np.loadtxt('./data/Diabetes_class.csv.gz',delimiter = ',',dtype = np.float32)
test_x = torch.from_numpy(test)
class Model(torch.nn.Module):
    """Three-layer fully-connected binary classifier: 8 -> 6 -> 4 -> 1.

    Every layer is followed by a sigmoid; the final sigmoid squashes the
    output into (0, 1) so it can be fed to BCE loss as a probability.
    """

    def __init__(self):
        super(Model, self).__init__()
        # Progressive dimensionality reduction across three linear layers.
        # BUG FIX: the original assigned all three layers to self.linear1,
        # so forward() crashed on the missing linear2/linear3 attributes.
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()  # one activation module, reused by all layers

    def forward(self, x):
        """Map a (batch, 8) float tensor to a (batch, 1) probability tensor."""
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x  # same variable threaded through every stage
# Instantiate the model.
model = Model()
# Binary cross-entropy loss, summed over the batch (not averaged).
# FIX: `size_average=False` has been deprecated in PyTorch for years;
# `reduction='sum'` is the supported equivalent.
criterion = torch.nn.BCELoss(reduction='sum')
# Plain stochastic gradient descent over every model parameter.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# Training cycle: 1000 epochs of full-batch gradient descent.
for epoch in range(1000):
    # Forward pass over the entire training set.
    prediction = model(x_data)
    batch_loss = criterion(prediction, y_data)
    print(epoch, batch_loss.item())  # progress: epoch index and scalar loss
    # Backward pass: clear stale gradients, then backpropagate.
    optimizer.zero_grad()
    batch_loss.backward()
    # Apply the gradient step.
    optimizer.step()
# Re-run the trained model on the training inputs; detach() drops the
# autograd graph before the NumPy conversion.
y_pred = model(x_data)
print(y_pred.detach().numpy())

# Predictions for the separate test set.
y_pred2 = model(test_x)
# FIX: the original `y_pred2.data.item()` raises unless the test set has
# exactly one row (.item() needs a single-element tensor), and `.data` is
# deprecated -- print the detached array, matching the training printout.
print(y_pred2.detach().numpy())