以下代码可直接运行,注意注释
从零实现线性回归
import torch as t
from matplotlib import pyplot as plt
import numpy as np
import random
# Synthetic dataset: num_example samples with num_input features, drawn
# from the ground-truth linear model y = 2*x1 - 3.4*x2 + 4.2 plus noise.
num_input = 2
num_example = 1000
true_w = [2,-3.4]
true_b = 4.2
features = t.randn(num_example,num_input,dtype = t.float32)
labels = true_w[0]*features[:,0]+true_w[1]*features[:,1]+true_b # labels is also a tensor, shape (num_example,)
# Add small Gaussian observation noise (mean 0, std 0.01)
labels += t.tensor(np.random.normal(0,0.01,size=labels.size()))
# plt.scatter(feature[:,0].numpy(),labels.numpy(),1) # args are x, y; the 1 is the marker size
# plt.scatter(feature[:,1].numpy(),labels.numpy(),1)
# plt.show()
def data_iter(batch_size,features,labels):
    """Yield (features, labels) minibatches in a random order.

    Args:
        batch_size: number of samples per minibatch.
        features: tensor of shape (num_examples, num_inputs).
        labels: tensor of shape (num_examples,).

    Yields:
        (X, y) tensor pairs; the final batch may be smaller than
        batch_size when num_examples is not a multiple of it.
    """
    num_examples = len(features)
    # BUG FIX: the original built indices from the module-level global
    # `num_example` instead of the local `num_examples`, so the function
    # silently depended on an unrelated global and broke for any dataset
    # whose length differed from it.
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit samples in a random order, without replacement
    for i in range(0,num_examples,batch_size):
        # clamp the slice end so the short final batch is still yielded
        j = t.LongTensor(indices[i:min(i+batch_size,num_examples)])
        yield features.index_select(0,j),labels.index_select(0,j)
batch_size = 10
# for x,y in data_iter(batch_size,features,labels):
# print(x,y)
# Initialize the parameters: w ~ N(0, 0.01) as a (num_input, 1) column, b starts at 1
w = t.tensor(np.random.normal(0,0.01,(num_input,1)),dtype=t.float32)
b = t.tensor(1,dtype=t.float32)
# Enable autograd on w and b — these are the parameters we differentiate w.r.t.
w.requires_grad_(True)
b.requires_grad_(True)
# Model: a single affine (linear) layer.
def linreg(X,w,b):
    """Return the prediction X @ w + b (b broadcasts over the batch)."""
    return X.matmul(w) + b
# Loss: halved elementwise squared error.
def squared_loss(y_hat, y):
    """Return 0.5 * (y_hat - y)**2, reshaping y to match y_hat."""
    diff = y_hat - y.view(y_hat.size())
    return diff * diff / 2
# Optimizer: one minibatch SGD step.
def sgd(params,lr,batch_size):
    """Update each parameter in place: p -= lr * p.grad / batch_size.

    Dividing by batch_size averages the gradient of a loss that was
    summed over the minibatch. Uses torch.no_grad() for the in-place
    update instead of the legacy `.data` attribute, so the step is
    excluded from autograd tracking without bypassing autograd's
    safety checks.
    """
    with t.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
# Hyperparameters
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss
# Training loop
for epoch in range(num_epochs):
    for X,y in data_iter(batch_size,features,labels):
        # forward pass; sum the per-sample losses into a scalar
        l = loss(net(X,w,b),y).sum()
        # backprop: fills in w.grad and b.grad
        l.backward()
        # SGD update (sgd divides the gradient by batch_size)
        sgd([w,b],lr,batch_size)
        # zero the gradients so they don't accumulate across minibatches
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features,w,b),labels) # a tensor holding every sample's loss
    print("epoch: %d , loss: %f"%(epoch+1,train_l.mean().item()))
# Compare the learned parameters against the ground truth
print(w,true_w,b,true_b)
输出:
epoch: 1 , loss: 0.029120
epoch: 2 , loss: 0.000124
epoch: 3 , loss: 0.000052
epoch: 4 , loss: 0.000052
epoch: 5 , loss: 0.000052
tensor([[ 1.9999],
[-3.4001]], requires_grad=True) [2, -3.4] tensor(4.1997, requires_grad=True) 4.2
以上程序的一些解释:
1、plt.scatter函数的第三个参数表示点的大小
2、torch.LongTensor 表示 64 位(即 8 字节)的有符号整数类型。 参考 https://pytorch-cn.readthedocs.io/zh/latest/package_references/Tensor/
3、index_select(dim, indices)
第一个参数是维度;本文代码中用的是 index_select(0, j),即按第 0 维(行)筛选样本,indices 是要选取的索引序号。 https://blog.csdn.net/jacke121/article/details/83044660
4、np.random.normal https://blog.csdn.net/qq_37701443/article/details/82797944
用pytorch的简洁实现线性回归
import torch as t
import numpy as np
from matplotlib import pyplot as plt
from torch import nn
# Use FloatTensor as the default tensor type
t.set_default_tensor_type("torch.FloatTensor")
# Generate the dataset: y = 2*x1 - 3.4*x2 + 4.2 plus small Gaussian noise
num_examples = 1000
num_inputs = 2
true_w = [2,-3.4]
true_b = 4.2
features = t.tensor(np.random.normal(0,1,size = (num_examples,num_inputs)),dtype = t.float)
labels = true_w[0]*features[:,0] + true_w[1]*features[:,1] + true_b
labels += t.tensor(np.random.normal(0,0.01,size = labels.size()),dtype = t.float)
# plt.scatter(features[:,1],labels,1)
# plt.show()
# Read the dataset in minibatches
import torch.utils.data as Data
batch_size = 10
# Combine the features and labels into a single dataset
dataset = Data.TensorDataset(features,labels)
# Wrap the dataset in a DataLoader that shuffles and batches it
data_iter = Data.DataLoader(dataset = dataset,batch_size = batch_size,shuffle = True)
# for X,y in data_iter:
# print(X,"\n",y)
# break
# Define the model. The three ways below are equivalent — pick ONE.
# Way 1: positional Sequential
net = nn.Sequential(
    nn.Linear(num_inputs,1) # further layers can be appended here
)
# Way 2: add_module
# net = nn.Sequential()
# net.add_module("linear",nn.Linear(num_inputs,1)) # add_module can append more layers
# Way 3: OrderedDict of named layers
# from collections import OrderedDict
# net = nn.Sequential(OrderedDict([
#     ("linear",nn.Linear(num_inputs,1)) # further layers can be added here
# ]))
print(net)
print(net[0])
# Initialize the model parameters in place
from torch.nn import init
init.normal_(net[0].weight,mean=0.0,std=0.01)
init.constant_(net[0].bias,val=0) # equivalently: net[0].bias.data.fill_(0)
# for param in net.parameters():
# print(param)
# Loss: mean squared error
loss = nn.MSELoss()
# Optimizer: plain SGD over all model parameters
from torch import optim
optimizer = optim.SGD(net.parameters(),lr=0.03)
# print(optimizer)
# Training loop
num_epoch=3
for epoch in range(num_epoch):
    for X,y in data_iter:
        output = net(X)
        # y is a 1-D tensor; view(-1,1) reshapes it to (batch_size, 1) to match output
        l = loss(output,y.view(-1,1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    # l.item() converts the scalar loss tensor to a Python float
    print("epoch:%d , loss:%f" % (epoch+1,l.item()))
dense = net[0]
# Compare the learned parameters against the ground truth
print(true_w,dense.weight.data)
print(true_b,dense.bias.data)
输出:
Sequential(
(0): Linear(in_features=2, out_features=1, bias=True)
)
Linear(in_features=2, out_features=1, bias=True)
epoch:1 , loss:0.000438
epoch:2 , loss:0.000039
epoch:3 , loss:0.000112
[2, -3.4] tensor([[ 1.9991, -3.4000]])
4.2 tensor([4.1991])
其他题目:
在刚开始训练时,训练数据集上的准确率低于测试数据集上的准确率,原因是:
训练集上的准确率是在一个epoch的过程中计算得到的,测试集上的准确率是在一个epoch结束后计算得到的,后者的模型参数更优。
在多层感知机中引入激活函数的原因是,将多个无激活函数的线性层叠加起来,其表达能力与单个线性层相同。
来源:CSDN
作者:人工智能小白菜
链接:https://blog.csdn.net/zgj_gutou/article/details/104319645