import torch

def synthetic_data(w, b, num_examples):
    """Generate y = X w + b + noise."""
    X = torch.normal(mean=0, std=1, size=(num_examples, len(w)))  # standard-normal features
    y = torch.matmul(X, w) + b  # matmul: general matrix product; torch.mv only takes a 2-D matrix and a 1-D vector
    y += torch.normal(0, 0.01, y.shape)  # Gaussian observation noise
    return X, y.reshape((-1, 1))  # labels as a (num_examples, 1) column vector
true_w = torch.tensor([2, -3.4])  # ground-truth parameters (the book's usual values)
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])
# features: tensor([0.2132, 0.3922])
# label: tensor([3.2994])
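# A quick scatter plot makes the linear relation between the second feature and
# the labels visible; a minimal sketch, assuming matplotlib is available:
import matplotlib.pyplot as plt

plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), s=1)
plt.xlabel('features[:, 1]')
plt.ylabel('labels')
plt.show()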
import random

def data_iter(batch_size, features, labels):
    """Yield shuffled minibatches of (features, labels)."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the examples in random order
    for i in range(0, num_examples, batch_size):
        # the last slice may be shorter than batch_size
        batch_indices = torch.tensor(indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10  # minibatch size (assumed; the original notes never set it)
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
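# The hand-written data_iter above can also be replaced by PyTorch's built-in
# data utilities; a minimal sketch (load_array is an illustrative helper name,
# not something defined in these notes):
from torch.utils import data

def load_array(data_arrays, batch_size, is_train=True):
    """Wrap tensors in a TensorDataset and return a shuffling DataLoader."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)

builtin_iter = load_array((features, labels), batch_size)
X, y = next(iter(builtin_iter))  # one minibatch, same shapes as data_iter yields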
# Example model: the same regression written as an nn.Module
import torch.nn as nn

class LinearRegressionModel(nn.Module):
    def __init__(self, in_dim, out_dim):
        super(LinearRegressionModel, self).__init__()  # run the parent nn.Module constructor
        # nn.Linear maps (batch_size, in_dim) inputs to (batch_size, out_dim) outputs
        self.linear = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        return self.linear(x)
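# The training loop below is the from-scratch version: net(X, w, b) refers to a
# plain function, not the nn.Module above, and it relies on helpers and initialized
# parameters that these notes omit. A minimal sketch following the book's standard
# definitions (the lr and num_epochs values are assumptions):
def linreg(X, w, b):
    """The linear regression model."""
    return torch.matmul(X, w) + b

def squared_loss(y_hat, y):
    """Squared loss, kept per example as a (batch_size, 1) tensor."""
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent with in-place updates."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()

w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
lr, num_epochs = 0.03, 3
net, loss = linreg, squared_loss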
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # per-example losses: a (batch_size, 1) 2-D tensor
        l.sum().backward()  # summing reduces l to a scalar so backward() can run
        sgd([w, b], lr, batch_size)  # SGD step: update the parameters to shrink the loss
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
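# For comparison, a minimal sketch of training the LinearRegressionModel class
# defined earlier with PyTorch's built-in loss and optimizer in place of the
# hand-written helpers (the hyperparameter values are assumptions):
model = LinearRegressionModel(in_dim=2, out_dim=1)
criterion = nn.MSELoss()                                   # built-in mean squared error
optimizer = torch.optim.SGD(model.parameters(), lr=0.03)

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        optimizer.zero_grad()          # clear gradients from the previous step
        l = criterion(model(X), y)     # forward pass and scalar loss
        l.backward()                   # backpropagate
        optimizer.step()               # parameter update
    with torch.no_grad():
        print(f'epoch {epoch + 1}, loss {float(criterion(model(features), labels)):f}')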