For a NumPy implementation of dropout and L1/L2 regularization, see my other blog post.
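As a quick refresher, here is a minimal inverted-dropout sketch in NumPy (this is an illustration, not the code from that post; dropout_forward and its arguments are illustrative names). Each activation is zeroed with probability p_drop and the survivors are scaled by 1/(1 - p_drop), so the expected activation is unchanged and no rescaling is needed at test time:

import numpy as np

def dropout_forward(a, p_drop=0.5, training=True):
    # inverted dropout: zero each unit with probability p_drop and
    # scale survivors by 1/(1 - p_drop) so E[output] == E[input]
    if not training or p_drop == 0.0:
        return a
    mask = (np.random.rand(*a.shape) >= p_drop) / (1.0 - p_drop)
    return a * mask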
Using dropout and L2 regularization in PyTorch:
import torch
import matplotlib.pyplot as plt

torch.manual_seed(1)  # set the seed for random number generation, for reproducibility

n_samples = 20
n_hidden = 300

# training data
x = torch.unsqueeze(torch.linspace(-1, 1, n_samples), 1)
print('x.size()', x.size())
# torch.normal(mean, std, out=None) -> Tensor
y = x + 0.3 * torch.normal(torch.zeros(n_samples, 1), torch.ones(n_samples, 1))
print(y.shape)
print(y)

# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, n_samples), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(n_samples, 1), torch.ones(n_samples, 1))

# show data
plt.scatter(x.numpy(), y.numpy(), c='red', s=50, alpha=0.5, label='train')
plt.scatter(test_x.numpy(), test_y.numpy(), c='blue', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
net_overfitting = torch.nn.Sequential(
    torch.nn.Linear(1, n_hidden),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_hidden),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, 1),
)

net_dropped = torch.nn.Sequential(
    torch.nn.Linear(1, n_hidden),
    torch.nn.Dropout(0.5),  # each unit is dropped with probability 0.5 during training
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_hidden),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, 1),
)
# no dropout
optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=0.01)
# with dropout (same lr for both nets, so the comparison is fair)
optimizer_drop = torch.optim.Adam(net_dropped.parameters(), lr=0.01)
# to add an L2 penalty instead, pass weight_decay:
# optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=0.01, weight_decay=0.001)
loss_func = torch.nn.MSELoss()
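The commented-out line above is the built-in route: for torch.optim.Adam (and SGD), weight_decay=wd folds wd * w into each parameter's gradient, which matches adding (wd/2) * ||w||^2 to the loss. Writing the penalty into the loss by hand makes the equivalence explicit; this is a minimal sketch for comparison only (l2_lambda and mse_with_l2 are illustrative names, not used by the script below):

# manual L2 penalty, equivalent up to the factor of 2 noted above
l2_lambda = 0.001  # illustrative coefficient

def mse_with_l2(pred, target, net):
    # sum of squared parameters over the whole network (weights and biases)
    penalty = sum(p.pow(2).sum() for p in net.parameters())
    return loss_func(pred, target) + l2_lambda * penalty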
plt.ion()  # interactive mode, so the figure can update inside the loop

for epoch in range(500):
    # plain net
    pred_ofit = net_overfitting(x)
    loss_ofit = loss_func(pred_ofit, y)
    optimizer_ofit.zero_grad()
    loss_ofit.backward()
    optimizer_ofit.step()

    # dropout net
    pred_drop = net_dropped(x)
    loss_drop = loss_func(pred_drop, y)
    optimizer_drop.zero_grad()
    loss_drop.backward()
    optimizer_drop.step()

    if epoch % 250 == 0:
        net_overfitting.eval()  # switch to evaluation mode: dropout is disabled
        net_dropped.eval()      # dropout is disabled here as well
        test_pred_ofit = net_overfitting(test_x)
        test_pred_drop = net_dropped(test_x)

        # show data
        plt.cla()  # clear the axes so curves and legends do not pile up
        plt.scatter(x.numpy(), y.numpy(), c='red', s=50, alpha=0.5, label='train')
        plt.scatter(test_x.numpy(), test_y.numpy(), c='blue', s=50, alpha=0.5, label='test')
        plt.plot(test_x.numpy(), test_pred_ofit.detach().numpy(), 'r-', lw=3, label='overfitting')
        plt.plot(test_x.numpy(), test_pred_drop.detach().numpy(), 'b--', lw=3, label='dropout')
        plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).detach().numpy(),
                 fontdict={'size': 20, 'color': 'red'})
        plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).detach().numpy(),
                 fontdict={'size': 20, 'color': 'blue'})
        plt.legend(loc='upper left')
        plt.ylim((-2.5, 2.5))
        plt.pause(0.1)

        net_overfitting.train()  # back to training mode, so dropout is active again
        net_dropped.train()

plt.ioff()
plt.show()
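The eval()/train() switching in the loop above matters because torch.nn.Dropout only perturbs activations in training mode; in evaluation mode it is the identity. A minimal standalone sketch demonstrating this (the all-ones tensor is just an illustrative input):

import torch

drop = torch.nn.Dropout(p=0.5)
demo = torch.ones(1, 8)

drop.train()       # training mode: dropout active
print(drop(demo))  # roughly half the entries zeroed, survivors scaled to 1/(1-0.5) = 2.0

drop.eval()        # evaluation mode: dropout is the identity
print(drop(demo))  # all ones, nothing dropped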
Results:
Dropout comparison: the network with dropout visibly resists overfitting.
L2 comparison: the weight_decay (L2) variant also resists overfitting.