【Logistic Regression Model】
1. Plotting the training data
cd d:\study\ai\data\ex2
data = load('ex2data1.txt'); % one example per row: exam 1 score, exam 2 score, 0/1 label
x = data(:, [1, 2]); % features: the two exam scores
y = data(:, 3); % label: admitted (1) / not admitted (0)
pos = find(y==1); % indices of rows where y == 1
plot(x(pos, 1), x(pos, 2), 'k+','linewidth', 2, 'markersize', 7);
hold on;
neg = find(y == 0); % indices of rows where y == 0
plot(x(neg, 1), x(neg, 2), 'ko', 'markerfacecolor', 'y', 'markersize', 7);
xlabel('exam 1 score');
ylabel('exam 2 score');
% specified in plot order
legend('admitted', 'not admitted');
2. Computing the cost function and gradient
[m, n] = size(x);
x = [ones(m, 1) x];
initial_theta = zeros(n + 1, 1);
test_theta = [-24; 0.2; 0.2];
[cost, grad] = costfunction(test_theta, x, y);
fprintf('\ncost at test theta: %f\n', cost);
fprintf('gradient at test theta: \n');
fprintf(' %f \n', grad);
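With the cost and gradient in hand, the model can be fit with a built-in optimizer. A minimal training sketch using Octave/MATLAB's fminunc (analytic gradient, up to 400 iterations), relying on the plotdecisionboundary helper defined under 【Utility Functions】 below:
options = optimset('GradObj', 'on', 'MaxIter', 400);
[theta, cost] = fminunc(@(t)(costfunction(t, x, y)), initial_theta, options);
fprintf('cost at theta found by fminunc: %f\n', cost);
plotdecisionboundary(theta, x, y); % overlay the linear decision boundary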
【Regularization】
1. Plotting the training data
cd d:\study\ai\data\ex2
data = load('ex2data2.txt'); % one example per row: microchip test 1, microchip test 2, 0/1 label
x = data(:, [1, 2]); % features: the two test results
y = data(:, 3); % label: pass (1) / fail (0)
pos = find(y==1); % indices of rows where y == 1
plot(x(pos, 1), x(pos, 2), 'k+','linewidth', 2, 'markersize', 7);
hold on;
neg = find(y == 0); % indices of rows where y == 0
plot(x(neg, 1), x(neg, 2), 'ko', 'markerfacecolor', 'y', 'markersize', 7);
xlabel('microchip test 1');
ylabel('microchip test 2');
% specified in plot order
legend('y = 1', 'y = 0');
2. Regularized cost computation
% polynomial feature mapping (this also prepends the bias column)
x = mapfeature(x(:,1), x(:,2));
initial_theta = zeros(size(x, 2), 1);
[cost, grad] = costfunctionreg(initial_theta, x, y, 1);
fprintf('cost at initial theta (zeros): %f\n', cost);
fprintf(' %f \n', grad);
test_theta = ones(size(x,2),1);
[cost, grad] = costfunctionreg(test_theta, x, y, 10);
fprintf('\ncost at test theta (with lambda = 10): %f\n', cost);
fprintf(' %f \n', grad);
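Training works the same way; a sketch with fminunc and lambda = 1 (the value used in the first call above). Because mapfeature already prepends the bias column, the result can go straight to the contour branch of plotdecisionboundary; training accuracy can then be checked with the predict helper shown further below.
lambda = 1;
options = optimset('GradObj', 'on', 'MaxIter', 400);
[theta, cost] = fminunc(@(t)(costfunctionreg(t, x, y, lambda)), initial_theta, options);
plotdecisionboundary(theta, x, y); % draws the z = 0 contour for the mapped features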
【Utility Functions】
% sigmoid (logistic) function, applied element-wise
function g = sigmoid(z)
g = 1./(1 + exp(-z));
end;
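A quick sanity check: sigmoid(0) is exactly 0.5, and the output saturates toward 0 and 1 for large-magnitude inputs:
sigmoid(0) % ans = 0.5
sigmoid([-10 0 10]) % ans ≈ 0.000045 0.500000 0.999955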
% cost function and gradient (unregularized)
function [j, grad] = costfunction(theta, x, y)
m = length(y);
% J(θ) = (1/m)·Σ[ −y·log(h(x)) − (1−y)·log(1−h(x)) ], where h(x) = sigmoid(x·θ)
j = (-y'*log(sigmoid(x*theta)) - (1-y)'*log(1-sigmoid(x*theta)))/m;
% ∇J(θ) = (1/m)·xᵀ·(h(x) − y)
grad = x'*(sigmoid(x*theta) - y)/m;
end;
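A handy correctness check: with θ = 0 every prediction is sigmoid(0) = 0.5, so the cost must equal −log(0.5) = log 2 ≈ 0.693147 regardless of the data:
cost0 = costfunction(zeros(size(x, 2), 1), x, y);
fprintf('cost at zero theta: %f (expected 0.693147)\n', cost0);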
% regularized cost function and gradient
function [j, grad] = costfunctionreg(theta, x, y, lambda)
m = length(y);
grad = zeros(size(theta));
% J(θ) = (1/m)·Σ[ −y·log(h) − (1−y)·log(1−h) ] + (λ/2m)·Σ_{j≥2} θ_j²  (the intercept θ₁ is not penalized)
j = (-y'*log(sigmoid(x*theta)) - (1-y)'*log(1-sigmoid(x*theta)))/m ...
    + lambda/(2*m)*sum(theta(2:end).^2);
% the intercept gradient carries no regularization term
grad(1,:) = 1/m * (x(:, 1)' * (sigmoid(x*theta) - y));
grad(2:end,:) = 1/m * (x(:, 2:end)' * (sigmoid(x*theta) - y)) + lambda/m*theta(2:end,:);
end;
% prediction function: threshold the predicted probability at 0.5
function p = predict(theta, x)
% classify as 1 whenever sigmoid(x·θ) >= 0.5
p = sigmoid(x * theta) >= 0.5;
end;
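Applied to the training set this gives the model's accuracy; a usage sketch, assuming theta has already been fit with fminunc as above:
p = predict(theta, x);
fprintf('train accuracy: %f%%\n', mean(double(p == y)) * 100);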
% polynomial feature mapping: all monomials x1^(i-j)·x2^j up to degree 6
function out = mapfeature(x1, x2)
degree = 6;
out = ones(size(x1(:,1)));
for i = 1:degree
for j = 0:i
out(:, end+1) = (x1.^(i-j)).*(x2.^j);
end
end
end;
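The mapping turns the two raw columns (loaded from ex2data2 earlier) into 28 features: the bias plus 2+3+…+7 = 27 monomials, which is why initial_theta above has size(x, 2) = 28 entries:
out = mapfeature(data(:, 1), data(:, 2));
size(out, 2) % ans = 28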
% decision boundary plotting
function plotdecisionboundary(theta, x, y)
pos = find(y==1); % indices of rows where y == 1
plot(x(pos, 2), x(pos, 3), 'k+','linewidth', 2, 'markersize', 7);
hold on;
neg = find(y == 0); % indices of rows where y == 0
plot(x(neg, 2), x(neg, 3), 'ko', 'markerfacecolor', 'y', 'markersize', 7);
% note: the labels below assume the exam dataset from part 1
xlabel('exam 1 score');
ylabel('exam 2 score');
% specified in plot order
legend('admitted', 'not admitted');
hold on
% with a linear (first-order) boundary, plot the line θ₁ + θ₂x + θ₃y = 0 directly
if size(x, 2) <= 3
plot_x = [min(x(:,2))-2, max(x(:,2))+2];
plot_y = (-1./theta(3)).*(theta(2).*plot_x + theta(1));
plot(plot_x, plot_y)
legend('admitted', 'not admitted', 'decision boundary')
axis([30, 100, 30, 100])
else % higher-order boundaries: draw the z = 0 level set as a contour
u = linspace(-1, 1.5, 50);
v = linspace(-1, 1.5, 50);
z = zeros(length(u), length(v));
for i = 1:length(u)
for j = 1:length(v)
% the feature mapping produces exactly the same column structure as x
z(i,j) = mapfeature(u(i), v(j))*theta;
end
end
z = z'; % transpose so contour gets the orientation right
% plot only the z = 0 contour line
contour(u, v, z, [0, 0], 'linewidth', 2)
end
hold off
end;