

Implementing Logistic Regression in Python with Newton's Method


This article trains the logistic regression model using Newton's Method.
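For reference, these are the standard formulas that the implementation below follows. Writing $p_i = P(y_i = 1 \mid x_i) = \sigma(w^\top x_i)$, the gradient and Hessian of the negative log-likelihood $L(w)$ are

$$\nabla L(w) = -\sum_i (y_i - p_i)\,x_i, \qquad H(w) = \sum_i p_i(1 - p_i)\,x_i x_i^\top,$$

and each Newton iteration updates the weights by

$$w_{t+1} = w_t - H(w_t)^{-1}\,\nabla L(w_t).$$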

Code

import numpy as np


class LogisticRegression(object):
    '''Logistic Regression classifier trained by Newton's Method.'''

    def __init__(self, error: float = 0.7, max_epoch: int = 100):
        '''
        :param error: float, if the distance between the new weight and the old weight is less than error, training stops.
        :param max_epoch: if the training epoch >= max_epoch, training stops.
        '''
        self.error = error
        self.max_epoch = max_epoch
        self.weight = None
        self.sign = np.vectorize(lambda x: 1 if x >= 0.5 else 0)

    def p_func(self, X_):
        '''Get P(y=1 | x)
        :param X_: shape = (n_samples, n_features + 1)
        :return: shape = (n_samples)
        '''
        tmp = np.exp(self.weight @ X_.T)
        return tmp / (1 + tmp)

    def diff(self, X_, y, p):
        '''Get the first derivative (gradient)
        :param X_: shape = (n_samples, n_features + 1)
        :param y: shape = (n_samples)
        :param p: shape = (n_samples), P(y=1 | x)
        :return: shape = (n_features + 1), first derivative
        '''
        return -(y - p) @ X_

    def hess_mat(self, X_, p):
        '''Get the Hessian matrix
        :param p: shape = (n_samples), P(y=1 | x)
        :return: shape = (n_features + 1, n_features + 1), second derivative
        '''
        hess = np.zeros((X_.shape[1], X_.shape[1]))
        for i in range(X_.shape[0]):
            hess += self.X_XT[i] * p[i] * (1 - p[i])
        return hess

    def newton_method(self, X_, y):
        '''Newton's Method to calculate the weight
        :param X_: shape = (n_samples, n_features + 1)
        :param y: shape = (n_samples)
        :return: None
        '''
        self.weight = np.ones(X_.shape[1])
        # Pre-compute the outer products x_i @ x_i.T used by the Hessian.
        self.X_XT = []
        for i in range(X_.shape[0]):
            t = X_[i, :].reshape((-1, 1))
            self.X_XT.append(t @ t.T)
        for _ in range(self.max_epoch):
            p = self.p_func(X_)
            diff = self.diff(X_, y, p)
            hess = self.hess_mat(X_, p)
            # Newton update: w <- w - H^{-1} @ gradient
            new_weight = self.weight - (np.linalg.inv(hess) @ diff.reshape((-1, 1))).flatten()
            if np.linalg.norm(new_weight - self.weight) <= self.error:
                break
            self.weight = new_weight

    def fit(self, X, y):
        '''
        :param X: shape = (n_samples, n_features)
        :param y: shape = (n_samples)
        :return: self
        '''
        X_ = np.c_[np.ones(X.shape[0]), X]
        self.newton_method(X_, y)
        return self

    def predict(self, X) -> np.array:
        '''
        :param X: shape = (n_samples, n_features)
        :return: shape = (n_samples)
        '''
        X_ = np.c_[np.ones(X.shape[0]), X]
        return self.sign(self.p_func(X_))

Test Code

import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets


def plot_decision_boundary(pred_func, X, y, title=None):
    '''Plot helper for a classifier: draws the training samples and the decision boundary.
    :param pred_func: the predict function
    :param X: training set X
    :param y: training set y
    :return: None
    '''
    # Set min and max values and give them some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and the training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
    if title:
        plt.title(title)
    plt.show()
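The article does not show how the classifier and the plotting helper are wired together. The following is a minimal usage sketch; the make_moons dataset and the plot title are assumptions for illustration, not part of the original code:

# Minimal usage sketch (assumed setup, not from the original article):
# generate a toy two-class dataset and visualize the learned boundary.
X, y = sklearn.datasets.make_moons(200, noise=0.20)
clf = LogisticRegression().fit(X, y)
plot_decision_boundary(clf.predict, X, y, title='Logistic Regression (Newton Method)')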

Result

[Figure: sample points and the decision boundary learned by the Newton-method logistic regression classifier]

For more machine learning code, visit https://github.com/WiseDoge/plume

That concludes the detailed walkthrough of implementing logistic regression in Python with Newton's method. For more material on logistic regression in Python, see the other related articles on 好吧啦網.

Tags: Python, Programming