Gradient descent: a Python implementation
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


class LinearRegression:

    def __init__(self):
        self.coeff_ = None
        self.interception_ = None
        self._theta = None

    def fit_gd(self, x_train, y_train, eta=0.01, n_iters=1e4):
        assert x_train.shape[0] == y_train.shape[0], \
            'the number of samples in x_train and y_train must be equal'

        def J(theta, X_b, y):
            # mean squared error; the residuals must be squared, otherwise
            # positive and negative errors cancel each other out
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
            except:
                return float('inf')

        def dJ(theta, X_b, y):
            # gradient of the MSE loss: 2/m * X_b^T (X_b theta - y)
            return 2 * (X_b.dot(theta) - y).dot(X_b) / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            # the source listing is cut off at this signature; the loop below is a
            # standard batch-gradient-descent completion of the same structure
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                cur_iter += 1
            return theta

        # prepend a column of ones so that theta[0] acts as the intercept
        X_b = np.hstack([np.ones((len(x_train), 1)), x_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.interception_ = self._theta[0]
        self.coeff_ = self._theta[1:]
        return self
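The imports of datasets, train_test_split and StandardScaler above suggest the class is meant to be tried on an sklearn dataset with a train/test split and scaled features. Below is a minimal usage sketch along those lines, continuing from the listing above; the choice of the diabetes dataset and of the hyperparameters is an assumption, not something specified in the original.

# Usage sketch (assumed): the dataset, split and hyperparameters below are
# illustrative choices, not taken from the original post.
X, y = datasets.load_diabetes(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=666)

scaler = StandardScaler()
scaler.fit(x_train)
x_train_std = scaler.transform(x_train)
x_test_std = scaler.transform(x_test)

reg = LinearRegression()
reg.fit_gd(x_train_std, y_train, eta=0.01, n_iters=1e4)
print(reg.interception_)
print(reg.coeff_)

Standardizing the features matters here: with unscaled features, a fixed learning rate eta can easily make the loss blow up, which is exactly the situation the try/except guard in J is meant to absorb.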