Implementing K-nearest-neighbour regression in Python with uniform (equal) weights and distance-based (unequal) weights
The code is shown below:
from sklearn.datasets import load_boston   # note: load_boston was removed in scikit-learn 1.2; use an older version to run this example
from sklearn.model_selection import train_test_split
import numpy as np

boston = load_boston()
X = boston.data
y = boston.target

# Hold out 25% of the samples as a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33, test_size=0.25)

print('The max target value is:', np.max(boston.target))
print('The min target value is:', np.min(boston.target))
print('The average target value is:', np.mean(boston.target))

# Standardize the features and the target with separate scalers
from sklearn.preprocessing import StandardScaler
ss_X = StandardScaler()
ss_y = StandardScaler()
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)
y_train = ss_y.fit_transform(y_train.reshape(-1, 1))
y_test = ss_y.transform(y_test.reshape(-1, 1))

# K-nearest-neighbour regression with uniform (equal) weights
from sklearn.neighbors import KNeighborsRegressor
uni_knr = KNeighborsRegressor(weights='uniform')
uni_knr.fit(X_train, y_train)
uni_knr_y_predict = uni_knr.predict(X_test)

# K-nearest-neighbour regression with distance-based (inverse-distance) weights
dis_knr = KNeighborsRegressor(weights='distance')
dis_knr.fit(X_train, y_train)
dis_knr_y_predict = dis_knr.predict(X_test)

# Evaluate both models; MSE and MAE are reported on the original target scale
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
print('R-squared value of uniform-weights KNeighborsRegressor is:', uni_knr.score(X_test, y_test))
print('The mean squared error of uniform-weights KNeighborsRegressor is:',
      mean_squared_error(ss_y.inverse_transform(y_test), ss_y.inverse_transform(uni_knr_y_predict)))
print('The mean absolute error of uniform-weights KNeighborsRegressor is:',
      mean_absolute_error(ss_y.inverse_transform(y_test), ss_y.inverse_transform(uni_knr_y_predict)))

print('R-squared value of distance-weights KNeighborsRegressor is:', dis_knr.score(X_test, y_test))
print('The mean squared error of distance-weights KNeighborsRegressor is:',
      mean_squared_error(ss_y.inverse_transform(y_test), ss_y.inverse_transform(dis_knr_y_predict)))
print('The mean absolute error of distance-weights KNeighborsRegressor is:',
      mean_absolute_error(ss_y.inverse_transform(y_test), ss_y.inverse_transform(dis_knr_y_predict)))
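For intuition about what the two weighting schemes actually compute, the following minimal sketch (not part of the original script; the neighbour targets y_nb and distances d_nb are made-up values) shows the prediction for a single query point: weights='uniform' takes a plain average of the k neighbours' targets, while weights='distance' takes an inverse-distance-weighted average so that closer neighbours contribute more.

import numpy as np

y_nb = np.array([21.0, 24.5, 19.8])   # hypothetical targets of the k nearest neighbours
d_nb = np.array([0.5, 1.0, 2.0])      # hypothetical distances from the query point

# uniform weights: every neighbour counts equally
pred_uniform = y_nb.mean()

# distance weights: each neighbour is weighted by 1/distance, then normalised
w = 1.0 / d_nb
pred_distance = np.sum(w * y_nb) / np.sum(w)

print(pred_uniform, pred_distance)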
That is the full walkthrough of K-nearest-neighbour regression in Python with uniform and distance-based weights. I hope it serves as a useful reference, and thank you for your continued support of the site.