Using dozens of statistical features, we build a linear regression (LR) model. The code covers: train/test splitting, feature preprocessing, model training, model evaluation, and visualization of feature importance.
Step 1: Import the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
Step 2: Read the data

data = pd.read_csv('data.csv')
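The tutorial assumes data.csv already exists, with numeric feature columns and the target in the last column. If you want to run the code without a real dataset, the sketch below (my addition, not part of the original; the file layout and column names are assumptions) generates a compatible synthetic file with scikit-learn's make_regression:

# Hypothetical helper: create a synthetic data.csv in the assumed layout
# (numeric feature columns, target as the last column).
import pandas as pd
from sklearn.datasets import make_regression

X_syn, y_syn = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)
df = pd.DataFrame(X_syn, columns=[f'feat_{i}' for i in range(X_syn.shape[1])])
df['target'] = y_syn
df.to_csv('data.csv', index=False)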
Step 3: Data preprocessing

# Drop rows with missing values
data.dropna(inplace=True)

# Features: all columns except the last; target: the last column
X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# Split into training and test sets (70% / 30%)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Build the pipeline: standardize, expand degree-2 polynomial features, fit linear regression
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('poly', PolynomialFeatures(degree=2, include_bias=False)),
    ('reg', LinearRegression())
])

# Train the model
pipeline.fit(X_train, y_train)

# Predict on the test set
y_pred = pipeline.predict(X_test)
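One benefit of wrapping the scaler, the polynomial expansion, and the regressor in a single Pipeline is that they can be saved and reloaded as one object. As an optional aside (not part of the original steps; the file name is illustrative), the fitted pipeline can be persisted with joblib:

# Save the fitted pipeline (scaler + polynomial features + regression weights)
import joblib

joblib.dump(pipeline, 'lr_pipeline.joblib')   # illustrative file name
# Reload later and predict with exactly the same preprocessing:
# pipeline = joblib.load('lr_pipeline.joblib')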
Step 4: Model evaluation

# Mean squared error
mse = mean_squared_error(y_test, y_pred)
# Coefficient of determination (R2)
r2 = r2_score(y_test, y_pred)
print('MSE: %.3f' % mse)
print('R2 score: %.3f' % r2)
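A single 70/30 split can look optimistic or pessimistic by chance. As a complementary check (my addition, not in the original), the same pipeline can be cross-validated on the training data:

# Optional: 5-fold cross-validated R2 on the training set
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(pipeline, X_train, y_train, cv=5, scoring='r2')
print('CV R2: %.3f +/- %.3f' % (cv_scores.mean(), cv_scores.std()))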
Step 5: Visualize feature importance

# Use the fitted regression coefficients as feature importances
importance = pipeline.named_steps['reg'].coef_

# Map each coefficient to its (polynomial) feature name
# (get_feature_names_out replaces the older get_feature_names in scikit-learn >= 1.0)
feature_names = pipeline.named_steps['poly'].get_feature_names_out(X.columns)
feature_importance = pd.DataFrame({'Feature': feature_names, 'Importance': importance})
feature_importance = feature_importance.sort_values('Importance', ascending=False)

# Plot a horizontal bar chart
plt.figure(figsize=(10, 8))
plt.barh(feature_importance['Feature'], feature_importance['Importance'])
plt.title('Feature importance')
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.show()
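Because the features are standardized before the regression, the coefficients are roughly comparable in scale, but they can be negative, and a degree-2 polynomial expansion produces many features. A small variation (my addition, not part of the original; the cutoff of 15 is arbitrary) is to rank by absolute coefficient and plot only the top few for readability:

# Optional: show the features with the largest absolute coefficients
top_k = 15   # illustrative cutoff
top = feature_importance.reindex(
    feature_importance['Importance'].abs().sort_values(ascending=False).index
).head(top_k)

plt.figure(figsize=(10, 6))
plt.barh(top['Feature'][::-1], top['Importance'][::-1])
plt.title('Top %d features by |coefficient|' % top_k)
plt.xlabel('Coefficient')
plt.ylabel('Feature')
plt.show()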