{ "cells": [ { "cell_type": "markdown", "id": "28eab8b4", "metadata": {}, "source": [ "# 更好的手动特征工程\n", "虽然featuretools提供了自动特征工程,但是dfs巨慢。\n", "\n", "我们应该构建新领域特征,这会大幅提升模型。通过比例、product、minus、count以及之前的agg方法。这个过程中,我们会做一些必要的异常值处理。\n", "\n", "最后,通过kfold-lgbm得到稳定的结果,并剔除之前记录的no_importance_features 加快模型训练" ] }, { "cell_type": "code", "execution_count": 19, "id": "ba72692a", "metadata": {}, "outputs": [], "source": [ "import pandas as pd \n", "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "import gc\n", "import lightgbm as lgb\n", "from lightgbm import LGBMClassifier\n", "import warnings\n", "from sklearn.metrics import roc_curve, roc_auc_score\n", "from sklearn.preprocessing import OrdinalEncoder\n", "from sklearn.model_selection import KFold,StratifiedKFold\n", "import re\n", "from contextlib import contextmanager\n", "import time\n", "import os\n", "from lightgbm import early_stopping, log_evaluation\n", "\n", "gc.enable()\n", "warnings.filterwarnings('ignore')\n", "plt.rcParams['font.sans-serif'] = ['SimHei'] \n", "plt.rcParams['axes.unicode_minus'] = False \n", "plt.rcParams['figure.figsize'] = (8,6) \n", "plt.rcParams['figure.dpi'] = 100" ] }, { "cell_type": "markdown", "id": "4e96a46a", "metadata": {}, "source": [ "## 公共函数" ] }, { "cell_type": "code", "execution_count": 2, "id": "96c5695a", "metadata": {}, "outputs": [], "source": [ "def submit(ids, pred, name, feature_count=None):\n", " \"\"\"\n", " ids: 测试集的 SK_ID_CURR\n", " pred: 模型预测概率\n", " name: 你的实验备注 (如 'lgb_v1', 'baseline')\n", " feature_count: 可选,记录模型使用了多少个特征\n", " \"\"\"\n", " # 1. 创建提交 DataFrame\n", " submit_df = pd.DataFrame({\n", " 'SK_ID_CURR': ids,\n", " 'TARGET': pred\n", " })\n", "\n", " # 2. 生成时间戳 (格式: 0213_1530)\n", " timestamp = time.strftime(\"%m%d_%H%M\")\n", " \n", " # 3. 构造文件名\n", " # 格式: 0213_1530_lgb_v1_f542.csv\n", " f_str = f\"_f{feature_count}\" if feature_count else \"\"\n", " filename = f\"{timestamp}_{name}{f_str}.csv\"\n", " \n", " # 4. 确保保存目录存在 (可选)\n", " if not os.path.exists('submissions'):\n", " os.makedirs('submissions')\n", " \n", " save_path = os.path.join('submissions', filename)\n", " \n", " # 5. 保存并打印提示\n", " submit_df.to_csv(save_path, index=False)\n", " \n", " return submit_df\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "3345da3b", "metadata": {}, "outputs": [], "source": [ "def onehot_encoder(df, nan_as_category=True):\n", " \"\"\" \n", " df: 输入的 DataFrame\n", " nan_as_category: 是否将 NaN 视为一个独立的类别进行编码\n", "\n", " return: 新的cols\n", " \"\"\"\n", " original_columns = df.columns.tolist()\n", " categorical_cols = df.select_dtypes(include=['object']).columns.tolist()\n", " df = pd.get_dummies(df, columns=categorical_cols, dummy_na=nan_as_category)\n", " new_columns = [col for col in df.columns if col not in original_columns]\n", " return df, new_columns" ] }, { "cell_type": "markdown", "id": "44930749", "metadata": {}, "source": [ "kfold将数据分为几份,每次用其中一份做验证,其他做训练。得到更稳定结果\n", "- stratified=True 代表分层抽样,保持每份数据中正负样本比例相同" ] }, { "cell_type": "code", "execution_count": 4, "id": "d549f6f5", "metadata": {}, "outputs": [], "source": [ "def submit(ids, pred, name, feature_count=None):\n", " \"\"\"\n", " ids: 测试集的 SK_ID_CURR\n", " pred: 模型预测概率\n", " name: 你的实验备注 (如 'lgb_v1', 'baseline')\n", " feature_count: 可选,记录模型使用了多少个特征\n", " \"\"\"\n", " # 1. 创建提交 DataFrame\n", " submit_df = pd.DataFrame({\n", " 'SK_ID_CURR': ids,\n", " 'TARGET': pred\n", " })\n", "\n", " # 2. 生成时间戳 (格式: 0213_1530)\n", " timestamp = time.strftime(\"%m%d_%H%M\")\n", " \n", " # 3. 构造文件名\n", " # 格式: 0213_1530_lgb_v1_f542.csv\n", " f_str = f\"_f{feature_count}\" if feature_count else \"\"\n", " filename = f\"{timestamp}_{name}{f_str}.csv\"\n", " \n", " # 4. 确保保存目录存在 (可选)\n", " if not os.path.exists('submissions'):\n", " os.makedirs('submissions')\n", " \n", " save_path = os.path.join('submissions', filename)\n", " \n", " # 5. 保存并打印提示\n", " submit_df.to_csv(save_path, index=False)\n", " \n", " return submit_df\n" ] }, { "cell_type": "code", "execution_count": null, "id": "3e99a6eb", "metadata": {}, "outputs": [], "source": [ "def kfold_lightgbm(df, num_folds, stratified=False, debug=True):\n", " def clean_names(df):\n", " # 替换所有非字母、数字的字符为下划线\n", " # 这里的正则 [^A-Za-z0-9_] 会匹配空格、斜杠、括号等所有特殊字符\n", " df.columns = [re.sub(r'[^A-Za-z0-9_]+', '_', col) for col in df.columns]\n", " # 顺便处理一下可能出现的重复下划线,比如 __\n", " df.columns = [re.sub(r'_+', '_', col).strip('_') for col in df.columns]\n", " return df\n", " df = clean_names(df)\n", " \n", " train_df = df[df['TARGET'].notnull()]\n", " test_df = df[df['TARGET'].isnull()]\n", " if stratified:\n", " fold = StratifiedKFold(n_splits=num_folds, shuffle=True)\n", " else:\n", " fold = KFold(n_splits=num_folds, shuffle=True)\n", "\n", " features = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n", " feature_importance_df = pd.DataFrame()\n", " out_of_fold_preds = np.zeros(train_df.shape[0])\n", " submit_preds = np.zeros(test_df.shape[0])\n", " for n_fold, (train_idx, valid_idx) in enumerate(fold.split(train_df[features], train_df['TARGET'])):\n", " dtrain = lgb.Dataset(data=train_df[features].iloc[train_idx],\n", " label = train_df['TARGET'].iloc[train_idx],\n", " free_raw_data=False\n", " )\n", " dvalid = lgb.Dataset(data=train_df[features].iloc[valid_idx],\n", " label = train_df['TARGET'].iloc[valid_idx],\n", " free_raw_data=False\n", " )\n", " params = {\n", " 'objective': 'binary',\n", " 'metric': 'auc',\n", " 'max_depth': 8,\n", " 'num_leaves': 40,\n", " 'min_child_samples': 30,\n", " 'learning_rate': 0.02,\n", " 'verbosity': -1, \n", "\n", " # 特征与数据采样(增加随机性,防止过拟合)\n", " 'feature_fraction': 0.8, # 每次迭代只用 80% 的特征\n", " 'bagging_fraction': 0.8, # 每次迭代只用 80% 的数据\n", " 'bagging_freq': 5, # 每 5 轮进行一次采样\n", " \n", " 'lambda_l1': 0.1,\n", " 'lambda_l2': 0.1 \n", " }\n", " clf = lgb.train(\n", " params = params,\n", " train_set=dtrain,\n", " valid_sets=[dtrain, dvalid],\n", " num_boost_round=10000, # 10000个树\n", " callbacks=[\n", " early_stopping(stopping_rounds=200), # 如果连续200迭代没有提升auc,就自动停止 \n", " log_evaluation(period=100) # 每100轮提醒你一下进度\n", " ]\n", " )\n", " out_of_fold_preds[valid_idx] = clf.predict(dvalid.data)\n", " submit_preds += clf.predict(test_df[features]) / fold.n_splits\n", "\n", " fold_importance_df = pd.DataFrame()\n", " fold_importance_df[\"feature\"] = features\n", " fold_importance_df[\"importance\"] = clf.feature_importance(importance_type='gain')\n", " fold_importance_df[\"fold\"] = n_fold + 1\n", " feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n", " print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(dvalid.label, out_of_fold_preds[valid_idx])))\n", " del clf, dtrain, dvalid\n", " gc.collect()\n", "\n", "\n", " if not debug:\n", " submit(test_df['SK_ID_CURR'], submit_preds, 'lgbm_folds', feature_count=len(features))\n", "\n", " print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], out_of_fold_preds))\n", " return feature_importance_df\n" ] }, { "cell_type": "code", "execution_count": 6, "id": "2a86c497", "metadata": {}, "outputs": [], "source": [ "def plot_importances(feature_importance_df):\n", " cols = feature_importance_df[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:30].index\n", " best_features = feature_importance_df.loc[feature_importance_df.feature.isin(cols)]\n", " plt.figure(figsize=(8, 10))\n", " sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n", " plt.title('LightGBM Features (avg over folds)')\n", " plt.tight_layout()\n" ] }, { "cell_type": "markdown", "id": "814cc953", "metadata": {}, "source": [ "## application_train/test" ] }, { "cell_type": "code", "execution_count": null, "id": "662d1d66", "metadata": {}, "outputs": [], "source": [ "def application_train_test(nrows = None, nan_as_category = True):\n", " \"\"\" \n", " \"\"\"\n", " app_train = pd.read_csv('data/application_train.csv', nrows=nrows)\n", " app_test = pd.read_csv('data/application_test.csv', nrows=nrows)\n", " print(f'train {len(app_train)}, test {len(app_test)}')\n", " app = pd.concat([app_train, app_test])\n", " app = app.reset_index()\n", " print(f'app {len(app)}')\n", "\n", " # \n", " app['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)\n", "\n", " app['NEW_CREDIT_TO_ANNUITY_RATIO'] = app['AMT_CREDIT'] / (app['AMT_ANNUITY'] + 1)\n", " app['NEW_CREDIT_TO_GOODS_RATIO'] = app['AMT_CREDIT'] / (app['AMT_GOODS_PRICE'] + 1)\n", " app['NEW_EMPLOYED_TO_BIRTH_RATIO'] = app['DAYS_EMPLOYED'] / (app['DAYS_BIRTH'] + 1)\n", " app['NEW_ANNITY_TO_INCOME_RATIO'] = app['AMT_ANNUITY'] / (app['AMT_INCOME_TOTAL'] + 1)\n", " app['NEW_CREDIT_TO_INCOME_RATIO'] = app['AMT_CREDIT'] / (app['AMT_INCOME_TOTAL'] + 1)\n", " app['NEW_EXT_SOURCE_PROD'] = app['EXT_SOURCE_1'] * app['EXT_SOURCE_2'] * app['EXT_SOURCE_3']\n", " app['NEW_EXT_SOURCE_MEAN'] = app[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)\n", " app['NEW_EXT_SOURCE_STD'] = app[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)\n", "\n", " for bin_feature in ['FLAG_OWN_CAR', 'FLAG_OWN_REALTY', 'CODE_GENDER']:\n", " app[bin_feature], uniques = pd.factorize(app[bin_feature])\n", " app, new_cat_features = onehot_encoder(app)\n", " \n", " del app_train, app_test\n", " gc.collect()\n", " return app\n", " " ] }, { "cell_type": "markdown", "id": "90591715", "metadata": {}, "source": [ "## bureau_and_balance" ] }, { "cell_type": "code", "execution_count": 8, "id": "975e6650", "metadata": {}, "outputs": [], "source": [ "def bureau_and_balance(nrows = None, nan_as_category = True):\n", " bureau = pd.read_csv('data/bureau.csv',nrows=nrows)\n", " balance = pd.read_csv('data/bureau_balance.csv', nrows=nrows)\n", " print(f'bureau {bureau.shape}, balance {balance.shape}')\n", " balance, balance_cat_cols = onehot_encoder(balance, nan_as_category)\n", " bureau, bureau_cat_cols = onehot_encoder(bureau, nan_as_category)\n", "\n", " # balance\n", " balance_aggregations = {\n", " 'MONTHS_BALANCE': ['min', 'max', 'size']\n", " }\n", " for col in balance_cat_cols:\n", " balance_aggregations[col] = ['mean']\n", " balance_agg = balance.groupby('SK_ID_BUREAU').agg(balance_aggregations)\n", " balance_agg.columns = pd.Index([col[0] + '_' + col[1].upper() for col in balance_agg.columns.tolist()])\n", " bureau = bureau.join(balance_agg, how='left', on='SK_ID_BUREAU')\n", " bureau = bureau.drop(columns=['SK_ID_BUREAU'])\n", " del balance, balance_agg\n", " gc.collect()\n", "\n", " # bureau\n", " num_aggregations = {\n", " 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n", " 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n", " 'DAYS_CREDIT_UPDATE': ['mean'],\n", " 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n", " 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n", " 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n", " 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n", " 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n", " 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n", " 'AMT_ANNUITY': ['max', 'mean'],\n", " 'CNT_CREDIT_PROLONG': ['sum'],\n", " 'MONTHS_BALANCE_MIN': ['min'],\n", " 'MONTHS_BALANCE_MAX': ['max'],\n", " 'MONTHS_BALANCE_SIZE': ['mean', 'sum']\n", " }\n", " category_aggregations = {}\n", " for col in bureau_cat_cols:\n", " category_aggregations[col] = ['mean']\n", " for col in balance_cat_cols:\n", " category_aggregations[col + '_MEAN'] = ['mean']\n", " bureau_agg = bureau.groupby(by='SK_ID_CURR').agg({**num_aggregations, **category_aggregations})\n", " bureau_agg.columns = pd.Index(\n", " ['BUREAU_' + col[0] + '_' + col[1].upper() for col in bureau_agg.columns.tolist()]\n", " )\n", "\n", " # bureau - active where\n", " active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n", " active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)\n", " cols = active_agg.columns\n", " active_agg.columns = pd.Index(\n", " ['ACTIVE_' + col[0] +'_' + col[1].upper() for col in active_agg.columns.tolist()]\n", " )\n", " bureau_agg = bureau_agg.join(active_agg, on='SK_ID_CURR', how='left')\n", " del active_agg, active\n", " gc.collect()\n", "\n", " # bureau - closed where\n", " closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n", " closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)\n", " closed_agg.columns = pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n", " bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')\n", " del closed, closed_agg\n", " gc.collect()\n", "\n", " for col in cols:\n", " bureau_agg['NEW_RATIO_BUREAU_' + col[0] + \"_\" + col[1].upper()] = bureau_agg['ACTIVE_' + col[0] + \"_\" + col[1].upper()] / bureau_agg['CLOSED_' + col[0] + \"_\" + col[1].upper()]\n", " \n", " del bureau\n", " gc.collect()\n", " \n", " return bureau_agg" ] }, { "cell_type": "markdown", "id": "db10f790", "metadata": {}, "source": [ "*active*和*closed*是同样的逻辑,但是不同的业务含义。*active*代表当前还款压力,*closed*代表往日信用记录。\n", "他们是两类指标,混在一起的话,意义不明确\n", "\n", "这个类似ft工具的where" ] }, { "cell_type": "markdown", "id": "75d80713", "metadata": {}, "source": [ "## previous_applications" ] }, { "cell_type": "code", "execution_count": 9, "id": "3ae7a0bd", "metadata": {}, "outputs": [], "source": [ "def previous_applications(nrows = None, nan_as_category = True):\n", " prev = pd.read_csv('data/previous_application.csv',nrows=nrows)\n", " print(f'prev {prev.shape}')\n", " prev, cat_cols = onehot_encoder(prev, nan_as_category)\n", "\n", " prev['DAYS_FIRST_DRAWING'] = prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan)\n", " prev['DAYS_FIRST_DUE'] = prev['DAYS_FIRST_DUE'].replace(365243, np.nan)\n", " prev['DAYS_LAST_DUE_1ST_VERSION'] = prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan)\n", " prev['DAYS_LAST_DUE'] = prev['DAYS_LAST_DUE'].replace(365243, np.nan)\n", " prev['DAYS_TERMINATION'] = prev['DAYS_TERMINATION'].replace(365243, np.nan)\n", " \n", " prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']\n", "\n", " num_aggregations = {\n", " 'AMT_ANNUITY': ['min', 'max', 'mean'],\n", " 'AMT_APPLICATION': ['min', 'max', 'mean'],\n", " 'AMT_CREDIT': ['min', 'max', 'mean'],\n", " 'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],\n", " 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n", " 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n", " 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n", " 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n", " 'DAYS_DECISION': ['min', 'max', 'mean'],\n", " 'CNT_PAYMENT': ['mean', 'sum'],\n", " }\n", "\n", " cat_aggregations = {}\n", " for cat in cat_cols:\n", " cat_aggregations[cat] = ['mean']\n", "\n", " prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n", " prev_agg.columns = pd.Index(['PREV_' + e[0] + \"_\" + e[1].upper() for e in prev_agg.columns.tolist()])\n", "\n", " # Previous Applications: Approved Applications - only numerical features\n", " approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]\n", " approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)\n", " cols = approved_agg.columns.tolist()\n", " approved_agg.columns = pd.Index(['APPROVED_' + e[0] + \"_\" + e[1].upper() for e in approved_agg.columns.tolist()])\n", " prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')\n", " \n", " # Previous Applications: Refused Applications - only numerical features\n", " refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]\n", " refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)\n", " refused_agg.columns = pd.Index(['REFUSED_' + e[0] + \"_\" + e[1].upper() for e in refused_agg.columns.tolist()])\n", " prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')\n", " del refused, refused_agg, approved, approved_agg, prev\n", "\n", " for e in cols:\n", " prev_agg['NEW_RATIO_PREV_' + e[0] + \"_\" + e[1].upper()] = prev_agg['APPROVED_' + e[0] + \"_\" + e[1].upper()] / prev_agg['REFUSED_' + e[0] + \"_\" + e[1].upper()]\n", " \n", " gc.collect()\n", " return prev_agg" ] }, { "cell_type": "markdown", "id": "8b80f537", "metadata": {}, "source": [ "同样的,*approved*和*refused*代表不同含义,如*APPROVED_AMT_CREDIT*是银行对客户信心额度,*REFUSED_AMT_CREDIT*是客户想借没借到的额度" ] }, { "cell_type": "markdown", "id": "d7953903", "metadata": {}, "source": [ "## POS_CASH_balance" ] }, { "cell_type": "code", "execution_count": 10, "id": "7a603301", "metadata": {}, "outputs": [], "source": [ "def pos_cash(nrows = None , nan_as_category = True):\n", " pos = pd.read_csv('data/POS_CASH_balance.csv', nrows = nrows)\n", " pos, cat_cols = onehot_encoder(pos, nan_as_category)\n", " aggregations = {\n", " 'MONTHS_BALANCE': ['max', 'mean', 'size'],\n", " 'SK_DPD': ['max', 'mean'],\n", " 'SK_DPD_DEF': ['max', 'mean']\n", " }\n", " for cat in cat_cols:\n", " aggregations[cat] = ['mean']\n", " pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)\n", " pos_agg.columns = pd.Index(['POS_' + e[0] + \"_\" + e[1].upper() for e in pos_agg.columns.tolist()])\n", "\n", " pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()\n", " \n", " del pos\n", " gc.collect()\n", "\n", " return pos_agg" ] }, { "cell_type": "markdown", "id": "98d4cf56", "metadata": {}, "source": [ "## installments_payments" ] }, { "cell_type": "code", "execution_count": 11, "id": "a0ebfe5b", "metadata": {}, "outputs": [], "source": [ "def installments_payments(nrows = None , nan_as_category = True):\n", " ins = pd.read_csv('data/installments_payments.csv', nrows = nrows)\n", " ins, cat_cols = onehot_encoder(ins, nan_as_category= True)\n", "\n", " ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']\n", " ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']\n", "\n", " # DPD逾期天数, DBD提前还款天数\n", " ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']\n", " ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']\n", " ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)\n", " ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)\n", "\n", " aggregations = {\n", " 'NUM_INSTALMENT_VERSION': ['nunique'],\n", " 'DPD': ['max', 'mean', 'sum'],\n", " 'DBD': ['max', 'mean', 'sum'],\n", " 'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],\n", " 'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],\n", " 'AMT_INSTALMENT': ['max', 'mean', 'sum'],\n", " 'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],\n", " 'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']\n", " }\n", " for cat in cat_cols:\n", " aggregations[cat] = ['mean']\n", " ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)\n", " ins_agg.columns = pd.Index(['INSTAL_' + e[0] + \"_\" + e[1].upper() for e in ins_agg.columns.tolist()])\n", "\n", " ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()\n", " \n", " del ins\n", " gc.collect()\n", " return ins_agg\n" ] }, { "cell_type": "markdown", "id": "0dabad0d", "metadata": {}, "source": [ "## credit_card_balance" ] }, { "cell_type": "code", "execution_count": 12, "id": "c702b7b2", "metadata": {}, "outputs": [], "source": [ "def credit_card_balance(nrows = None, nan_as_category = True):\n", " cc = pd.read_csv('data/credit_card_balance.csv', nrows = nrows)\n", " cc, cat_cols = onehot_encoder(cc, nan_as_category= True)\n", "\n", " cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)\n", "\n", " numeric_cols = [c for c in cc.columns if cc[c].dtype != 'object']\n", " num_aggregations = {col: [ 'mean', 'sum', 'var'] for col in numeric_cols}\n", "\n", " category_aggregations = {}\n", " for col in cat_cols:\n", " category_aggregations[col] = ['mean']\n", " \n", " cc_agg = cc.groupby('SK_ID_CURR').agg({**num_aggregations, **category_aggregations})\n", " cc_agg.columns = pd.Index(['CC_' + e[0] + \"_\" + e[1].upper() for e in cc_agg.columns.tolist()])\n", "\n", " cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()\n", " \n", " del cc\n", " gc.collect()\n", " return cc_agg" ] }, { "cell_type": "markdown", "id": "153017d9", "metadata": {}, "source": [ "## main" ] }, { "cell_type": "code", "execution_count": 13, "id": "c2a3dc7d", "metadata": {}, "outputs": [], "source": [ "@contextmanager\n", "def timer(title):\n", " t0 = time.time()\n", " yield # 执行with代码\n", " print(f'{title} done in {time.time() - t0:.0f}s') " ] }, { "cell_type": "code", "execution_count": 14, "id": "2b4b3362", "metadata": {}, "outputs": [], "source": [ "def get_last_no_imp_features():\n", " \"\"\" 获取那些不重要的特征,以供剔除\n", " \"\"\"\n", " try:\n", " features_importance_df = pd.read_feather('features_importance_df.feather')\n", " low_importance_features = features_importance_df.groupby('feature')['importance'].mean()\n", " low_importance_features = low_importance_features[low_importance_features == 0].index.tolist()\n", " return low_importance_features\n", " except FileNotFoundError:\n", " return []" ] }, { "cell_type": "code", "execution_count": 15, "id": "3bc0d710", "metadata": {}, "outputs": [], "source": [ "def main(debug = False):\n", " nrows = 10000 if debug else None\n", " df = application_train_test(nrows)\n", " with timer('Process bureau and bureau balance'):\n", " bureau = bureau_and_balance(nrows)\n", " print(f'Bureau shape :{bureau.shape}')\n", " df = df.join(bureau, on='SK_ID_CURR', how='left')\n", " del bureau\n", " gc.collect()\n", " with timer(\"Process previous_applications\"):\n", " prev = previous_applications(nrows)\n", " print(\"Previous applications df shape:\", prev.shape)\n", " df = df.join(prev, how='left', on='SK_ID_CURR')\n", " del prev\n", " gc.collect()\n", " with timer(\"Process POS-CASH balance\"):\n", " pos = pos_cash(nrows)\n", " print(\"Pos-cash balance df shape:\", pos.shape)\n", " df = df.join(pos, how='left', on='SK_ID_CURR')\n", " del pos\n", " gc.collect()\n", " with timer(\"Process installments payments\"):\n", " ins = installments_payments(nrows)\n", " print(\"Installments payments df shape:\", ins.shape)\n", " df = df.join(ins, how='left', on='SK_ID_CURR')\n", " del ins\n", " gc.collect()\n", " with timer(\"Process credit card balance\"):\n", " cc = credit_card_balance(nrows)\n", " print(\"Credit card balance df shape:\", cc.shape)\n", " df = df.join(cc, how='left', on='SK_ID_CURR')\n", " del cc\n", " gc.collect()\n", " with timer('Run lgbm with kfold'):\n", " no_imp_features = get_last_no_imp_features()\n", " features_to_drop = list(set(no_imp_features) & set(df.columns))\n", " df = df.drop(columns=features_to_drop)\n", " print(f'final DF :{df.shape}')\n", " features_importance_df = kfold_lightgbm(df, num_folds=5, debug=debug)\n", " features_importance_df.to_feather('features_importance_df.feather')" ] }, { "cell_type": "code", "execution_count": 20, "id": "01111db3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "train 307511, test 48744\n", "app 356255\n", "bureau (1716428, 17), balance (27299925, 3)\n", "Bureau shape :(305811, 143)\n", "Process bureau and bureau balance done in 17s\n", "prev (1670214, 37)\n", "Previous applications df shape: (338857, 279)\n", "Process previous_applications done in 21s\n", "Pos-cash balance df shape: (337252, 18)\n", "Process POS-CASH balance done in 12s\n", "Installments payments df shape: (339587, 26)\n", "Process installments payments done in 23s\n", "Credit card balance df shape: (103558, 72)\n", "Process credit card balance done in 14s\n", "final DF :(356255, 720)\n", "Training until validation scores don't improve for 200 rounds\n", "[100]\ttraining's auc: 0.78238\tvalid_1's auc: 0.765774\n", "[200]\ttraining's auc: 0.8061\tvalid_1's auc: 0.778186\n", "[300]\ttraining's auc: 0.822628\tvalid_1's auc: 0.784157\n", "[400]\ttraining's auc: 0.836671\tvalid_1's auc: 0.787432\n", "[500]\ttraining's auc: 0.848619\tvalid_1's auc: 0.789125\n", "[600]\ttraining's auc: 0.859644\tvalid_1's auc: 0.790174\n", "[700]\ttraining's auc: 0.869208\tvalid_1's auc: 0.79104\n", "[800]\ttraining's auc: 0.878829\tvalid_1's auc: 0.791669\n", "[900]\ttraining's auc: 0.88704\tvalid_1's auc: 0.792334\n", "[1000]\ttraining's auc: 0.894494\tvalid_1's auc: 0.792548\n", "[1100]\ttraining's auc: 0.901095\tvalid_1's auc: 0.79254\n", "Early stopping, best iteration is:\n", "[993]\ttraining's auc: 0.893959\tvalid_1's auc: 0.792591\n", "Fold 1 AUC : 0.792591\n", "Training until validation scores don't improve for 200 rounds\n", "[100]\ttraining's auc: 0.782763\tvalid_1's auc: 0.759153\n", "[200]\ttraining's auc: 0.807392\tvalid_1's auc: 0.772789\n", "[300]\ttraining's auc: 0.823934\tvalid_1's auc: 0.779224\n", "[400]\ttraining's auc: 0.837874\tvalid_1's auc: 0.783119\n", "[500]\ttraining's auc: 0.849965\tvalid_1's auc: 0.785261\n", "[600]\ttraining's auc: 0.860856\tvalid_1's auc: 0.786782\n", "[700]\ttraining's auc: 0.870219\tvalid_1's auc: 0.78738\n", "[800]\ttraining's auc: 0.879139\tvalid_1's auc: 0.787911\n", "[900]\ttraining's auc: 0.887327\tvalid_1's auc: 0.788308\n", "[1000]\ttraining's auc: 0.895203\tvalid_1's auc: 0.788714\n", "[1100]\ttraining's auc: 0.902198\tvalid_1's auc: 0.788899\n", "[1200]\ttraining's auc: 0.908633\tvalid_1's auc: 0.789061\n", "[1300]\ttraining's auc: 0.914877\tvalid_1's auc: 0.789003\n", "[1400]\ttraining's auc: 0.920659\tvalid_1's auc: 0.789187\n", "[1500]\ttraining's auc: 0.925779\tvalid_1's auc: 0.789444\n", "[1600]\ttraining's auc: 0.930208\tvalid_1's auc: 0.789508\n", "[1700]\ttraining's auc: 0.934451\tvalid_1's auc: 0.789575\n", "[1800]\ttraining's auc: 0.93854\tvalid_1's auc: 0.789671\n", "[1900]\ttraining's auc: 0.942259\tvalid_1's auc: 0.789874\n", "[2000]\ttraining's auc: 0.945858\tvalid_1's auc: 0.789743\n", "Early stopping, best iteration is:\n", "[1885]\ttraining's auc: 0.941636\tvalid_1's auc: 0.789935\n", "Fold 2 AUC : 0.789935\n", "Training until validation scores don't improve for 200 rounds\n", "[100]\ttraining's auc: 0.782443\tvalid_1's auc: 0.760835\n", "[200]\ttraining's auc: 0.806232\tvalid_1's auc: 0.774092\n", "[300]\ttraining's auc: 0.822842\tvalid_1's auc: 0.780568\n", "[400]\ttraining's auc: 0.836422\tvalid_1's auc: 0.783767\n", "[500]\ttraining's auc: 0.848625\tvalid_1's auc: 0.786248\n", "[600]\ttraining's auc: 0.859569\tvalid_1's auc: 0.787689\n", "[700]\ttraining's auc: 0.869923\tvalid_1's auc: 0.789037\n", "[800]\ttraining's auc: 0.879094\tvalid_1's auc: 0.789732\n", "[900]\ttraining's auc: 0.887098\tvalid_1's auc: 0.790292\n", "[1000]\ttraining's auc: 0.894593\tvalid_1's auc: 0.790652\n", "[1100]\ttraining's auc: 0.901143\tvalid_1's auc: 0.79099\n", "[1200]\ttraining's auc: 0.907911\tvalid_1's auc: 0.791169\n", "[1300]\ttraining's auc: 0.913411\tvalid_1's auc: 0.791089\n", "[1400]\ttraining's auc: 0.918854\tvalid_1's auc: 0.791133\n", "Early stopping, best iteration is:\n", "[1201]\ttraining's auc: 0.907944\tvalid_1's auc: 0.791183\n", "Fold 3 AUC : 0.791183\n", "Training until validation scores don't improve for 200 rounds\n", "[100]\ttraining's auc: 0.783452\tvalid_1's auc: 0.759775\n", "[200]\ttraining's auc: 0.80759\tvalid_1's auc: 0.772697\n", "[300]\ttraining's auc: 0.823811\tvalid_1's auc: 0.778664\n", "[400]\ttraining's auc: 0.83728\tvalid_1's auc: 0.781591\n", "[500]\ttraining's auc: 0.849105\tvalid_1's auc: 0.783588\n", "[600]\ttraining's auc: 0.860079\tvalid_1's auc: 0.7851\n", "[700]\ttraining's auc: 0.869841\tvalid_1's auc: 0.786359\n", "[800]\ttraining's auc: 0.879002\tvalid_1's auc: 0.787141\n", "[900]\ttraining's auc: 0.887656\tvalid_1's auc: 0.787323\n", "[1000]\ttraining's auc: 0.895257\tvalid_1's auc: 0.787467\n", "[1100]\ttraining's auc: 0.902494\tvalid_1's auc: 0.787827\n", "[1200]\ttraining's auc: 0.908634\tvalid_1's auc: 0.788189\n", "[1300]\ttraining's auc: 0.914872\tvalid_1's auc: 0.788379\n", "[1400]\ttraining's auc: 0.920066\tvalid_1's auc: 0.788461\n", "[1500]\ttraining's auc: 0.92536\tvalid_1's auc: 0.788412\n", "[1600]\ttraining's auc: 0.930068\tvalid_1's auc: 0.788697\n", "[1700]\ttraining's auc: 0.934425\tvalid_1's auc: 0.78877\n", "[1800]\ttraining's auc: 0.938771\tvalid_1's auc: 0.788662\n", "[1900]\ttraining's auc: 0.94271\tvalid_1's auc: 0.788981\n", "[2000]\ttraining's auc: 0.946599\tvalid_1's auc: 0.788999\n", "[2100]\ttraining's auc: 0.949967\tvalid_1's auc: 0.788967\n", "Early stopping, best iteration is:\n", "[1927]\ttraining's auc: 0.944003\tvalid_1's auc: 0.789081\n", "Fold 4 AUC : 0.789081\n", "Training until validation scores don't improve for 200 rounds\n", "[100]\ttraining's auc: 0.782267\tvalid_1's auc: 0.760801\n", "[200]\ttraining's auc: 0.805775\tvalid_1's auc: 0.774578\n", "[300]\ttraining's auc: 0.822456\tvalid_1's auc: 0.781059\n", "[400]\ttraining's auc: 0.837006\tvalid_1's auc: 0.784653\n", "[500]\ttraining's auc: 0.848933\tvalid_1's auc: 0.786817\n", "[600]\ttraining's auc: 0.860147\tvalid_1's auc: 0.788454\n", "[700]\ttraining's auc: 0.869955\tvalid_1's auc: 0.789242\n", "[800]\ttraining's auc: 0.879022\tvalid_1's auc: 0.789719\n", "[900]\ttraining's auc: 0.887043\tvalid_1's auc: 0.790111\n", "[1000]\ttraining's auc: 0.894709\tvalid_1's auc: 0.790376\n", "[1100]\ttraining's auc: 0.901814\tvalid_1's auc: 0.790596\n", "[1200]\ttraining's auc: 0.90801\tvalid_1's auc: 0.790917\n", "[1300]\ttraining's auc: 0.914572\tvalid_1's auc: 0.791167\n", "[1400]\ttraining's auc: 0.920002\tvalid_1's auc: 0.791147\n", "[1500]\ttraining's auc: 0.92504\tvalid_1's auc: 0.791326\n", "[1600]\ttraining's auc: 0.929904\tvalid_1's auc: 0.79151\n", "[1700]\ttraining's auc: 0.934097\tvalid_1's auc: 0.791493\n", "[1800]\ttraining's auc: 0.938231\tvalid_1's auc: 0.791423\n", "Early stopping, best iteration is:\n", "[1649]\ttraining's auc: 0.931961\tvalid_1's auc: 0.791637\n", "Fold 5 AUC : 0.791637\n", "Full AUC score 0.790754\n", "Run lgbm with kfold done in 905s\n", "full run done in 996s\n" ] } ], "source": [ "with timer('full run'):\n", " main()" ] }, { "cell_type": "markdown", "id": "4ffce3ef", "metadata": {}, "source": [ "78.9分" ] }, { "cell_type": "code", "execution_count": null, "id": "13bc0df1", "metadata": {}, "outputs": [], "source": [ "plot_importances(features_importance_df)" ] }, { "cell_type": "code", "execution_count": null, "id": "10a058f8", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "data-analysis", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.25" } }, "nbformat": 4, "nbformat_minor": 5 }