本文用到的包:

%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.base import ClassifierMixin
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.datasets import make_blobs, load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix

sns.set()
plt.rc('font', family='SimHei')
plt.rc('axes', unicode_minus=False)

随机森林(Random Forest)

随机森林是一种建立在决策树(Decision Tree)的基础之上的集成学习器
决策树在sklearn中由DecisionTreeClassifier/DecisionTreeRegressor类实现,随机森林由RandomForestClassifier/RandomForestRegressor类实现;

决策树

决策树在每一个节点上都会依据某一标准将结果的可能性减半,能够很快的缩小范围;
但是随着决策树的深度加深,决策树很容易出现过拟合;
如下面的示例所示,同一组数据分成两个部分分别进行拟合得到的决策树会有很大的差别;

def decision_tree_visualization(x_train, y_train, model: ClassifierMixin, ax: plt.Axes = None) -> None:
    """Scatter-plot a 2-feature training set, fit *model* on it, and shade
    the model's decision regions over the axes' current limits.

    Parameters
    ----------
    x_train : array of shape (n_samples, 2)
        Training points; the first two columns are plotted as x/y.
    y_train : array of shape (n_samples,)
        Integer class labels, used both for fitting and point colors.
    model : ClassifierMixin
        An unfitted sklearn classifier; it is fitted in place.
    ax : plt.Axes, optional
        Target axes; a new 10x10 figure is created when omitted.
    """
    # `is None` instead of truthiness: an Axes object should never be
    # tested as a boolean to decide whether it was supplied.
    if ax is None:
        _, ax = plt.subplots(figsize=(10, 10))

    # plt.cm.get_cmap(name, lut=...) was deprecated in matplotlib 3.7 and
    # removed in 3.9; plt.get_cmap(name, 3) is the supported spelling for
    # a 3-level discretized colormap.
    ax.scatter(
        x=x_train[:, 0],
        y=x_train[:, 1],
        c=y_train,
        edgecolors='k',
        cmap=plt.get_cmap('rainbow', 3)
    )

    model.fit(x_train, y_train)
    # Evaluate the classifier on a 200x200 grid spanning the visible area
    # (limits are read after scatter so they cover the data).
    xx, yy = np.meshgrid(np.linspace(*ax.get_xlim(), 200), np.linspace(*ax.get_ylim(), 200))
    res = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # One filled contour band per predicted class, centered on the labels.
    ax.contourf(
        xx, yy, res,
        levels=np.arange(len(np.unique(res)) + 1) - 0.5,
        cmap='rainbow',
        alpha=0.3
    )


# Build a 3-cluster toy data set with heavy overlap (cluster_std=4),
# then fit one decision tree on each half of a 50/50 split to show how
# unstable a single fully-grown tree is.
x, y = make_blobs(
    n_samples=200,
    n_features=2,
    centers=3,
    random_state=233,
    cluster_std=4,
)
half_a_x, half_b_x, half_a_y, half_b_y = train_test_split(x, y, random_state=233, test_size=0.5)
fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(20, 10))
decision_tree_visualization(half_a_x, half_a_y, DecisionTreeClassifier(), left_ax)
decision_tree_visualization(half_b_x, half_b_y, DecisionTreeClassifier(), right_ax)
left_ax.set_title('决策树 0 分类结果')
right_ax.set_title('决策树 1 分类结果')

结果:

python随机森林选股 python 随机森林代码_python随机森林选股

袋装算法

袋装算法指的是将多个过拟合的评估器组合起来以降低整体过拟合的程度的算法,而对应决策树的袋装算法就是随机森林;
使用随机森林对上面的数据再次进行拟合,得到的结果如下:

# Bagging demo: 100 trees, each trained on a random 75% sample of the
# data, vote together and smooth out the single-tree overfitting above.
forest = RandomForestClassifier(n_estimators=100, max_samples=0.75)
_, ax_random_tree = plt.figure(figsize=(10, 10)), plt.axes()
decision_tree_visualization(x, y, forest, ax_random_tree)
ax_random_tree.set_title('随机森林分类结果')

python随机森林选股 python 随机森林代码_python_02

随机森林回归

随机森林也可以用于回归任务,无参数的随机森林回归十分适合处理多周期的数据,得到的结果一般是锯齿形的线条:
下面是用RandomForestRegressor拟合两个正弦波的叠加:

# Fit the sum of two sine waves (plus uniform noise) with a random
# forest regressor; piecewise-constant trees yield a jagged curve.
x_fit = np.random.rand(100)[:, np.newaxis] * 2 * np.pi - np.pi
y_fit = np.sin(x_fit) + np.sin(2 * x_fit) + 0.5 * (np.random.rand(100)[:, np.newaxis] - 0.5)
x_test = np.linspace(-np.pi, np.pi, 200)[:, np.newaxis]
y_true = np.sin(x_test) + np.sin(2 * x_test)

model = RandomForestRegressor(n_estimators=100)
# ravel(): sklearn expects a 1-d target; passing the (100, 1) column
# vector raises a DataConversionWarning and implicitly flattens it.
model.fit(x_fit, y_fit.ravel())
y_test = model.predict(x_test)

_, axs = plt.subplots(1, 2, figsize=(20, 10))
axs[0].plot(x_fit, y_fit, linestyle='', marker='o', label='训练数据')
axs[0].plot(x_test, y_true, label='真实曲线')
axs[0].set_title('训练数据与真实曲线')
axs[0].legend()

axs[1].plot(x_fit, y_fit, linestyle='', marker='o', label='训练数据')
axs[1].plot(x_test, y_test, label='拟合曲线')
axs[1].set_title('训练数据与拟合曲(zhe)线')
axs[1].legend()

python随机森林选股 python 随机森林代码_python随机森林选股_03

案例:使用随机森林识别手写数字

这里使用sklearn提供的手写数字数据集,加载数据的函数是load_digits;
使用分类报告(classification_report)和混淆矩阵(confusion_matrix)查看结果,一个简单的随机森林分类器最后的效果似乎很不错(97%的准确率):

# Hand-written digit recognition with a default random forest; print
# per-class metrics and draw the confusion matrix as a heatmap.
digits = load_digits()
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.3)

model = RandomForestClassifier()
model.fit(x_train, y_train)
res = model.predict(x_test)

print('分类结果报告:')
print(classification_report(y_test, res))

print('混淆矩阵:')
mat = confusion_matrix(y_test, res)
plt.figure(figsize=(10, 10))
# Transposed so rows = predictions, columns = truth (matches the labels).
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='Greens')
plt.xlabel('真实值')
plt.ylabel('预测值')

python随机森林选股 python 随机森林代码_机器学习_05

完整代码(Jupyter Notebook)

#%%

%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.base import ClassifierMixin
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.datasets import make_blobs, load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix

sns.set()
plt.rc('font', family='SimHei')
plt.rc('axes', unicode_minus=False)

#%% md

# 随机森林

随机森林是建立在决策树的基础上的**集成学习器**;

## 决策树

决策树在每一个节点上都会依据某一标准将结果的可能性减半,能够很快的缩小范围;
但是随着决策树的深度加深,决策树很容易出现过拟合;
如下图所示,同一组数据分成两个部分进行拟合得到的决策树会有很大的差别;

#%%

def decision_tree_visualization(x_train, y_train, model: ClassifierMixin, ax: plt.Axes = None) -> None:
    """Scatter-plot a 2-feature training set, fit *model* on it, and shade
    the model's decision regions over the axes' current limits.

    Parameters
    ----------
    x_train : array of shape (n_samples, 2)
        Training points; the first two columns are plotted as x/y.
    y_train : array of shape (n_samples,)
        Integer class labels, used both for fitting and point colors.
    model : ClassifierMixin
        An unfitted sklearn classifier; it is fitted in place.
    ax : plt.Axes, optional
        Target axes; a new 10x10 figure is created when omitted.
    """
    # `is None` instead of truthiness: an Axes object should never be
    # tested as a boolean to decide whether it was supplied.
    if ax is None:
        _, ax = plt.subplots(figsize=(10, 10))

    # plt.cm.get_cmap(name, lut=...) was deprecated in matplotlib 3.7 and
    # removed in 3.9; plt.get_cmap(name, 3) is the supported spelling for
    # a 3-level discretized colormap.
    ax.scatter(
        x=x_train[:, 0],
        y=x_train[:, 1],
        c=y_train,
        edgecolors='k',
        cmap=plt.get_cmap('rainbow', 3)
    )

    model.fit(x_train, y_train)
    # Evaluate the classifier on a 200x200 grid spanning the visible area
    # (limits are read after scatter so they cover the data).
    xx, yy = np.meshgrid(np.linspace(*ax.get_xlim(), 200), np.linspace(*ax.get_ylim(), 200))
    res = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # One filled contour band per predicted class, centered on the labels.
    ax.contourf(
        xx, yy, res,
        levels=np.arange(len(np.unique(res)) + 1) - 0.5,
        cmap='rainbow',
        alpha=0.3
    )


# Build a 3-cluster toy data set with heavy overlap (cluster_std=4),
# then fit one decision tree on each half of a 50/50 split to show how
# unstable a single fully-grown tree is.
x, y = make_blobs(
    n_samples=200,
    n_features=2,
    centers=3,
    random_state=233,
    cluster_std=4,
)
half_a_x, half_b_x, half_a_y, half_b_y = train_test_split(x, y, random_state=233, test_size=0.5)
fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(20, 10))
decision_tree_visualization(half_a_x, half_a_y, DecisionTreeClassifier(), left_ax)
decision_tree_visualization(half_b_x, half_b_y, DecisionTreeClassifier(), right_ax)
left_ax.set_title('决策树 0 分类结果')
right_ax.set_title('决策树 1 分类结果')

#%% md

## 袋装算法

组合多个过拟合的评估器,来降低整体的过拟合程度,称为**袋装算法**,对决策树的集成得到的结果就是随机森林;

#%%

# Bagging demo: 100 trees, each trained on a random 75% sample of the
# data, vote together and smooth out the single-tree overfitting above.
forest = RandomForestClassifier(n_estimators=100, max_samples=0.75)
_, ax_random_tree = plt.figure(figsize=(10, 10)), plt.axes()
decision_tree_visualization(x, y, forest, ax_random_tree)
ax_random_tree.set_title('随机森林分类结果')

#%% md

## 随机森林回归

随机森林算法也可以用于回归任务,无参数的随机森林模型十分适合处理多周期的数据,拟合获得的结果一般是锯齿形的曲线;

#%%

# Fit the sum of two sine waves (plus uniform noise) with a random
# forest regressor; piecewise-constant trees yield a jagged curve.
x_fit = np.random.rand(100)[:, np.newaxis] * 2 * np.pi - np.pi
y_fit = np.sin(x_fit) + np.sin(2 * x_fit) + 0.5 * (np.random.rand(100)[:, np.newaxis] - 0.5)
x_test = np.linspace(-np.pi, np.pi, 200)[:, np.newaxis]
y_true = np.sin(x_test) + np.sin(2 * x_test)

model = RandomForestRegressor(n_estimators=100)
# ravel(): sklearn expects a 1-d target; passing the (100, 1) column
# vector raises a DataConversionWarning and implicitly flattens it.
model.fit(x_fit, y_fit.ravel())
y_test = model.predict(x_test)

_, axs = plt.subplots(1, 2, figsize=(20, 10))
axs[0].plot(x_fit, y_fit, linestyle='', marker='o', label='训练数据')
axs[0].plot(x_test, y_true, label='真实曲线')
axs[0].set_title('训练数据与真实曲线')
axs[0].legend()

axs[1].plot(x_fit, y_fit, linestyle='', marker='o', label='训练数据')
axs[1].plot(x_test, y_test, label='拟合曲线')
axs[1].set_title('训练数据与拟合曲(zhe)线')
axs[1].legend()

#%% md

## 使用随机森林识别手写数字

一个简单的随机森林分类器对于手写数字的分类似乎很不错;

#%%

# Hand-written digit recognition with a default random forest; print
# per-class metrics and draw the confusion matrix as a heatmap.
digits = load_digits()
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.3)

model = RandomForestClassifier()
model.fit(x_train, y_train)
res = model.predict(x_test)

print('分类结果报告:')
print(classification_report(y_test, res))

print('混淆矩阵:')
mat = confusion_matrix(y_test, res)
plt.figure(figsize=(10, 10))
# Transposed so rows = predictions, columns = truth (matches the labels).
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='Greens')
plt.xlabel('真实值')
plt.ylabel('预测值')