def sign(x):
    """
    Return the sign of a number.

    :param x: a float
    :return: integer 1 if x is positive, -1 if negative, 0 if zero
    """
    if x == 0:
        return 0
    return 1 if x > 0 else -1

# Vectorize the scalar sign function with numpy so it can be applied
# element-wise to arrays (used on the weight vector in l1_loss).
vec_sign = np.vectorize(sign)

# LASSO regression loss function and gradients
def l1_loss(X, y, w, b, alpha):
    """
    Compute the LASSO regression loss and its parameter gradients.

    :param X: input feature matrix, shape (num_train, num_features)
    :param y: target vector, shape (num_train, 1)
    :param w: weight vector, shape (num_features, 1)
    :param b: bias scalar
    :param alpha: L1 regularization coefficient
    :return: tuple (y_hat, loss, dw, db) where
        y_hat: linear model predictions,
        loss: mean squared error plus L1 penalty,
        dw: gradient of the loss w.r.t. w,
        db: gradient of the loss w.r.t. b
    """
    # Number of training samples (the unused num_features local was removed)
    num_train = X.shape[0]

    # Linear model prediction
    y_hat = np.dot(X, w) + b

    # Mean squared error plus L1 penalty on the weights
    loss = np.sum((y_hat - y) ** 2) / num_train + alpha * np.sum(np.abs(w))

    # Gradients of the loss. np.sign is already vectorized and matches the
    # hand-rolled sign() on finite floats, so the vec_sign wrapper is not needed.
    # (The L1 term is non-differentiable at 0; sign(0) = 0 is the usual
    # subgradient choice, which np.sign also returns.)
    dw = np.dot(X.T, (y_hat - y)) / num_train + alpha * np.sign(w)
    db = np.sum(y_hat - y) / num_train

    return y_hat, loss, dw, db


def initialize_params(dims):
    """
    Create zero-valued initial parameters for a linear model.

    :param dims: number of input features
    :return: tuple (w, b) — w is a (dims, 1) array of zeros, b is the scalar 0
    """
    # Both the weight vector and the bias start at zero.
    return np.zeros((dims, 1)), 0

# Training loop for the LASSO regression model
def lasso_train(X, y, learning_rate=0.01, epochs=1000, alpha=0.1):
    """
    Train a LASSO regression model with batch gradient descent.

    :param X: input feature matrix, shape (num_train, num_features)
    :param y: target vector, shape (num_train, 1)
    :param learning_rate: gradient descent step size
    :param epochs: number of training iterations
    :param alpha: L1 regularization coefficient (previously hard-coded to 0.1;
        the default preserves the old behavior)
    :return: tuple (loss_his, params, grads) — per-epoch loss history, the
        final parameters {'w', 'b'}, and the final gradients {'dw', 'db'}
    """
    # Per-epoch training loss history
    loss_his = []

    # Zero-initialize the model parameters
    w, b = initialize_params(dims=X.shape[1])

    params, grads = {}, {}

    # Fix: the original range(1, epochs) performed only epochs - 1 updates.
    for i in range(1, epochs + 1):
        # Predictions, loss, and gradients for the current parameters
        y_hat, loss, dw, db = l1_loss(X=X, y=y, w=w, b=b, alpha=alpha)

        # Gradient descent parameter update
        w += -learning_rate * dw
        b += -learning_rate * db

        loss_his.append(loss)

        # Report progress every 50 iterations
        if i % 50 == 0:
            print(f'epoch: {i}, loss: {loss}.')

        # Keep the latest parameters and gradients for the caller
        params = {
            'w': w,
            'b': b
        }
        grads = {
            'dw': dw,
            'db': db
        }

    return loss_his, params, grads