基本框架来自于大佬:https://zhuanlan.zhihu.com/p/264801614 具体原理直接跳转原帖

时隔太久,对当时的源码已经非常陌生了,不多解释了,当时的简单思路就是利用多种长短时序做个融合。

以后的项目要及时写下来。。。不要拖

代码:

import pandas as pd
import numpy as np
import keras
from keras.layers import *
from sklearn.preprocessing import StandardScaler


'''
算法思路:
以encoder-decoder LSTM模型,双向LSTM模型,堆叠LSTM 模型,卷积神经网络LSTM 四种模型为基础
再生成多个序列:
1. 一月推导一周序列:n_input = 4*7*8,n_output=7*8,
    其中特征:入库水位,降雨量,环境量
    训练集:初赛2015~2017 
    测试集:训练集分割局部
2. 两周推导一周序列:n_input = 2*7*8,n_output=7*8,
    其中特征:入库水位,降雨量,环境量,雨水预报(推后一周)
    训练集:初赛2015~2017 + 2018年3个月 + 2019年5个月
    测试集:训练集分割局部
3.一周推导一周序列:n_input = 7*8,n_output=7*8;
    其中特征:入库水位,降雨量,环境量,雨水预报(推后一周)
    训练集:初赛2015~2017 + 2018年3个月 + 2019年5个月
    测试集:训练集分割局部
    
共生成3*4=12种模型的预测结果,最终去极端值后进行模型融合;
'''


def create_LSTM_model(model_mode,n_input,n_out,n_features,epochs_num,
                      train_data,train_label,vaild_data,vaild_label):
    """Build, compile and train one of four LSTM-style forecasting models.

    Args:
        model_mode: architecture selector —
            0: encoder-decoder LSTM,
            1: bidirectional LSTM + dense head,
            2: stacked 3-layer LSTM with dropout,
            3: CNN-LSTM (Conv1D front-end, LSTM decoder).
        n_input: input window length in timesteps.
        n_out: forecast horizon in timesteps.
        n_features: number of input features per timestep.
        epochs_num: number of training epochs.
        train_data, train_label: training arrays shaped
            (samples, n_input, n_features) / (samples, n_out).
        vaild_data, vaild_label: validation arrays (parameter names kept
            as-is — including the "vaild" spelling — for backward
            compatibility with keyword callers).

    Returns:
        The fitted Keras model.

    Raises:
        ValueError: if model_mode is not one of 0-3.  Previously an empty
            Sequential was silently compiled and fit() failed later with a
            confusing, unrelated error.
    """
    model = keras.Sequential()
    if model_mode == 0:
        # Encoder-decoder: encode the window into a context vector, repeat
        # it n_out times, decode step-by-step.
        model.add(LSTM(1, activation='relu', input_shape=(n_input, n_features)))
        model.add(RepeatVector(n_out))
        model.add(LSTM(1, activation='relu', return_sequences=True))
        model.add(TimeDistributed(Dense(1, activation='relu')))
        model.add(TimeDistributed(Dense(1)))
    elif model_mode == 1:
        # Bidirectional LSTM followed by a dense layer over the horizon.
        model.add(Bidirectional(LSTM(3, activation='relu'), input_shape=(n_input, n_features)))
        model.add(Dense(n_out))
    elif model_mode == 2:
        # Stacked 3-layer LSTM with dropout for regularisation.
        model.add(LSTM(3, activation='tanh', return_sequences=True, input_shape=(n_input, n_features)))
        model.add(LSTM(3, activation='tanh', return_sequences=True, dropout=0.2, recurrent_dropout=0.2))
        model.add(LSTM(3, activation='tanh', dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(n_out))
    elif model_mode == 3:
        # Conv1D front-end extracts local patterns before the LSTM decoder.
        model.add(Conv1D(filters=16, kernel_size=3, activation='relu', input_shape=(n_input, n_features)))
        model.add(Conv1D(filters=16, kernel_size=3, activation='relu'))
        model.add(MaxPooling1D(pool_size=2))
        model.add(Flatten())
        model.add(RepeatVector(n_out))
        model.add(LSTM(10, activation='relu', return_sequences=True))
        model.add(TimeDistributed(Dense(5, activation='relu')))
        model.add(TimeDistributed(Dense(1)))
    else:
        raise ValueError("model_mode must be one of 0, 1, 2, 3; got %r" % (model_mode,))
    # Compile and fit.  shuffle=False preserves the temporal order of the
    # samples; batch_size=None lets Keras use its default batch size.
    model.compile(optimizer='adam', loss='mse')
    model.fit(train_data, train_label, epochs=epochs_num, batch_size=None, shuffle=False,
              validation_data=(vaild_data, vaild_label))
    return model

def to_supervised(train, n_input, n_out, n_features):
    """Slice one time-series array into supervised (X, y) sliding windows.

    Each sample is ``n_input`` consecutive timesteps of the first
    ``n_features`` columns, paired with the next ``n_out`` values of
    column 0 (the inflow target).  Windows start at every offset.

    Args:
        train: 2-D numpy array, shape (timesteps, >= n_features);
            column 0 is the target series.
        n_input: input window length.
        n_out: output horizon length.
        n_features: number of leading columns used as features.

    Returns:
        (X, y) where X has shape (samples, n_input, n_features) and
        y has shape (samples, n_out); both empty if the series is shorter
        than n_input + n_out.
    """
    X, y = [], []
    # Iterate only over offsets that yield a complete input+output window,
    # instead of scanning the full series and discarding the tail.
    last_start = len(train) - (n_input + n_out)
    for in_start in range(last_start + 1):
        in_end = in_start + n_input
        X.append(train[in_start:in_end, 0:n_features])
        y.append(train[in_end:in_end + n_out, 0])
    return np.array(X), np.array(y)

def to_supervised_list(train_list, n_input, n_out, n_features):
    """Pool supervised (X, y) sliding windows from several series.

    Same windowing as ``to_supervised`` (kept self-contained on purpose),
    but applied to each array in ``train_list``; windows never cross the
    boundary between two arrays, which is why the sources are kept
    separate instead of being concatenated.

    Args:
        train_list: iterable of 2-D numpy arrays, each
            (timesteps, >= n_features) with column 0 as the target.
        n_input: input window length.
        n_out: output horizon length.
        n_features: number of leading columns used as features.

    Returns:
        (X, y) with X of shape (samples, n_input, n_features) and
        y of shape (samples, n_out), pooled over all input arrays.
    """
    X, y = [], []
    for data in train_list:
        # Only offsets with a complete input+output window inside this array.
        last_start = len(data) - (n_input + n_out)
        for in_start in range(last_start + 1):
            in_end = in_start + n_input
            X.append(data[in_start:in_end, 0:n_features])
            y.append(data[in_end:in_end + n_out, 0])
    return np.array(X), np.array(y)
'''                 载入初赛数据              '''

# --- Load the preliminary-round data ---
data = pd.read_excel('Qidata.xlsx') # reservoir inflow (Qi) table, one row per 3 hours
train_old_1 = data[5507:14275].copy().reset_index(drop=True) # second segment of the series
train_old_2 = data[14275:].copy().reset_index(drop=True) # trailing segment (extra training data)
# Rainfall data: one column per gauging station, sampled hourly
Raindata = pd.read_excel('Raindata.xlsx')
raindata_new = pd.DataFrame()
raindata_new['TimeStample'] = Raindata['TimeStample'].copy()
Raindata.drop('TimeStample',axis=1,inplace=True)
# Sum all stations into a single total-rainfall feature
raindata_new['Rain_sum'] = Raindata.apply(lambda x: x.sum(), axis=1)
Rain_train1 = raindata_new[0:43824].copy().reset_index(drop=True)
Rain_train2 = raindata_new[43824:].copy().reset_index(drop=True)

# Resample rainfall so its frequency matches the inflow series
Rain_train1.set_index('TimeStample', inplace=True)
# Inflow is sampled every 3 hours while rainfall is hourly -> sum into 3H bins
Rain_train = Rain_train1.resample('3H').sum()
#print(Qi_train1)
#print(Qi_train1)
# NOTE(review): 5507+333 aligns the resampled rainfall with train_old_1's
# first row; the extra 333 is a magic offset, presumably correcting for a
# gap in the rainfall record — verify against the raw data.
train_old_1['Rain_sum'] = Rain_train['Rain_sum'].values[5507+333:]
Rain_train2.set_index('TimeStample', inplace=True)
# The remaining rainfall is handled as three 31-day (31*24 hourly rows)
# chunks resampled separately — presumably because the chunks are not
# time-contiguous, so a single resample would insert gap bins.
a1 = Rain_train2[0:31*24].resample('3H').sum()
a2 = Rain_train2[31*24:2*31*24].resample('3H').sum()
a3 = Rain_train2[2*31*24:3*31*24].resample('3H').sum()
train_old_2['Rain_sum'] = pd.concat([a1,a2,a3],axis=0,ignore_index=True)




# The organisers' third table is environmental data; it contains missing
# values that must be filled before use.
Environmentdata = pd.read_excel('Environmentdata.xlsx')
# Forward-fill missing temperature (T) and 'w' readings
Environmentdata['T'].fillna(method='ffill',inplace=True)
Environmentdata['w'].fillna(method='ffill',inplace=True)
# 'wd' is standardised (zero mean, unit variance)

ss = StandardScaler()
Environmentdata['wd'] = ss.fit_transform(Environmentdata['wd'].values.reshape(-1,1))
# Environmental variables are daily; repeat each value 8 times so the
# series matches the 3-hour sampling frequency (8 samples per day)
Environment = pd.DataFrame()
Environment['T'] = np.zeros(len(Environmentdata)*8)
Environment['w'] = np.zeros(len(Environmentdata)*8)
Environment['wd'] = np.zeros(len(Environmentdata)*8)
for i in range(len(Environment)):
    Environment['T'][i] = Environmentdata['T'][int(i / 8)]
    Environment['w'][i] = Environmentdata['w'][int(i / 8)]
    Environment['wd'][i] = Environmentdata['wd'][int(i / 8)]
# Attach the upsampled environment features to the two training segments.
# NOTE(review): offset 5840 (= 730 days * 8 samples/day) aligns the
# environment series with train_old_1's first row — confirm against the
# raw tables; train_old_2 continues directly after train_old_1.
train_old_1['T']= Environment['T'][5840:(5840+len(train_old_1))].values
train_old_1['w']= Environment['w'][5840:(5840+len(train_old_1))].values
train_old_1['wd']= Environment['wd'][5840:(5840+len(train_old_1))].values
train_old_2['T']= Environment['T'][(5840+len(train_old_1)):(5840+len(train_old_1)+len(train_old_2))].values
train_old_2['w']= Environment['w'][(5840+len(train_old_1)):(5840+len(train_old_1)+len(train_old_2))].values
train_old_2['wd']= Environment['wd'][(5840+len(train_old_1)):(5840+len(train_old_1)+len(train_old_2))].values

# 添加降雨预报
# Pre_Qi_Raindata = pd.read_excel('降雨预报数据.xlsx')
# pre_raindata_tmp1 = pd.DataFrame()
# pre_raindata_tmp1['TimeStample'] = Pre_Qi_Raindata['TimeStample'].copy()
# Pre_Qi_Raindata.drop('TimeStample',axis=1,inplace=True)
# # 将所有测点的数据求和作为总预测降雨
# pre_raindata_tmp1['pre_rain'] = Pre_Qi_Raindata.apply(lambda x: x.sum(), axis=1)
# Pre_Qi_Raindata = pre_raindata_tmp1.copy().reset_index(drop=True)
# pre_raindata_tmp2 = pd.DataFrame()
# pre_raindata_tmp2['pre_rain'] = Pre_Qi_Raindata['pre_rain'][656:1722]
# pre_raindata_train_1 = pd.DataFrame()
# pre_raindata_train_1['pre_rain'] = np.zeros(len(pre_raindata_tmp2)*8)
# for i in range(len(pre_raindata_train_1)):
#     pre_raindata_train_1['pre_rain'][i] = pre_raindata_tmp2['pre_rain'][int(i / 8)]
# pre_raindata_tmp3 = pd.DataFrame()
# pre_raindata_tmp3['pre_rain'] = Pre_Qi_Raindata['pre_rain'][1722:]
# pre_raindata_train_2 = pd.DataFrame()
# pre_raindata_train_2['pre_rain'] = np.zeros(len(pre_raindata_tmp3)*8)
# for i in range(len(pre_raindata_train_2)):
#     pre_raindata_train_2['pre_rain'][i] = pre_raindata_tmp3['pre_rain'][int(i / 8)]




# for i in range(len(Environment)):
#     Pre_Qi_Raindata['']






'''                 载入决赛数据              '''

# --- Load the final-round data ---
data = pd.read_excel('决赛/入库流量数据.xlsx') # final-round inflow table
test_data = data.copy().reset_index(drop=True)
# (kept for reference) per-segment views of the five 248-step test segments:
# Qi_test1 = data[0:248].copy().reset_index(drop=True)  # segment 1
# Qi_test2 = data[248:496].copy().reset_index(drop=True) # segment 2
# Qi_test3 = data[496:744].copy().reset_index(drop=True) # segment 3
# Qi_test4 = data[744:992].copy().reset_index(drop=True) # segment 4
# Qi_test5 = data[992:1240].copy().reset_index(drop=True) # segment 5


# Rainfall data (per telemetry station, hourly)
Raindata = pd.read_excel('决赛/遥测站降雨数据.xlsx')
raindata_new = pd.DataFrame()
raindata_new['TimeStample'] = Raindata['TimeStample'].copy()
Raindata.drop('TimeStample',axis=1,inplace=True)
#print(Raindata)

# Sum all stations into a single total-rainfall feature
raindata_new['Rain_sum'] = Raindata.apply(lambda x: x.sum(), axis=1)
Rain_test =  raindata_new.copy().reset_index(drop=True)
# Rain_test1 = raindata_new[0:744].copy().reset_index(drop=True)
# Rain_test2 = raindata_new[744:1488].copy().reset_index(drop=True)
# Rain_test3 = raindata_new[1488:2232].copy().reset_index(drop=True)
# Rain_test4 = raindata_new[2232:2976].copy().reset_index(drop=True)
# Rain_test5 = raindata_new[2976:3720].copy().reset_index(drop=True)


# Resample rainfall so its frequency matches the inflow series
Rain_test.set_index('TimeStample', inplace=True)
# Rain_test1.set_index('TimeStample', inplace=True)
# Rain_test2.set_index('TimeStample', inplace=True)
# Rain_test3.set_index('TimeStample', inplace=True)
# Rain_test4.set_index('TimeStample', inplace=True)
# Rain_test5.set_index('TimeStample', inplace=True)

# Inflow is sampled every 3 hours, rainfall hourly; the five 31-day
# (744 hourly rows) chunks are resampled separately — presumably because
# they are not time-contiguous, so one global resample would insert gaps.
#print(len(Rain_test1))
# Rain_test = Rain_test.resample('3H').sum()
#print(len(Rain_test1))
a1 = Rain_test[0:744].resample('3H').sum()
a2 = Rain_test[744:1488].resample('3H').sum()
a3 = Rain_test[1488:2232].resample('3H').sum()
a4 = Rain_test[2232:2976].resample('3H').sum()


a5 = Rain_test[2976:3720].resample('3H').sum()
test_data['Rain_sum'] = pd.concat([a1,a2,a3,a4,a5],axis=0,ignore_index=True)
#print(test_data)

# The organisers' third table is environmental data; same missing-value
# handling and upsampling as for the preliminary round above.
Environmentdata = pd.read_excel('决赛/环境表.xlsx')
print(len(Environmentdata))
# Forward-fill missing temperature (T) and 'w' readings
Environmentdata['T'].fillna(method='ffill',inplace=True)
Environmentdata['w'].fillna(method='ffill',inplace=True)
# 'wd' is standardised (zero mean, unit variance)
ss = StandardScaler()
Environmentdata['wd'] = ss.fit_transform(Environmentdata['wd'].values.reshape(-1,1))
# Environmental variables are daily; repeat each value 8 times to match
# the 3-hour sampling frequency (8 samples per day)
Environment = pd.DataFrame()
Environment['T'] = np.zeros(len(Environmentdata)*8)
Environment['w'] = np.zeros(len(Environmentdata)*8)
Environment['wd'] = np.zeros(len(Environmentdata)*8)
for i in range(len(Environment)):
    Environment['T'][i] = Environmentdata['T'][int(i / 8)]
    Environment['w'][i] = Environmentdata['w'][int(i / 8)]
    Environment['wd'][i] = Environmentdata['wd'][int(i / 8)]
# Attach to the test data.
# NOTE(review): this direct assignment requires len(Environment) ==
# len(test_data) exactly — verify, otherwise pandas raises on length
# mismatch.
test_data['T']= Environment['T'][:].values
test_data['w']= Environment['w'][:].values
test_data['wd']= Environment['wd'][:].values


# 添加降雨预报
# Pre_Raindata = pd.read_excel('决赛/降雨预报数据.xlsx')
# pre_raindata_tmp1 = pd.DataFrame()
# pre_raindata_tmp1['TimeStample'] = Pre_Qi_Raindata['TimeStample'].copy()
# Pre_Qi_Raindata.drop('TimeStample',axis=1,inplace=True)
# # 将所有测点的数据求和作为总预测降雨
# pre_raindata_tmp1['pre_rain'] = Pre_Qi_Raindata.apply(lambda x: x.sum(), axis=1)
# Pre_Qi_Raindata = pre_raindata_tmp1.copy().reset_index(drop=True)
# pre_raindata_tmp2 = pd.DataFrame()
# pre_raindata_tmp2['pre_rain'] = Pre_Qi_Raindata['pre_rain']
# pre_raindata_test = pd.DataFrame()
# pre_raindata_test['pre_rain'] = np.zeros(len(pre_raindata_tmp2)*8)
# for i in range(len(pre_raindata_test)):
#     pre_raindata_test['pre_rain'][i] = pre_raindata_tmp2['pre_rain'][int(i / 8)]
# print(pre_raindata_test)


'''                 制作训练集序列              '''

# --- Build the training sequences ---
# Round-trip the frames through CSV, then reload only the five model
# features (Qi, Rain_sum, T, w, wd) as plain numpy arrays.
train_old_1.to_csv('决赛/trainData1.csv')
train_old_2.to_csv('决赛/trainData2.csv')
test_data.to_csv('决赛/trainData3.csv')

train_old_1 = pd.read_csv('决赛/trainData1.csv',usecols=['Qi', 'Rain_sum','T','w','wd']).values
train_old_2 = pd.read_csv('决赛/trainData2.csv',usecols=['Qi', 'Rain_sum','T','w','wd']).values
test_data = pd.read_csv('决赛/trainData3.csv',usecols=['Qi', 'Rain_sum','T','w','wd']).values
# 1. "one month -> one week" sequence: input 4 weeks (4*7*8 steps),
#    output 1 week (7*8 steps).  Trained on train_old_1 only.
# def to_supervised(train, n_input, n_out, n_features):
# trainData_30_7 = pd.DataFrame()
# trainData_30_7['Qi'] = train_old_1['Qi'].values
# trainData_30_7['Rain_sum'] = train_old_1['Rain_sum'].values
# trainData_30_7['T'] = train_old_1['T'].values
# trainData_30_7['w'] = train_old_1['w'].values
# trainData_30_7['wd'] = train_old_1['wd'].values
# print(trainData_30_7)
n_input = 4*7*8
n_out = 7*8
n_features = 5
# Chronological 80/20 split: first len*8//10 rows train, the rest validate
len_train = (len(train_old_1)*8) // 10

train_data = train_old_1[:len_train].copy()
vaild_data = train_old_1[len_train:].copy()
train_x_30_7, train_y_30_7 = to_supervised(train_data, n_input, n_out,n_features)
vaild_x_30_7, vaild_y_30_7 = to_supervised(vaild_data, n_input, n_out,n_features)

# 2. "two weeks -> one week" sequence: input 2 weeks, output 1 week.
# Pools windows from train_old_1, train_old_2 and test_data.
# NOTE(review): the header docstring says this sequence adds a
# rainfall-forecast feature, but that loading code is commented out, so
# the same 5 features are used here.
trainData_14_7_1 = train_old_1.copy()  # NOTE(review): assigned but never used below
n_input = 2*7*8
n_out = 7*8
n_features = 5
train_data = train_old_1[:len_train].copy()
vaild_data = train_old_1[len_train:].copy()
# Split the extra sources into chunks of 31*7*8 rows each so windows
# never span a chunk boundary (see to_supervised_list).
trainData_14_7_2 = train_old_2[:31*7*8,0:n_features]
trainData_14_7_3 = train_old_2[31*7*8:2*31*7*8,0:n_features]
trainData_14_7_4 = train_old_2[2*31*7*8:,0:n_features]
trainData_14_7_5 = test_data[:31*7*8,0:n_features]
trainData_14_7_6 = test_data[31*7*8:2*31*7*8,0:n_features]
trainData_14_7_7 = test_data[2*31*7*8:3*31*7*8,0:n_features]
trainData_14_7_8 = test_data[3*31*7*8:4*31*7*8,0:n_features]
trainData_14_7_9 = test_data[4*31*7*8:,0:n_features]

# Most chunks go to training; the last chunk of each source (plus the
# validation tail of train_old_1) goes to validation.
train_14_7_list,vaild_14_7_list = list(),list()
train_14_7_list.append(train_data)
train_14_7_list.append(trainData_14_7_2)
train_14_7_list.append(trainData_14_7_3)
train_14_7_list.append(trainData_14_7_5)
train_14_7_list.append(trainData_14_7_6)
train_14_7_list.append(trainData_14_7_7)
train_14_7_list.append(trainData_14_7_8)
vaild_14_7_list.append(vaild_data)
vaild_14_7_list.append(trainData_14_7_4)
vaild_14_7_list.append(trainData_14_7_9)

train_x_14_7,train_y_14_7 = to_supervised_list(train_14_7_list, n_input, n_out,n_features)
vaild_x_14_7,vaild_y_14_7 = to_supervised_list(vaild_14_7_list, n_input, n_out,n_features)


# 3. "one week -> one week" sequence: input 1 week, output 1 week.
# Reuses the chunk lists built for the 14->7 sequence; only the input
# window length changes.
n_input = 1*7*8
n_out = 7*8
n_features = 5
train_x_7_7,train_y_7_7 = to_supervised_list(train_14_7_list, n_input, n_out,n_features)
vaild_x_7_7,vaild_y_7_7 = to_supervised_list(vaild_14_7_list, n_input, n_out,n_features)

# --- Build the test-set inputs ---
# test_data consists of five consecutive 248-step (31-day) segments; for
# each segment the trailing n_weeks*7*8 steps form one prediction input.
# The three sequence lengths (4 / 2 / 1 weeks) previously duplicated the
# same slice/reshape statement fifteen times.
def _weekly_windows(arr, n_weeks, n_features=5, segment=248):
    """Return an array of shape (5, n_weeks*7*8, n_features): for each of
    the five 248-step segments of `arr`, the trailing n_weeks*7*8 rows of
    the first n_features columns."""
    win = n_weeks * 7 * 8
    return np.vstack([
        arr[k * segment - win:k * segment, 0:n_features].reshape((1, win, n_features))
        for k in range(1, 6)
    ])

test_30_7 = _weekly_windows(test_data, 4)  # 4-week history per segment
test_14_7 = _weekly_windows(test_data, 2)  # 2-week history per segment
test_7_7 = _weekly_windows(test_data, 1)   # 1-week history per segment

# --- Model training and ensemble prediction ---
# Only model modes in [start_index, end_index) are trained.  With 2..4
# that is the stacked-LSTM and CNN-LSTM architectures: 2 models x 3
# sequence lengths = 6 predictions summed into yhat — not the 12 models
# the header docstring describes.
start_index =2
end_index = 4
# 30_7 sequence (4 weeks -> 1 week)
for i in range(start_index,end_index):
    print("begin ",str(i)," model:")
    # model train
    n_input = 4*7*8
    n_out = 7*8
    n_features = 5
    epochs_num = 32
    model = create_LSTM_model(i,n_input,n_out,n_features,epochs_num,
                              train_x_30_7,train_y_30_7,vaild_x_30_7,vaild_y_30_7)

    # First model initialises the accumulator; later ones are summed in.
    # Predictions are reshaped to (5 segments, 56 = 7*8 forecast steps).
    if i == start_index:
        yhat = model.predict(test_30_7).reshape((5, 56))
    else:
        yhat += model.predict(test_30_7).reshape((5, 56))

# 14_7 sequence (2 weeks -> 1 week)
for i in range(start_index,end_index):
    print("begin ",str(i)," model:")
    # model train
    n_input = 2*7*8
    n_out = 7*8
    n_features = 5
    epochs_num = 32
    model = create_LSTM_model(i,n_input,n_out,n_features,epochs_num,
                              train_x_14_7,train_y_14_7,vaild_x_14_7,vaild_y_14_7)
    yhat += model.predict(test_14_7).reshape((5, 56))


# 7_7 sequence (1 week -> 1 week)
for i in range(start_index,end_index):
    print("begin ",str(i)," model:")
    # model train
    n_input = 1*7*8
    n_out = 7*8
    n_features = 5
    epochs_num = 32
    model = create_LSTM_model(i,n_input,n_out,n_features,epochs_num,
                                  train_x_7_7,train_y_7_7,vaild_x_7_7,vaild_y_7_7)
    yhat += model.predict(test_7_7).reshape((5, 56))

# Average the summed predictions over all trained models
# ((end_index-start_index) modes * 3 sequence lengths).
for i in range(len(yhat)):
    yhat[i] = yhat[i] / ((end_index-start_index)*3)

# Write the averaged forecasts into the submission CSV, one row per segment
submit = pd.read_csv('submission.csv',index_col=0)
for i in range(len(yhat)):
    submit.iloc[i] = yhat[i]
submit.to_csv("决赛/submissiontest.csv")