#%%
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import KFold, cross_val_score

import numpy as np
import matplotlib.pyplot as plt

import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers.advanced_activations import ELU
from keras.metrics import mean_absolute_error
from keras.optimizers import Adam
from keras import regularizers  # used by the (commented-out) weight penalties below
%matplotlib inline


#%%
def keras_r2(y_true, y_pred):
    """Coefficient of determination R^2 = 1 - SS_res / SS_tot, as a Keras metric."""
    y_mean = K.mean(y_true)
    ss_total = K.sum(K.square(y_true - y_mean))   # total sum of squares (denominator)
    ss_res = K.sum(K.square(y_true - y_pred))     # residual sum of squares (numerator)
    return 1 - ss_res / (ss_total + K.epsilon())  # epsilon guards against a constant batch
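#%%
# Quick sanity check of keras_r2 against sklearn's r2_score on toy data
# (a sketch; the arrays here are made up for illustration).
from sklearn.metrics import r2_score
yt = np.array([1.0, 2.0, 3.0, 4.0], dtype='float32')
yp = np.array([1.1, 1.9, 3.2, 3.8], dtype='float32')
print(K.eval(keras_r2(K.variable(yt), K.variable(yp))), r2_score(yt, yp))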
#%%
def huber(y_true, y_pred, delta=1.0):
    """Huber loss: quadratic for |error| <= delta, linear beyond it."""
    err = y_pred - y_true
    quad = K.minimum(K.abs(err), delta)
    lin = K.abs(err) - quad
    return K.mean(0.5 * K.square(quad) + delta * lin, axis=-1)
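#%%
# Quick numeric check of the Huber loss (a sketch with made-up values):
# |err| = 0.5 is in the quadratic zone -> 0.5 * 0.5^2 = 0.125;
# |err| = 3.0 is in the linear zone -> 0.5 * 1 + 1 * (3 - 1) = 2.5.
yt = K.variable(np.array([[0.0], [0.0]]))
yp = K.variable(np.array([[0.5], [3.0]]))
print(K.eval(huber(yt, yp)))  # expect approximately [0.125, 2.5]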
#%%
def feature_normalize(feature):
    """Min-max scale each column to [0, 1]; constant columns are left at 0."""
    ans = feature.copy()
    for i in range(ans.shape[1]):
        ans[:, i] -= ans[:, i].min()
        q = ans[:, i].max()
        if q:  # skip constant columns (max is 0 after shifting)
            ans[:, i] = ans[:, i] / q
    return ans
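#%%
# Toy check of feature_normalize: the first column is rescaled to [0, 1],
# and the constant second column stays at 0.
toy = np.array([[1., 10.], [2., 10.], [4., 10.]], dtype='float32')
print(feature_normalize(toy))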
#%%

def get_all_data(in_units):
    """Load the saved feature file, keep the first `in_units` columns, shuffle rows."""
    data = np.load('featurn.npz')
    X = data['X'][:, :in_units]
    y = data['y']
    X = feature_normalize(X).astype('float32')
    y = y.astype('float32')
    indices = np.arange(X.shape[0])
    np.random.shuffle(indices)
    return X[indices], y[indices]
#%%

def get_data(in_units, ratio):
    """Split the shuffled data into train/test sets; `ratio` is the train fraction."""
    X_, y_ = get_all_data(in_units)
    M = int(ratio * X_.shape[0])
    X_train, y_train = X_[:M, :], y_[:M]
    X_test, y_test = X_[M:, :], y_[M:]
    return (X_train, y_train), (X_test, y_test)
#%%
feature_num = 24
(X_train, y_train), (X_test, y_test) = get_data(feature_num, 0.8)
#%%
pca = PCA(n_components=2)
dX1 = pca.fit_transform(X_test)  # PCA is unsupervised, so no y is passed
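#%%
# Diagnostic: how much of the feature variance the 2-D projection retains.
print(pca.explained_variance_ratio_, pca.explained_variance_ratio_.sum())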
#%%
pivot = 7.5
# Color the 2-D projection by whether the target lies above or below the pivot.
plt.scatter(dX1[y_test >= pivot, 0], dX1[y_test >= pivot, 1], c='b', label='y >= pivot')
plt.scatter(dX1[y_test < pivot, 0], dX1[y_test < pivot, 1], c='r', label='y < pivot')
plt.legend()
#%%
model = Sequential()
model.add(Dense(units=100, input_dim=feature_num,
                # kernel_regularizer=regularizers.l2(0.01),
                # activity_regularizer=regularizers.l1(0.01)
                ))
model.add(ELU())
model.add(Dropout(0.08))
model.add(Dense(units=1))
model.add(ELU())  # ELU on the output bounds predictions below by -1
model.compile(
    loss=huber,
    optimizer=Adam(),
    metrics=[keras_r2, mean_absolute_error],
)
#%%
history = model.fit(X_train, y_train, epochs=2000, batch_size=1000)
print(model.evaluate(X_test, y_test))  # [huber loss, R^2, MAE]
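#%%
# Plot the training loss over epochs, using the History object returned by fit.
plt.plot(history.history['loss'])
plt.xlabel('epoch')
plt.ylabel('training loss (huber)')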
#%%
X_test.shape
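#%%
# Visualize the network's intermediate-layer data in 2-D, mirroring the PCA
# scatter of the raw features above. A minimal sketch, assuming the layer of
# interest is the ELU after the first Dense (model.layers[1]); the learning
# phase is fed as 0 so Dropout stays off at inference time.
get_hidden = K.function([model.input, K.learning_phase()],
                        [model.layers[1].output])
hidden = get_hidden([X_test, 0])[0]
dH = PCA(n_components=2).fit_transform(hidden)
plt.scatter(dH[y_test >= pivot, 0], dH[y_test >= pivot, 1], c='b')
plt.scatter(dH[y_test < pivot, 0], dH[y_test < pivot, 1], c='r')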
#%%
data, target = get_all_data(100)  # first 100 features for the sklearn baseline below
#%%
estimator = MLPRegressor(hidden_layer_sizes=(300, 300, 300), max_iter=4000)
kf = KFold(n_splits=5, shuffle=True, random_state=10)
score = cross_val_score(estimator, data, target, cv=kf)  # default regression scoring is R^2
print(score)
print(score.mean())
#%%


# Neural-network intermediate-layer data visualization