import numpy as np
import os
import datetime
import tensorflow as tf
import h5py
from ops import *
from read_hdf5 import *
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Activation,Convolution2D,MaxPooling2D,Flatten,Dropout
from keras.optimizers import Adam
# ---- configuration ----
feature_format = 'tfrecord'  # NOTE(review): unused below — features are actually read from the HDF5 file
feature_path = '/home/rainy/tlj/dcase/h5/train_fold1.h5'  # training features + labels (HDF5)
statistical_parameter_path = '/home/rainy/Desktop/model_xception/statistical_parameter.hdf5'  # unused here (see commented-out h5py open below)
save_path = '/home/rainy/Desktop/model_xception'  # NOTE(review): never used — the trained model is not saved
max_epoch = 20  # number of training epochs passed to fit()
high = 465  # feature height — presumably time frames per example; verify against load_hdf5
wide = 128  # feature width — presumably frequency bins; matches feature.shape[2] in the normalization below
shape = high * wide  # flattened feature size (unused below)
keep_prob = 1  # dropout keep probability (unused — no Dropout layers are added)
max_batch_size = 50  # mini-batch size passed to fit()
#fp = h5py.File(statistical_parameter_path, 'r')
starttime = datetime.datetime.now()  # wall-clock start; elapsed seconds reported at the end
# ---- load and shuffle training data ----
feature, label = load_hdf5(feature_path)
index_shuffle = np.arange(feature.shape[0])
np.random.shuffle(index_shuffle)
feature = feature[index_shuffle]
label = label[index_shuffle]

# Per-frequency-bin standardization: mean and variance are computed over all
# examples and all time frames for each of the feature.shape[2] bins.
# The original computed these with a Python loop per bin and then normalized
# frame-by-frame in a double Python loop; broadcasting does the same work
# vectorized. astype() preserves the original dtype (the in-place row
# assignment in the original kept feature's dtype).
feature_mean = feature.mean(axis=(0, 1))
feature_var = feature.var(axis=(0, 1))
feature = ((feature - feature_mean) / np.sqrt(feature_var)).astype(feature.dtype)

# One-hot encode the 10 scene-class labels (replaces the per-row loop).
y_data = np.zeros((label.shape[0], 10), dtype=int)
y_data[np.arange(label.shape[0]), label] = 1

# Reshape to channels_first 4-D tensors: (N, 1, 465, 128).
feature = feature.reshape([-1, 1, 465, 128])

# load testing data
test_feature, test_label = read_data()
test_feature = test_feature.reshape([-1, 1, 465, 128])
# ---- learning-rate schedule parameters ----
LEARNING_RATE_BASE = 0.001   # initial Adam learning rate
LEARNING_RATE_DECAY = 0.1    # intended multiplicative decay factor (see note)
LEARNING_RATE_STEP = 300     # intended steps per decay interval (see note)
# NOTE(review): the original built a tf.train.exponential_decay tensor on a
# tf.Variable global step, but Keras' fit() never increments that variable,
# so the schedule was inert — the effective rate stayed at the base value —
# and old Keras' Adam(lr=...) expects a Python float, not a TF tensor.
# Use the constant base rate here; to get real stepwise decay, pass a
# keras.callbacks.LearningRateScheduler to fit() instead.
learning_rate = LEARNING_RATE_BASE
# ---- CNN: two conv+pool blocks, then a 100-unit dense layer and softmax ----
# Input tensors are (N, 1, 465, 128) — channels_first — so every spatial layer
# must declare data_format='channels_first'. The original set it only on the
# pooling layers; the conv layers silently defaulted to channels_last, which
# disagreed with the input layout. The first layer also needs input_shape in
# this Keras generation (Convolution2D / np_utils era).
model = Sequential()
# Conv block 1: 3x3 conv, 32 filters, same padding -> N x 32 x 465 x 128
model.add(Convolution2D(
    filters=32,
    kernel_size=3,
    padding='same',
    data_format='channels_first',
    input_shape=(1, 465, 128),
))
model.add(Activation('relu'))
# 2x2 max pooling -> N x 32 x 232 x 64
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='valid',
    data_format='channels_first'
))
# Conv block 2: 5x5 conv, 64 filters, same padding -> N x 64 x 232 x 64
model.add(Convolution2D(
    filters=64,
    kernel_size=5,
    padding='same',
    data_format='channels_first'
))
model.add(Activation('relu'))
# 2x2 max pooling -> N x 64 x 116 x 32
model.add(MaxPooling2D(
    pool_size=(2, 2),
    strides=(2, 2),
    padding='valid',
    data_format='channels_first'
))
# Flatten -> N x (64*116*32), then classifier head
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
# Output layer: 10 classes, softmax (matches the one-hot y_data)
model.add(Dense(10))
model.add(Activation('softmax'))
adam = Adam(lr=learning_rate)
model.compile(
    optimizer=adam,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
print('Training--------------------------')
model.fit(feature, y_data, epochs=max_epoch, batch_size=max_batch_size)
print('Testing')
# The original evaluated on the *training* data here, leaving the test set
# loaded earlier completely unused. Evaluate on the held-out fold instead;
# test labels must be one-hot encoded to match the softmax output.
y_test = np.zeros((test_label.shape[0], 10), dtype=int)
y_test[np.arange(test_label.shape[0]), test_label] = 1
test_loss = model.evaluate(test_feature, y_test)
print('test loss:', test_loss)
endtime = datetime.datetime.now()
print("code finish time is:", (endtime - starttime).seconds)