import os
import cv2
import numpy as np
import tensorflow as tf

h_pool2_flat = 12288  # flattened input size: 64 * 64 * 3
n_classes = 1         # output dimension
# Data paths
data_home = 'data/imgdata/'

# Build the image-name -> label mapping
image_to_label = {}
with open('data/label.txt', 'r') as fr:
    lines = fr.readlines()
    for line in lines:
        line = line.strip()
        key, value = line.split(' ')
        # Labels are read as strings; cast to float for the regression loss below
        image_to_label[key] = float(value)
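
# Assumed format of data/label.txt (inferred from the parsing above): one
# "<image filename> <label>" pair per line; the example values are hypothetical.
#   0001.jpg 0.0
#   0002.jpg 1.0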

# Build the training set
X_train = []
y_train = []
for jpg in os.listdir(data_home):
    image = cv2.imread(data_home + jpg)
    x = cv2.resize(image, (64, 64))
    X_train.append(x)
    y = image_to_label[jpg]
    y_train.append(y)

# Flatten each 64x64x3 image to a 12288-dim row vector
X_train = np.array(X_train, dtype=np.float32).reshape((len(X_train), -1))
y_train = np.array(y_train, dtype=np.float32).reshape((-1, 1))


# print(X_train.shape)
# print(y_train)

def get_biases(shape):
    '''
    Define a bias variable with tf.get_variable(name, shape, dtype, initializer):
      1. name: "biases"
      2. shape: [size], i.e. [output dimension]
      3. dtype: tf.float32
      4. initializer: constant initialization via
         tf.constant_initializer(value), with value = 0.1

    :param shape: output dimension of the bias
    :return: bias variable
    '''
    # 1. Define the initializer with tf.constant_initializer(0.1)
    initial = tf.constant_initializer(0.1)
    # 2. Create the bias variable with tf.get_variable
    biases = tf.get_variable("biases",
                             shape=shape,
                             dtype=tf.float32,
                             initializer=initial)
    return biases
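
# Usage sketch (illustrative only): "biases" is created by name, so each call
# needs its own variable scope; the scope name "demo" here is hypothetical.
# with tf.variable_scope("demo"):
#     b = get_biases([128])   # -> variable "demo/biases", shape (128,)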


def get_weights(shape):
    '''
    Define a parameter variable (conv kernel / weights) with
    tf.get_variable(name, shape, dtype, initializer):
      1. name: "weights"
      2. shape: depends on the variable being created
      3. dtype: tf.float32
      4. initializer: truncated normal initialization via
         tf.truncated_normal_initializer(stddev, dtype), with
         stddev=0.005 (standard deviation of the generated values) and
         dtype=tf.float32 (only floating-point types are supported)

    :param shape: shape of the parameter
    :return: parameter variable
    '''
    # 1. Define the initializer with tf.truncated_normal_initializer
    initial = tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32)
    # 2. Create the weight variable with tf.get_variable
    weights = tf.get_variable("weights",
                              shape=shape,
                              dtype=tf.float32,
                              initializer=initial)
    return weights
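
# Usage sketch (illustrative only): the same helper serves conv kernels and
# fully connected weights; only the shape changes. Scope names are hypothetical.
# with tf.variable_scope("conv_demo"):
#     kernel = get_weights([5, 5, 3, 32])        # 5x5 conv, 3 -> 32 channels
# with tf.variable_scope("fc_demo"):
#     w = get_weights([h_pool2_flat, 128])       # 12288 -> 128 fc weights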


# A single fully connected layer
def add_fc_layer(inputs, insize, outsize, activation_function=None):
    '''
    Add a fully connected layer:
      1. define the weights
      2. define the bias
      3. compute inputs * weights + bias
      4. if no activation function is given, return the softmax result;
         otherwise return the result of activation_function

    :param inputs: input tensor
    :param insize: input dimension
    :param outsize: output dimension
    :param activation_function: activation function
    :return: layer output
    '''
    # 1. Get the weights via get_weights()
    weights = get_weights([insize, outsize])
    # 2. Get the bias via get_biases()
    bias = get_biases([outsize])
    # 3. Multiply inputs and weights with tf.matmul, then add the bias
    wx_plus_b = tf.matmul(inputs, weights) + bias
    # 4. Apply softmax if no activation function was given, else apply it
    if activation_function is None:
        outputs = tf.nn.softmax(wx_plus_b)
    else:
        outputs = activation_function(wx_plus_b)
    return outputs
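
# Usage sketch (illustrative only): one 12288 -> 128 ReLU layer. add_fc_layer
# creates "weights"/"biases" internally, so wrap each call in its own variable
# scope; "fc_example" and demo_input are hypothetical.
# with tf.variable_scope("fc_example", reuse=tf.AUTO_REUSE):
#     hidden = add_fc_layer(demo_input, h_pool2_flat, 128, tf.nn.relu)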


# The full model

def inference(inputs):
    # 1. First fully connected layer: 12288 -> 128, ReLU
    with tf.variable_scope("fc1", reuse=tf.AUTO_REUSE):
        # h_pool2_flat = tf.reshape(inputs, shape=[-1, 12288])
        fc1 = add_fc_layer(inputs, h_pool2_flat, 128, tf.nn.relu)
    # 2. Second fully connected layer: 128 -> 64, tanh
    with tf.variable_scope("fc2", reuse=tf.AUTO_REUSE):
        fc2 = add_fc_layer(fc1, 128, 64, tf.nn.tanh)
    # 3. Third fully connected layer: 64 -> 32, sigmoid
    with tf.variable_scope("fc3", reuse=tf.AUTO_REUSE):
        fc3 = add_fc_layer(fc2, 64, 32, tf.nn.sigmoid)
    # 4. Dropout with keep probability 0.5
    fc3_drop = tf.nn.dropout(fc3, keep_prob=0.5)

    # 5. Output layer: 32 -> n_classes, ReLU
    with tf.variable_scope("fc4", reuse=tf.AUTO_REUSE):
        fc4 = add_fc_layer(fc3_drop, 32, n_classes, tf.nn.relu)
    return fc4


# Placeholders
x_pl = tf.placeholder(tf.float32, shape=[None, h_pool2_flat])
y_pl = tf.placeholder(tf.float32, shape=[None, n_classes])

# Predictions
y_pred = inference(x_pl)

# Loss (mean squared error against the label placeholder) and optimizer
k = y_pl - y_pred
loss = tf.reduce_mean(tf.square(k))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)

# Train the model
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(150000):
        _, train_loss = sess.run([train_op, loss],
                                 feed_dict={x_pl: X_train, y_pl: y_train})
        # Log and checkpoint every 1000 epochs
        if epoch % 1000 == 0:
            print('epoch:{}, train_loss:{}'.format(epoch, train_loss))
            saver.save(sess, "model/model.ckpt", global_step=epoch)
            print('epoch:{}, save model'.format(epoch))
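
# Hedged sketch: restoring the latest checkpoint written above and predicting
# on new data. X_new is a hypothetical float32 array of shape (N, 12288),
# prepared the same way as X_train. Note that inference() applies dropout
# unconditionally, so predictions are stochastic.
# with tf.Session() as sess:
#     saver.restore(sess, tf.train.latest_checkpoint("model/"))
#     preds = sess.run(y_pred, feed_dict={x_pl: X_new})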