How do you save and load a model trained with TensorFlow?
Goal: study a DNN under the TensorFlow framework, and learn how to save a model trained with TensorFlow, load it back, and use it for prediction.
Contents:
1. Two methods for saving and loading a TensorFlow model
2. Worked example 1: linear regression
3. Worked example 2: MNIST classification
I. Two methods for saving and loading a TensorFlow model
There are two common methods for saving and loading a TensorFlow model:
Method 1:
Save the model (define the variables, then call saver.save()):
import tensorflow as tf

W = tf.Variable([[1, 1, 1], [2, 2, 2]], dtype=tf.float32, name='w')
b = tf.Variable([[0, 1, 2]], dtype=tf.float32, name='b')

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    save_path = saver.save(sess, "save/model.ckpt")
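With recent TF 1.x versions, saver.save() writes several files into the target directory (which must already exist): checkpoint, which records the latest checkpoint name; model.ckpt.meta, which stores the graph structure; and model.ckpt.index plus model.ckpt.data-00000-of-00001, which store the variable values.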
Load the model (redefine the same variables, then call saver.restore()):
import tensorflow as tf

W = tf.Variable(tf.truncated_normal(shape=(2, 3)), dtype=tf.float32, name='w')
b = tf.Variable(tf.truncated_normal(shape=(1, 3)), dtype=tf.float32, name='b')

saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, "save/model.ckpt")
    print(sess.run(W), sess.run(b))  # values come from the checkpoint, not the random initializers
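Note that saver.restore() assigns the stored values directly to the variables, so no initializer needs to be run first; the only requirement is that the redefined variables match the checkpoint in name and shape (here 'w' and 'b').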
The drawback of this method: to use the model, you must redefine the network structure exactly as it was, and then load the values into variables with matching names. In many cases, though, we would rather read a file and use the model directly, without redefining it. That calls for the second method.
Method 2: load the model without redefining the network structure.
The key is the .meta file written alongside the checkpoint, which stores the graph definition itself; a minimal sketch follows.
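A minimal sketch of this approach, assuming the checkpoint from method 1 above was saved under save/ with variables named 'w' and 'b': tf.train.import_meta_graph() rebuilds the graph from the .meta file, and tensors are then fetched by name rather than redefined in code.
import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the saved meta file instead of redefining it in code
    saver = tf.train.import_meta_graph("save/model.ckpt.meta")
    # Load the variable values from the latest checkpoint in the directory
    saver.restore(sess, tf.train.latest_checkpoint("save/"))
    graph = tf.get_default_graph()
    # Fetch tensors by the names given when the model was defined
    W = graph.get_tensor_by_name("w:0")
    b = graph.get_tensor_by_name("b:0")
    print(sess.run(W), sess.run(b))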
II. Worked example 1: linear regression
Python code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

plotdata = {"batchsize": [], "loss": []}

def moving_average(a, w=10):
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]

# Generate simulated data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3  # y = 2x, plus noise

# Show the simulated data points
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()

# Build the model
# Placeholders
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Model parameters
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward pass
z = tf.multiply(X, W) + b
tf.summary.histogram("z", z)

# Backward pass: minimize the squared error
cost = tf.reduce_mean(tf.square(Y - z))
tf.summary.scalar('loss_function', cost)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # gradient descent

# Initialize variables
init = tf.global_variables_initializer()

# Training parameters
training_epochs = 20
display_step = 2

# Launch the session
with tf.Session() as sess:
    sess.run(init)
    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('log/mnist_with_summaries', sess.graph)

    # Fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
            # Generate a summary and write it to the event file
            summary_str = sess.run(merged_summary_op, feed_dict={X: x, Y: y})
            summary_writer.add_summary(summary_str, epoch)

        # Print progress during training
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch+1, "cost=", loss, "W=", sess.run(W), "b=", sess.run(b))
            if not np.isnan(loss):
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)

    print(" Finished!")
    print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess.run(W), "b=", sess.run(b))

    # Plot the fit
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    # Plot the smoothed training loss
    plotdata["avgloss"] = moving_average(plotdata["loss"])
    plt.figure(1)
    plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
    plt.xlabel('Minibatch number')
    plt.ylabel('Loss')
    plt.title('Minibatch run vs. Training loss')
    plt.show()

    # Use the trained model for a single prediction
    print("x=0.2, z=", sess.run(z, feed_dict={X: 0.2}))
Run results: the script plots the training data, the fitted line, and the smoothed training-loss curve (figures not reproduced here).
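Since the topic of this article is saving and loading, note that this regression model can be saved and restored with the same Saver pattern as in Part 1. A minimal sketch, assuming it is appended inside the training session above; the path "linear/model.ckpt" is illustrative and its directory must exist:
    # Still inside the training session: save W and b
    saver = tf.train.Saver()
    save_path = saver.save(sess, "linear/model.ckpt")

# Later, in a fresh session on the same graph (X, z defined as above):
with tf.Session() as sess2:
    saver.restore(sess2, "linear/model.ckpt")
    print("x=0.2, z=", sess2.run(z, feed_dict={X: 0.2}))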
III. Worked example 2: MNIST classification
Python code:
import tensorflow as tf  # import the tensorflow library
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import pylab

tf.reset_default_graph()

# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images: 28*28 = 784 features
y = tf.placeholder(tf.float32, [None, 10])   # digits 0-9 => 10 classes

# Set model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Build the model
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # softmax classifier

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Hyperparameters
learning_rate = 0.01
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
training_epochs = 25
batch_size = 100
display_step = 1

saver = tf.train.Saver()
model_path = "log/521model.ckpt"

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # run the initialization op

    # Training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over the whole training set, one batch at a time
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get the loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
                                                          y: batch_ys})
            # Accumulate the average loss
            avg_cost += c / total_batch

        # Print progress during training
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    print(" Finished!")

    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Save the model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)

# Load the model back
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables (restore will overwrite them)
    sess.run(tf.global_variables_initializer())
    # Restore model weights from the previously saved model
    saver.restore(sess, model_path)

    # Test the restored model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Predict two sampled images and display them
    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)
    outputval, predv = sess.run([output, pred], feed_dict={x: batch_xs})
    print(outputval, predv, batch_ys)

    im = batch_xs[0]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()

    im = batch_xs[1]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()
Run results:
Epoch: 0001 cost= 7.752772305
Epoch: 0002 cost= 4.151113472
Epoch: 0003 cost= 2.902867300
Epoch: 0004 cost= 2.292819615
Epoch: 0005 cost= 1.945046414
Epoch: 0006 cost= 1.721682055
Epoch: 0007 cost= 1.565224952
Epoch: 0008 cost= 1.448184885
Epoch: 0009 cost= 1.357409785
Epoch: 0010 cost= 1.283956942
Epoch: 0011 cost= 1.223152844
Epoch: 0012 cost= 1.171679115
Epoch: 0013 cost= 1.127339950
Epoch: 0014 cost= 1.089194359
Epoch: 0015 cost= 1.055257367
Epoch: 0016 cost= 1.025059551
Epoch: 0017 cost= 0.997867818
Epoch: 0018 cost= 0.973305143
Epoch: 0019 cost= 0.951017423
Epoch: 0020 cost= 0.930552574
Epoch: 0021 cost= 0.911731513
Epoch: 0022 cost= 0.894192883
Epoch: 0023 cost= 0.878128686
Epoch: 0024 cost= 0.862873784
Epoch: 0025 cost= 0.848758641
Finished!
Accuracy: 0.8355
Model saved in file: log/521model.ckpt
Starting 2nd session...
Accuracy: 0.8355
[2 6] [[ 4.44788748e-05 5.84178214e-13 9.99922991e-01 1.38609546e-09
4.55205260e-08 5.39752136e-06 2.67073501e-05 2.82684276e-16
4.42017324e-07 2.21145666e-14]
[ 1.80836395e-08 5.05934682e-18 5.30333818e-05 5.56881845e-14
2.38929709e-10 7.70143487e-08 9.99946833e-01 5.28569544e-09
9.02450684e-11 3.62926039e-14]] [[ 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]]
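The accuracy printed in the second session (0.8355) is identical to the accuracy at the end of training, which confirms that the checkpoint restored the trained weights. The script below is a standalone variant that skips training entirely: it rebuilds the same graph, restores the checkpoint saved above, and uses the model only for prediction, displaying both sampled images in one figure.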
import tensorflow as tf  # import the tensorflow library
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import pylab

########################################################################
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei']  # without this, Chinese text cannot be displayed in the figure
# import matplotlib
# matplotlib.rcParams['axes.unicode_minus'] = False  # without this, minus signs cannot be displayed in the figure
###########################################################################

tf.reset_default_graph()

# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images: 28*28 = 784 features
y = tf.placeholder(tf.float32, [None, 10])   # digits 0-9 => 10 classes

# Set model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Build the model
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # softmax classifier

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Hyperparameters
learning_rate = 0.01
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

saver = tf.train.Saver()
model_path = "log/521model.ckpt"

###############################################################################
# Load the model
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables (restore will overwrite them)
    sess.run(tf.global_variables_initializer())
    # Restore model weights from the previously saved model
    saver.restore(sess, model_path)

    # # Test the model
    # correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # # Compute the accuracy
    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)
    outputval, predv = sess.run([output, pred], feed_dict={x: batch_xs})
    # print(outputval, predv, batch_ys)

    #######################################################################
    print(outputval)
    # Show both sampled images side by side, titled with the predicted digit
    pylab.subplot(121)
    im = batch_xs[0]
    im = im.reshape(-1, 28)
    pylab.title('该图片中的数字为:' + str(outputval[0]))  # title: "The digit in this image is:"
    pylab.imshow(im)
    pylab.subplot(122)
    im = batch_xs[1]
    im = im.reshape(-1, 28)
    pylab.title('该图片中的数字为:' + str(outputval[1]))
    pylab.imshow(im)
    pylab.show()
Run results:
Starting 2nd session...
[6 1]
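(The script also displays a figure with the two sampled digit images side by side, each titled with its predicted value, matching the printed [6 1]; the figure is not reproduced here.)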