I. Saving a Model

1. tf.train.Saver

import tensorflow as tf
...
# Build the network here
...
# Save the model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # Always initialize all variables first
    # Train the network here
    ...
    # Save the parameters
    saver = tf.train.Saver()
    saver.save(sess, PATH)  # PATH is the path to save to
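    # saver.save writes several files using PATH as a prefix: a .meta file (the graph structure),
    # .index/.data files (the variable values), and a "checkpoint" bookkeeping file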

2. tf.saved_model.builder

import tensorflow as tf
...
# Build the network
...
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # Always initialize all variables first
    # Train the network here
    ...
    # Save the model
    builder = tf.saved_model.builder.SavedModelBuilder(PATH)  # PATH is the export directory
    builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.TRAINING])  # Save the whole graph and its variables; this API can also save multiple metagraphs, which is not covered here
    builder.save()  # Finish saving
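    # builder.save() writes a saved_model.pb file plus a variables/ subdirectory under PATH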

II. Loading a Model

To load and use a trained model in full, the work splits into two parts: loading the graph and loading the key nodes (tensors). A model saved with tf.saved_model.builder can have both its graph and its nodes saved and restored completely.

1. Loading the graph

import tensorflow as tf
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.TRAINING], PATH)  # PATH is the export directory again
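    # The tag list passed to load() must match the tags the model was saved with (TRAINING here)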
	... 
	...

2. Loading nodes, tensors, and variables

sess.run(output, {x: _x, y: _y})  # The dict here is the feed_dict; it is used the same way as when feeding placeholders
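The line above assumes that output, x and y already hold tensor handles. After loading a SavedModel they usually have to be looked up by name first; here is a minimal sketch of that lookup, where the tensor names "x:0", "y:0" and "output:0" are only illustrative placeholders for whatever names your own graph actually uses:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.TRAINING], PATH)
    graph = sess.graph
    # Hypothetical tensor names; replace them with the names from your own graph
    x = graph.get_tensor_by_name("x:0")
    y = graph.get_tensor_by_name("y:0")
    output = graph.get_tensor_by_name("output:0")
    print(sess.run(output, feed_dict={x: _x, y: _y}))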

III. Examples

# Part 1: Saving a TensorFlow model
import tensorflow as tf
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    saver.save(sess, "Model/model.ckpt")  
  
  
# Part 2: Loading the TensorFlow model
  
import tensorflow as tf  
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt") # Note the "./" added in front of the path
    print(sess.run(result)) # [ 3.]  
  
  
# Part 3: To avoid redefining the operations in the graph, the persisted graph can be loaded directly
  
import tensorflow as tf  
  
saver = tf.train.import_meta_graph("Model/model.ckpt.meta")  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt") # Note how the path is written
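    # A tensor name has the form "<op name>:<output index>"; "add:0" is the first output of the op named "add"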
    print(sess.run(tf.get_default_graph().get_tensor_by_name("add:0"))) # [ 3.]  
  
  
# Part 4: tf.train.Saver also supports renaming variables when saving and loading
  
import tensorflow as tf  
  
# The declared variable names do not match the variable names in the saved model
u1 = tf.Variable(tf.constant(1.0, shape=[1]), name="other-v1")  
u2 = tf.Variable(tf.constant(2.0, shape=[1]), name="other-v2")  
result = u1 + u2  
  
# Creating the Saver directly would fail with a "variable not found" error
# Instead, pass a dict that renames the variables: {"name of the saved variable": variable to load it into}
# The variable originally named v1 is now loaded into u1 (whose name is other-v1)
saver = tf.train.Saver({"v1": u1, "v2": u2})  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt")  
    print(sess.run(result)) # [ 3.]  
  
  
# Part 5: Saving a model with exponential moving averages
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
for variables in tf.global_variables():  
    print(variables.name) # v:0  
  
ema = tf.train.ExponentialMovingAverage(0.99)  
maintain_averages_op = ema.apply(tf.global_variables())  
for variables in tf.global_variables():  
    print(variables.name) # v:0  
                          # v/ExponentialMovingAverage:0  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    sess.run(tf.assign(v, 10))  
    sess.run(maintain_averages_op)  
    saver.save(sess, "Model/model_ema.ckpt")  
    print(sess.run([v, ema.average(v)])) # [10.0, 0.099999905]  
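    # Shadow value = decay * shadow + (1 - decay) * v = 0.99 * 0 + 0.01 * 10 = 0.1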
  
  
# Part 6: Reading a variable's moving average directly through variable renaming
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
saver = tf.train.Saver({"v/ExponentialMovingAverage": v})  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model_ema.ckpt")  
    print(sess.run(v)) # 0.0999999  
  
  
# Part 7: Getting the renaming dict from tf.train.ExponentialMovingAverage's variables_to_restore()
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
# Note: the variable name here must match the name in the saved model
ema = tf.train.ExponentialMovingAverage(0.99)  
print(ema.variables_to_restore())  
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}  
# The "v" here comes from name="v" of the variable v defined above
  
saver = tf.train.Saver(ema.variables_to_restore())  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model_ema.ckpt")  
    print(sess.run(v)) # 0.0999999  
  
  
# Part 8: Using convert_variables_to_constants to save the graph's variables and their values as constants in a single file
  
import tensorflow as tf  
from tensorflow.python.framework import graph_util  
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    # Export the GraphDef of the current graph, i.e. the computation from inputs to outputs
    graph_def = tf.get_default_graph().as_graph_def()  
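    # 'add' below is the name of the output op (no ":0" suffix); only the subgraph needed to compute it is kept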
    output_graph_def = graph_util.convert_variables_to_constants(sess,  
                                                        graph_def, ['add'])  
  
    with tf.gfile.GFile("Model/combined_model.pb", 'wb') as f:  
        f.write(output_graph_def.SerializeToString())  
  
  
# Part 9: Loading the model that contains the variables and their values
  
import tensorflow as tf  
from tensorflow.python.platform import gfile  
  
with tf.Session() as sess:  
    model_filename = "Model/combined_model.pb"  
    with gfile.FastGFile(model_filename, 'rb') as f:  
        graph_def = tf.GraphDef()  
        graph_def.ParseFromString(f.read())  
  
    result = tf.import_graph_def(graph_def, return_elements=["add:0"])  
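    # import_graph_def returns a list with one entry per requested element, so sess.run prints a one-element list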
    print(sess.run(result)) # [array([ 3.], dtype=float32)]