- Use graphs to represent computation tasks
- Execute graphs in the context of a Session
- Use tensors to represent data
- Maintain state through Variables
- Use feed and fetch to assign values to, or retrieve data from, arbitrary operations
TensorFlow is a programming system in which computation tasks are represented as graphs. The nodes in a graph are called ops (operations); an op takes zero or more Tensors, performs some computation, and produces zero or more Tensors.
A Tensor can be thought of as an n-dimensional array or list.
A graph must be launched inside a Session.
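A minimal sketch of these ideas (using the same TF 1.x API as the rest of these notes): every op created becomes a node in the default graph, and printing a Tensor shows only a handle, not a value.
import tensorflow as tf
a = tf.constant(1.0)  # a constant op producing one Tensor
b = tf.constant(2.0)
c = tf.add(a, b)      # an op taking two Tensors, producing one
graph = tf.get_default_graph()
print([op.name for op in graph.get_operations()])  # ['Const', 'Const_1', 'Add']
print(c)  # a Tensor handle; the value only exists once a Session runs it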
Using constants
import tensorflow as tf
# Create two constant ops
m1 = tf.constant([[3,3]])   # 1 row, 2 columns
m2 = tf.constant([[2],[3]]) # 2 rows, 1 column
# Create a matrix-multiplication op
product = tf.matmul(m1, m2)
print(product) # Tensor("MatMul:0", shape=(1, 1), dtype=int32)
# Define a session and launch the default graph
sess = tf.Session()
# Call sess.run to execute the matrix multiplication
# run(product) triggers the 3 ops in the graph
result = sess.run(product)
print(result)
sess.close()
The same example, using a with block so the session closes automatically:
m1 = tf.constant([[3,3]])   # 1 row, 2 columns
m2 = tf.constant([[2],[3]]) # 2 rows, 1 column
# Create a matrix-multiplication op
product = tf.matmul(m1, m2)
print(product) # Tensor("MatMul:0", shape=(1, 1), dtype=int32)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)
Using variables
import tensorflow as tf
x = tf.Variable([1,2])
a = tf.Variable([3,3])
sub = tf.subtract(x, a)
add = tf.add(x, sub)
# Variables must be initialized before use
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init) # run the initialization op
    print(sess.run(sub))
    print(sess.run(add))
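A variant sketch: besides tf.global_variables_initializer(), each Variable in TF 1.x exposes its own initializer op, which can be run individually.
import tensorflow as tf
x = tf.Variable([1,2])
a = tf.Variable([3,3])
with tf.Session() as sess:
    # Initialize each variable separately instead of all at once
    sess.run(x.initializer)
    sess.run(a.initializer)
    print(sess.run(tf.subtract(x, a)))  # [-2 -1]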
Incrementing a variable (++)
import tensorflow as tf
# Create a variable initialized to 0
state = tf.Variable(0, name='counter')
# Create an op that adds 1 to state
new_value = tf.add(state, 1)
# An assignment op: assigns new_value to state
update = tf.assign(state, new_value)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for _ in range(5):
        sess.run(update) # equivalent to state = new_value, i.e. state = state + 1
        print(sess.run(state))
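For this increment pattern, TF 1.x also provides tf.assign_add, which fuses the add and the assignment into a single op; a minimal sketch of the same counter:
import tensorflow as tf
state = tf.Variable(0, name='counter')
update = tf.assign_add(state, 1)  # state += 1 in one op
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for _ in range(5):
        print(sess.run(update))  # 1 2 3 4 5; run returns the updated value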
Fetch and Feed
Fetch lets a single run call execute multiple ops at once.
import tensorflow as tf
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2, input3)
mul = tf.multiply(input1, add)
with tf.Session() as sess:
    result1, result2 = sess.run([mul, add]) # fetch: run multiple ops at once
    print(result1, result2)
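sess.run also accepts a dict of fetches and returns a dict with the same keys (TF 1.x), which keeps multiple results labeled; a small sketch on the same graph as above:
with tf.Session() as sess:
    results = sess.run({'mul': mul, 'add': add})
    print(results)  # {'mul': 21.0, 'add': 7.0}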
Feed
import tensorflow as tf
# Create placeholders
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    # feed data is passed in as a dict
    print(sess.run(output, feed_dict={input1:[7.0], input2:[2.0]}))
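Placeholders can also carry an explicit shape, where None leaves the batch dimension open; a short sketch:
import tensorflow as tf
input1 = tf.placeholder(tf.float32, shape=[None, 2])  # any number of rows, 2 columns
output = tf.reduce_sum(input1, axis=1)
with tf.Session() as sess:
    # Feed a batch of two rows
    print(sess.run(output, feed_dict={input1: [[1.0, 2.0], [3.0, 4.0]]}))  # [3. 7.]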
GPU version
# A Session distributes the graph onto devices (CPU/GPU).
# When a machine has more than one GPU, use a with tf.device(...) statement
# to pin ops to a specific CPU or GPU (see the placement sketch after this
# example). Devices are identified by strings:
#   "/cpu:0": the CPU
#   "/gpu:0": the first GPU
#   "/gpu:1": the second GPU
matrix1 = tf.Variable([[3,3]])   # 1 row, 2 columns
matrix2 = tf.Variable([[2],[3]]) # 2 rows, 1 column
product = tf.matmul(matrix1, matrix2)
# Cap this process's GPU memory usage at about one third
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    result = sess.run([product])[0]
    print(result)
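The device strings above are used with tf.device; a minimal placement sketch (assuming a GPU may or may not be present, so allow_soft_placement lets TensorFlow fall back to the CPU):
import tensorflow as tf
with tf.device("/gpu:0"):
    m1 = tf.constant([[3.0, 3.0]])   # floats: GPU matmul kernels expect float types
    m2 = tf.constant([[2.0], [3.0]])
    product = tf.matmul(m1, m2)
config = tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=True)
with tf.compat.v1.Session(config=config) as sess:
    print(sess.run(product))  # [[15.]]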
Case 1: linear regression
import tensorflow as tf
import numpy as np
# Sample data: 100 random points on the line y = 0.1x + 0.2
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2
# Build a linear model; training optimizes k and b to match the samples
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b
# Quadratic cost (mean squared error): true value minus predicted value
loss = tf.reduce_mean(tf.square(y_data - y))
# Gradient-descent optimizer, learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Training op that minimizes the cost; the smaller the loss,
# the closer the predictions are to the true values
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run([k, b]))
0 [0.05564068, 0.100868106]
20 [0.105036736, 0.19720893]
40 [0.10313627, 0.1982621]
60 [0.10195288, 0.19891785]
80 [0.10121602, 0.19932617]
100 [0.100757174, 0.19958043]
120 [0.10047149, 0.19973873]
140 [0.10029358, 0.19983733]
160 [0.10018281, 0.1998987]
180 [0.10011383, 0.19993693]
200 [0.10007088, 0.19996072]
After 200 steps, k and b are very close to the true values (0.1 and 0.2).
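As a sanity check: since y_data is exactly 0.1 * x_data + 0.2 with no noise, an ordinary least-squares fit recovers these values directly:
import numpy as np
k_fit, b_fit = np.polyfit(x_data, y_data, 1)  # slope, intercept
print(k_fit, b_fit)  # 0.1 0.2 up to floating-point error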
Case 2: fitting a curve with a small neural network
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Generate sample points
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis] # 200 rows, 1 column
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise
# Define the model
x = tf.placeholder(tf.float32, [None, 1]) # any number of rows, 1 column
y = tf.placeholder(tf.float32, [None, 1])
# Hidden layer
weight_L1 = tf.Variable(tf.random_normal([1, 10])) # 1x10
biases_L1 = tf.Variable(tf.zeros([1, 10]))         # 1x10
wx_plus_b_L1 = tf.matmul(x, weight_L1) + biases_L1
L1 = tf.nn.tanh(wx_plus_b_L1) # hidden-layer output: tanh applied to the weighted sum
# Output layer
weight_L2 = tf.Variable(tf.random_normal([10, 1])) # 10 rows, 1 column
biases_L2 = tf.Variable(tf.zeros([1, 1]))
wx_plus_b_L2 = tf.matmul(L1, weight_L2) + biases_L2
prediction = tf.nn.tanh(wx_plus_b_L2)
# Quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
    # Get the predictions
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Plot the results
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r--', lw=5)
    plt.show()
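A small variation of the training loop (same graph as above) that also prints the loss every 200 steps, to watch the fit converge:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
        if step % 200 == 0:
            # The quadratic cost should shrink toward the noise floor (~0.02**2)
            print(step, sess.run(loss, feed_dict={x: x_data, y: y_data}))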