Introduction
Asynchronous Advantage Actor-Critic (A3C)
As we all know, methods that update the policy directly iterate very slowly. To make full use of the available computing resources, the Asynchronous Advantage Actor-Critic method was introduced:
We have one global network plus a number of workers, and each worker is itself an A2C net. A3C involves two main operations, pull and push:
pull: copy the global network's parameters directly into the worker's network.
push: use each worker's gradients to update the global network's parameters.
The A3C implementation is available at: https://github.com/princewen/tensorflow_practice/tree/master/RL/Basic-A3C-Demo
A3C Algorithm Flow
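The original post illustrated the flow with a figure; as a rough substitute, the loop each worker runs can be summarised as follows (the constant names refer to the full code in the next section):
# A3C training loop, per worker (a sketch of the flow, not the implementation itself):
#   1. pull:  copy the global net's parameters into the worker's local net
#   2. interact with the worker's own environment for up to UPDATE_GLOBAL_ITER steps,
#      storing (s, a, r) in local buffers
#   3. build n-step value targets by bootstrapping from V(s') and discounting
#      backwards with GAMMA
#   4. compute the local actor and critic gradients on the buffered batch
#   5. push: apply those gradients to the global net's parameters
#   6. pull again, clear the buffers, and repeat until MAX_GLOBAL_EP episodes are done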
Full Code
# _*_ coding:utf-8 _*_
# !/usr/bin/python

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 2000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001    # learning rate for actor
LR_C = 0.001     # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0

env = gym.make(GAME)

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]


# This class is used to build the global net as well as each worker's net;
# since the two have the same structure, the class can be reused for both.
class ACNet(object):
    def __init__(self, scope, globalAC=None):
        '''
        # When creating a worker net, we pass in the previously created globalAC.
        if this is the global net:   # decide whether we are building the global or a local net
            with tf.variable_scope('Global_Net'):
                self._build_net()
        else:
            with tf.variable_scope('worker'):
                self._build_net()
            # then compute the critic loss and the actor loss,
            # and use these two losses to compute the gradients to push
            with tf.name_scope('sync'):        # synchronisation
                with tf.name_scope('pull'):
                    # fetch the global parameters
                with tf.name_scope('push'):
                    # push the local update to the global net
        '''
        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_params, self.c_params = self._build_net(scope)[-2:]
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                with tf.name_scope('wrap_a_out'):
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-4

                normal_dist = tf.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)

                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])

                with tf.name_scope('local_grad'):
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):   # A3C has two main operations: pull and push
                with tf.name_scope('pull'):   # pull: copy the global net's parameters into the worker's net
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):   # push: use the worker's gradients to update the global net's parameters
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self, scope):
        # build the Actor and Critic networks
        # return the mean, the standard deviation and the state value
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
        a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
        c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        return mu, sigma, v, a_params, c_params

    def update_global(self, feed_dict):
        # the push operation
        SESS.run([self.update_a_op, self.update_c_op], feed_dict)  # local grads applied to the global net

    def pull_global(self):
        # the pull operation
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):
        # choose an action given state s
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})[0]


class Worker(object):
    def __init__(self, name, globalAC):
        self.env = gym.make(GAME).unwrapped  # each worker gets its own environment
        self.name = name                     # the worker's name
        self.AC = ACNet(name, globalAC)      # its own local net, bound to the globalAC

    def work(self):
        # buffers for s, a, r, used for the n-step update
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':   # only render the first worker
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                done = True if ep_t == MAX_EP_STEP - 1 else False

                ep_r += r
                buffer_s.append(s)             # store the transition
                buffer_a.append(a)
                buffer_r.append((r + 8) / 8)   # normalize

                # sync every UPDATE_GLOBAL_ITER steps, or when the episode ends
                if total_step % UPDATE_GLOBAL_ITER == 0 or done:
                    # get the value of the next state, used to compute the TD error
                    if done:
                        v_s_ = 0   # terminal state: the expected future return is 0
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    buffer_v_target = []         # buffer of state-value targets, used for the TD error
                    for r in buffer_r[::-1]:     # n-step forward view, computed backwards
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                    }

                    self.AC.update_global(feed_dict)            # push the update to the globalAC
                    buffer_s, buffer_a, buffer_r = [], [], []   # clear the buffers
                    self.AC.pull_global()                       # fetch the latest parameters from the globalAC

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:   # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1   # one more episode done
                    break            # end this episode


if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)   # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()   # TensorFlow's tool for coordinating threads
    SESS.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, SESS.graph)

    worker_threads = []
    for worker in workers:
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)   # block the main thread until the workers stop

    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()

'''
# the workers run in parallel
with tf.device("/cpu:0"):
    GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)    # build the global AC
    workers = []
    for i in range(N_WORKERS):             # create the workers, then run them in parallel
        workers.append(Worker(GLOBAL_AC))  # every worker shares this global AC

COORD = tf.train.Coordinator()             # TensorFlow's tool for parallelism
worker_threads = []
for worker in workers:
    job = lambda: worker.work()
    t = threading.Thread(target=job)       # add a worker thread
    t.start()
    worker_threads.append(t)
COORD.join(worker_threads)                 # tf thread coordination
'''
Gradient Accumulation and Asynchronous Updates in Distributed TensorFlow
While recently implementing the algorithm from the A3C paper [1], I found that there is still not much material online explaining how to accumulate gradients, nor any experimental verification of asynchronous updates in distributed TensorFlow. I have therefore written up the bit of research I did; corrections are welcome.
I. Problem Description
In the asynchronous methods, a target network is used to keep the network from changing too quickly. At the start of each training epoch, every learner copies the target network's weights; after training for a while it accumulates its gradients, uses them to update the target network, and then ends the epoch. Below is the update rule for n-step Q-learning:
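(The original post showed this update as an image. The following is a rough reconstruction of the asynchronous n-step Q-learning pseudocode in the A3C paper [1], with thread-local parameters θ' and target-network parameters θ⁻, so treat the exact notation as approximate.)
R \leftarrow \begin{cases} 0 & \text{for terminal } s_t \\ \max_a Q(s_t, a; \theta^-) & \text{otherwise} \end{cases}
\text{for } i \in \{t-1, \dots, t_{\text{start}}\}: \quad R \leftarrow r_i + \gamma R, \qquad d\theta \leftarrow d\theta + \frac{\partial \left(R - Q(s_i, a_i; \theta')\right)^2}{\partial \theta'}
The accumulated dθ is then applied asynchronously to the shared parameters θ.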
Therefore, the implementation has to (1) let each learner obtain the target network's weights, (2) accumulate gradients inside each learner, and (3) return the accumulated gradient to the target network.
II. Gradient Computation, Accumulation, and Update
Passing gradients between the target network and the learners is already handled by distributed TensorFlow by default: in between-graph replication, each thread holds its own copy of the graph and passes its results back to the main graph after computing. What we mainly need to solve ourselves is gradient accumulation.
The basic idea is:
repeat:
    compute the gradient
    store the gradient
until a certain number of iterations
push the accumulated gradient back to the target network
Concretely, this uses the optimizer class's compute_gradients() and apply_gradients() methods, explained step by step below.
1. Defining the operations
# Define input and output
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, name="x")
with tf.name_scope('weights'):
    w = tf.Variable(2.0, name='target_w')
with tf.name_scope('output'):
    y = tf.mul(x, w, name='y')  # tf.mul was renamed tf.multiply in TF >= 1.0
with tf.name_scope('real_output'):
    y_ = tf.placeholder(tf.float32, name="y_")

# Define train op
with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
with tf.name_scope('gradient'):
    loss = tf.reduce_mean(tf.square(y_ - y))  # MSE loss
    gradient_all = optimizer.compute_gradients(loss)  # gradients of the network (may contain None)
    grads_vars = [v for (g, v) in gradient_all if g is not None]  # all variables that actually have gradients
    gradient = optimizer.compute_gradients(loss, grads_vars)  # gradients of the network (without None)
    grads_holder = [(tf.placeholder(tf.float32, shape=g.get_shape()), v)
                    for (g, v) in gradient]
    train_op = optimizer.apply_gradients(grads_holder)
y_ is the ground-truth value, y is the network's output, and loss is the MSE loss. optimizer is a gradient-descent optimizer. gradient_all holds the gradients computed by the optimizer; compute_gradients returns a list whose elements are tuples of the form (gradient, variable). Note that if the network is not single-input single-output (for example, an actor-critic network with two outputs), compute_gradients may return (None, v) pairs, i.e. some variables have no corresponding gradient, and those None entries would cause errors in the next step. We therefore extract the variables that do have gradients into grads_vars, and call compute_gradients again on grads_vars to obtain gradient. Finally, we create a placeholder for each gradient and call apply_gradients to update the network.
Note that the weight w defined this way will cause a problem; a fix is proposed in the experiments of Section III.
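As a minimal illustration of why the None filtering is needed (a and b here are hypothetical variables, not part of the code above):
import tensorflow as tf

a = tf.Variable(1.0, name='a')
b = tf.Variable(2.0, name='b')   # b does not appear in the loss
loss = tf.square(a - 3.0)

opt = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = opt.compute_gradients(loss)   # defaults to all trainable variables
# grads_and_vars looks like [(grad_a, a), (None, b)]; calling g.get_shape() on the
# None entry (as done when building grads_holder above) raises an error,
# hence the filtering step.
usable = [(g, v) for (g, v) in grads_and_vars if g is not None]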
2. Applying the operations
# compute and store a gradient at every step
grads = []
for i in range(THREAD_STEPS):
    x_i = ...
    y_real = ...
    y_i = sess.run(y, feed_dict={x: x_i})
    loss_i = sess.run(loss, feed_dict={x: x_i, y_: y_real})
    grad_i = sess.run(gradient, feed_dict={x: x_i, y_: y_real})
    grads.append(grad_i)

# calculate total gradients
grads_sum = {}
# add up dθ
for i in range(len(grads_holder)):
    k = grads_holder[i][0]
    grads_sum[k] = sum([g[i][0] for g in grads])

# Apply gradients
_ = sess.run(train_op, feed_dict=grads_sum)
This is done in three steps:
Step one: feed x_i and y_ to compute a gradient, and keep the gradients in a list grads.
Step two: accumulate the gradients in a dictionary of the form {gradient placeholder: accumulated gradient value}, i.e. each grads_holder[i][0] maps to the sum of the i-th gradients.
Since every element of grads is a gradient list with the same layout as grads_holder, the list of gradient values corresponding to grads_holder[i][0] is simply [g[i][0] for g in grads].
Step three: feed the dictionary built in the previous step to apply_gradients. Mission complete.
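A small worked illustration of this indexing (the gradient values g0_a, g0_b, ... are made up):
# Suppose there are 2 variables (a, b) and THREAD_STEPS = 3 stored gradient lists:
#   grads[0] = [(g0_a, a), (g0_b, b)]
#   grads[1] = [(g1_a, a), (g1_b, b)]
#   grads[2] = [(g2_a, a), (g2_b, b)]
# For i = 0, grads_holder[0][0] is the placeholder for a's gradient, and
# [g[0][0] for g in grads] = [g0_a, g1_a, g2_a]; their sum is what gets fed to it.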
III. Experimental Setup and Results
We ran a simple check of the approach above, using two workers trained asynchronously in between-graph mode. The input is [0, 1, 2], the network output is y = w·x, the ground-truth output is y_ = x + 10, and the loss is the MSE. The initial value of w is 2. The optimizer is gradient descent with a learning rate of 1. The gradient is therefore
∂loss/∂w = 2·(y − y_)·x = 2·(w·x − y_)·x
We set up two workers, each training for 2 epochs of 3 steps. The output is as follows:
task0 - epoch0: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 2.0)]
task0 - epoch0: x_i: 1. y_i: 2.0. loss: 81.0. grad: [(-18.0, 2.0)]
task0 - epoch0: x_i: 2. y_i: 4.0. loss: 64.0. grad: [(-32.0, 2.0)]
task1 - epoch0: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 2.0)]
Final States of w in task0 - epoch0: 52.0
task0 - epoch1: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 52.0)]
task1 - epoch0: x_i: 1. y_i: 52.0. loss: 1681.0. grad: [(82.0, 52.0)]
...
First, worker:0 feeds x=0, the network returns y=0, and the computed gradient is 0; in the second step it feeds x=1, gets y=2, and the gradient is -18, and so on. When worker:0 reaches the third step of its first epoch, worker:1 starts; note that at this point w is still 2 and the network has not changed, so when worker:1 feeds x=0 the network returns y=0.
Worker:0 then applies its update: the accumulated gradient is -0 - 18 - 32 = -50, and the new weight becomes 2 - 1 × (-50) = 52. So gradient accumulation works as intended.
However, our understanding of how variables are shared between graphs was wrong: worker:1 is still in its first epoch, yet its weight has also been updated to 52 (it should have kept the value 2 it had at the start of that epoch), and the gradient that should have been -18 becomes 82.
The fix is to separate the thread weights from the target network's weights. The weights are redefined as:
with tf.name_scope('weights'):
    target_w = tf.Variable(2.0, name='target_w')
    w_list = [tf.Variable(2.0, name='target_w') for i in range(WORKER_NUM)]
    w = w_list[FLAGS.task_index]
This creates a list whose length equals the number of threads, holding each thread's own weight: w_list[task_index] is the weight each process actually uses, while target_w is the target network's weight. Next we define the weight-update operations:
epoch_init = w.assign(target_w)
w_addup = tf.placeholder(tf.float32)
epoch_update = target_w.assign_add(w_addup)
Before each epoch starts, we use tf.assign(ref, value) to assign target_w's value to w; after the epoch ends, we add the difference between the trained weight and the initial weight back onto target_w (see the sketch below). The experimental results are then:
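As a rough sketch of how these ops fit into each worker's epoch loop (sess, EPOCHS and the inner training steps are assumed from context; this is not the gist's exact code):
for epoch in range(EPOCHS):
    w_start = sess.run(epoch_init)        # w <- target_w at the start of the epoch
    # ... run THREAD_STEPS of gradient computation, accumulate, then apply the
    #     accumulated gradients to w with train_op ...
    w_end = sess.run(w)
    sess.run(epoch_update, feed_dict={w_addup: w_end - w_start})   # target_w += (w_end - w_start)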
task0 - epoch0: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 2.0)]
task0 - epoch0: x_i: 1. y_i: 2.0. loss: 81.0. grad: [(-18.0, 2.0)]
task1 - epoch0: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 2.0)]
task0 - epoch0: x_i: 2. y_i: 4.0. loss: 64.0. grad: [(-32.0, 2.0)]
task1 - epoch0: x_i: 1. y_i: 2.0. loss: 81.0. grad: [(-18.0, 2.0)]
Final States of w in task0 - epoch0: 52.0
task0 - epoch1: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 52.0)]
task1 - epoch0: x_i: 2. y_i: 4.0. loss: 64.0. grad: [(-32.0, 2.0)]
...
As we can see, worker:0 has completed one update, accumulating a gradient of -50, and in epoch1 its initial weight has become 52, while worker:1's weight is still 2.
...
task0 - epoch1: x_i: 1. y_i: 52.0. loss: 1681.0. grad: [(82.0, 52.0)]
Final States of w in task1 - epoch0: 52.0
task0 - epoch1: x_i: 2. y_i: 104.0. loss: 8464.0. grad: [(368.0, 52.0)]
Final States of w in task0 - epoch1: -398.0
Final target_w: -348.0
done
task1 - epoch1: x_i: 0. y_i: 0.0. loss: 100.0. grad: [(-0.0, 102.0)]
task1 - epoch1: x_i: 1. y_i: 102.0. loss: 8281.0. grad: [(182.0, 102.0)]
task1 - epoch1: x_i: 2. y_i: 204.0. loss: 36864.0. grad: [(768.0, 102.0)]
Final States of w in task1 - epoch1: -848.0
Final target_w: -1298.0
done
From the initial weight in task1-epoch1 we can see that worker:1's new weight is 102. task0-epoch1 accumulated a gradient of -0 + 82 + 368 = 450, so worker:0's final output of target_w is 102 - 450 = -348. worker:1 accumulated 182 + 768 = 950 of gradient in epoch1, giving a final target_w of -348 - 950 = -1298. The calculations match the output.
With this, we have the full pipeline of gradient accumulation with asynchronous updates. The complete gist: allenwoods/async_grad_verify.py
Tips:
1. Running multiple processes on the GPU leads to OUT_OF_MEMORY_ERROR, because by default TF allocates all of the available GPU memory to each process, leaving every process after the first with no memory to use. There are two fixes: one is to prefix the launch command with CUDA_VISIBLE_DEVICES=9999 (or any number larger than the number of GPUs you have) to disable the GPU, which is recommended for the ps process; the other is to add gpu_options=tf.GPUOptions(allow_growth=True) (or a GPU memory fraction) to the server config, so that TF grows its GPU memory usage gradually instead of allocating it all at once. The gist above contains a concrete example, and a sketch of both options is given after these tips.
2. The run commands are:
python async_grad_test.py --ps_hosts=0.0.0.0:53198 --worker_hosts=0.0.0.0:58557,0.0.0.0:42832 --job_name=ps --task_index=0
python async_grad_test.py --ps_hosts=0.0.0.0:53198 --worker_hosts=0.0.0.0:58557,0.0.0.0:42832 --job_name=worker --task_index=0
python async_grad_test.py --ps_hosts=0.0.0.0:53198 --worker_hosts=0.0.0.0:58557,0.0.0.0:42832 --job_name=worker --task_index=1
3. The ps process cannot be stopped with Ctrl+C; it has to be killed. To shut down all training processes in one go, you can use:
ps -ef | grep /opt/anaconda3/bin/python| grep async_grad | awk {'print $2'} | xargs kill
/opt/anaconda3/bin/python is the Python interpreter and async_grad is a keyword matching the running .py file; adjust both to your own setup.
4. Before running the gist, delete any previous records in the checkpoint directory; otherwise TF will think the task has already finished and will not do anything.
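For tip 1, here is a sketch of the two options (the cluster addresses are taken from the run commands above; the job name and task index are placeholders):
import os
import tensorflow as tf

# Option 1: hide the GPU from this process entirely (recommended for the ps job);
# equivalent to prefixing the launch command with CUDA_VISIBLE_DEVICES=9999.
os.environ['CUDA_VISIBLE_DEVICES'] = '9999'

# Option 2: let TensorFlow grow GPU memory on demand instead of grabbing it all.
cluster = tf.train.ClusterSpec({'ps': ['0.0.0.0:53198'],
                                'worker': ['0.0.0.0:58557', '0.0.0.0:42832']})
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
server = tf.train.Server(cluster, job_name='worker', task_index=0, config=config)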