1. Project Setup
1.1. Problem Introduction
SARSA and Q-learning are two classic tabular reinforcement learning methods. This article compares how the two methods perform when solving the Cliff Walking problem.
1.2. Environment
The training environment for this experiment is the Cliff Walking environment (CliffWalking-v0) from the gym library.
As shown in the figure above, the agent must start from the start cell S and reach the goal cell G while avoiding the cliff. Every step incurs a reward of -1. Falling into the cliff incurs a reward of -100, but it does not end the game: the agent is sent back to the start and the episode continues until the agent reaches the goal.
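To make the reward structure concrete, here is a minimal sketch of interacting with the environment. It assumes the same older gym API used by the code in this article (reset() returns the state id and step() returns four values); the printed numbers will vary with the random actions.

```python
import gym

env = gym.make("CliffWalking-v0")
obs = env.reset()
print("states:", env.observation_space.n)   # 48 cells in a 4 x 12 grid
print("actions:", env.action_space.n)       # 4: up, right, down, left

# Every step is rewarded with -1; stepping into the cliff is rewarded with -100
# and sends the agent back to the start without ending the episode.
for _ in range(5):
    action = env.action_space.sample()
    obs, reward, done, _ = env.step(action)
    print("action=%d  obs=%2d  reward=%4d  done=%s" % (action, obs, reward, done))
```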
2. The SARSA Algorithm
2.1. Algorithm Overview
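SARSA (State-Action-Reward-State-Action) is an on-policy temporal-difference control method: it chooses the next action with its own behaviour policy and then bootstraps on exactly that action,

$$Q(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \left[ r_t + \gamma\, Q(s_{t+1}, a_{t+1}) - Q(s_t, a_t) \right],$$

where $\alpha$ is the learning rate and $\gamma$ is the discount factor. This is the target computed in the learn() method of the implementation below.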
2.2. Algorithm Pseudocode
2.3. Implementation
(1) Preliminaries
```python
import numpy as np
import gym
```

```python
TRAIN_EPOCHS = 500            # number of training episodes
LOG_GAP = 50                  # logging interval (in episodes)

LEARNING_RATE = 0.1           # learning rate (alpha)
GAMMA = 0.95                  # reward discount factor
EPSILON = 0.1                 # epsilon-greedy exploration rate

MODEL_PATH = "./sarsa.npy"    # where to save the learned Q-table
```
(2) Building the agent

```python
class SarsaAgent(object):
    def __init__(self, obs_dim, act_dim, learning_rate=0.01, gamma=0.9, epsilon=0.1):
        self.act_dim = act_dim
        self.lr = learning_rate
        self.gamma = gamma
        self.epsilon = epsilon
        self.Q = np.zeros((obs_dim, act_dim))  # tabular Q(s, a)

    def sample(self, obs):
        # epsilon-greedy behaviour policy: explore with probability epsilon
        if np.random.uniform(0, 1) < self.epsilon:
            return np.random.choice(self.act_dim)
        else:
            return self.predict(obs)

    def predict(self, obs):
        # greedy policy: break ties among maximal Q values at random
        Q_list = self.Q[obs, :]
        maxQ = np.max(Q_list)
        act_list = np.where(Q_list == maxQ)[0]
        return np.random.choice(act_list)

    def learn(self, obs, action, reward, next_obs, next_act, done):
        '''【On-Policy】
        obs:      state before the interaction, i.e. s[t]
        action:   action chosen in this interaction, i.e. a[t]
        reward:   reward obtained for this action, i.e. r
        next_obs: state after the interaction, i.e. s[t+1]
        next_act: action that will be taken at next_obs under the current Q-table, i.e. a[t+1]
        done:     whether the episode has ended
        '''
        current_Q = self.Q[obs, action]
        if done:
            target_Q = reward
        else:
            target_Q = reward + self.gamma * self.Q[next_obs, next_act]
        self.Q[obs, action] += self.lr * (target_Q - current_Q)

    def save(self, path):
        np.save(path, self.Q)
        print("\033[1;32m Save data into file: `%s`. \033[0m" % path)

    def restore(self, path):
        self.Q = np.load(path)
        print("\033[1;33m Load data from file: `%s`. \033[0m" % path)
```
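As a quick sanity check, the following sketch runs a single on-policy update on a made-up transition (all numbers are invented for illustration) and shows the Q value moving toward the TD target $r + \gamma Q(s', a')$:

```python
agent = SarsaAgent(obs_dim=48, act_dim=4, learning_rate=0.1, gamma=0.95, epsilon=0.1)

# Pretend the agent took action 1 in state 36, received reward -1, landed in state 24,
# and the behaviour policy has already chosen action 2 as the next action.
agent.Q[24, 2] = -5.0   # pre-set a value so the bootstrap target is non-trivial
agent.learn(obs=36, action=1, reward=-1, next_obs=24, next_act=2, done=False)

# TD target = -1 + 0.95 * (-5.0) = -5.75; Q[36, 1] moves 10% of the way from 0 toward it.
print(agent.Q[36, 1])   # -0.575
```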
(3) Training and testing

```python
def run_episode(env, agent, render=False):
    done, total_steps, total_reward = False, 0, 0
    obs = env.reset()
    action = agent.sample(obs)
    while not done:
        next_obs, reward, done, _ = env.step(action)
        next_act = agent.sample(next_obs)        # choose a[t+1] with the behaviour policy
        agent.learn(obs, action, reward, next_obs, next_act, done)
        obs, action = next_obs, next_act
        total_reward += reward
        total_steps += 1
        if render:
            env.render()
    return total_reward, total_steps


def test_episode(env, agent, render=False):
    agent.restore(MODEL_PATH)
    done, total_reward = False, 0
    obs = env.reset()
    while not done:
        action = agent.predict(obs)              # evaluate with the greedy policy only
        next_obs, reward, done, _ = env.step(action)
        total_reward += reward
        obs = next_obs
        if render:
            env.render()
    return total_reward
```
```python
env = gym.make("CliffWalking-v0")
agent = SarsaAgent(
    env.observation_space.n,
    env.action_space.n,
    learning_rate=LEARNING_RATE,
    gamma=GAMMA,
    epsilon=EPSILON,
)

for ep in range(TRAIN_EPOCHS + 1):
    ep_reward, ep_steps = run_episode(env, agent, False)
    if ep % LOG_GAP == 0:
        print("Episode: %3d; Steps: %3d; Reward: %.1f" % (ep, ep_steps, ep_reward))

agent.save(MODEL_PATH)
test_reward = test_episode(env, agent, False)
print("【Eval】\t Reward: %.1f" % test_reward)
```
The results are as follows (a larger reward indicates better learning):
```
Episode:   0; Steps: 857; Reward: -2144.0
Episode:  50; Steps:  33; Reward: -33.0
Episode: 100; Steps:  30; Reward: -129.0
Episode: 150; Steps:  44; Reward: -44.0
Episode: 200; Steps:  15; Reward: -15.0
Episode: 250; Steps:  19; Reward: -118.0
Episode: 300; Steps:  26; Reward: -125.0
Episode: 350; Steps:  19; Reward: -19.0
Episode: 400; Steps:  17; Reward: -17.0
Episode: 450; Steps:  22; Reward: -22.0
Episode: 500; Steps:  19; Reward: -19.0
【Eval】  Reward: -15.0
```
3. The Q-learning Algorithm
3.1. Algorithm Overview
- SARSA is an on-policy method: it first takes the next action and then updates with that action.
- Q-learning is an off-policy method: its learn() update does not need the action actually taken next (next_action); it assumes the next action is the one with the largest Q value.
- Q-learning's update rule is:

$$Q(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \left[ r_t + \gamma \max_{a} Q(s_{t+1}, a) - Q(s_t, a_t) \right]$$

3.2. Algorithm Pseudocode
3.3. Implementation
(1) Preliminaries
```python
import numpy as np
import gym
```

```python
TRAIN_EPOCHS = 500                # number of training episodes
LOG_GAP = 50                      # logging interval (in episodes)

LEARNING_RATE = 0.1               # learning rate (alpha)
GAMMA = 0.95                      # reward discount factor
EPSILON = 0.1                     # epsilon-greedy exploration rate

MODEL_PATH = "./q_learning.npy"   # where to save the learned Q-table
```
(2) Building the agent

```python
class QLearningAgent(object):
    def __init__(self, obs_dim, act_dim, learning_rate=0.01, gamma=0.9, epsilon=0.1):
        self.act_dim = act_dim
        self.lr = learning_rate
        self.gamma = gamma
        self.epsilon = epsilon
        self.Q = np.zeros((obs_dim, act_dim))  # tabular Q(s, a)

    def sample(self, obs):
        # epsilon-greedy behaviour policy: explore with probability epsilon
        if np.random.uniform(0, 1) < self.epsilon:
            return np.random.choice(self.act_dim)
        else:
            return self.predict(obs)

    def predict(self, obs):
        # greedy policy: break ties among maximal Q values at random
        Q_list = self.Q[obs, :]
        maxQ = np.max(Q_list)
        act_list = np.where(Q_list == maxQ)[0]
        return np.random.choice(act_list)

    def learn(self, obs, action, reward, next_obs, done):
        '''【Off-Policy】
        obs:      state before the interaction, i.e. s[t]
        action:   action chosen in this interaction, i.e. a[t]
        reward:   reward obtained for this action, i.e. r
        next_obs: state after the interaction, i.e. s[t+1]
        done:     whether the episode has ended
        '''
        cur_Q = self.Q[obs, action]
        if done:
            target_Q = reward
        else:
            target_Q = reward + self.gamma * np.max(self.Q[next_obs, :])
        self.Q[obs, action] += self.lr * (target_Q - cur_Q)

    def save(self, path):
        np.save(path, self.Q)
        print("\033[1;32m Save data into file: `%s`. \033[0m" % path)

    def restore(self, path):
        self.Q = np.load(path)
        print("\033[1;33m Load data from file: `%s`. \033[0m" % path)
```
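To highlight the only difference between the two agents, the following illustrative snippet computes both TD targets for the same transition; the transition and the Q values are made up for illustration.

```python
import numpy as np

GAMMA = 0.95
reward, next_obs, next_act = -1, 24, 2   # hypothetical transition data
Q = np.zeros((48, 4))
Q[24] = [-3.0, -2.0, -5.0, -4.0]         # made-up Q values for the next state

# SARSA bootstraps on the action the behaviour policy actually chose (a' = 2) ...
sarsa_target = reward + GAMMA * Q[next_obs, next_act]        # -1 + 0.95 * (-5.0) = -5.75
# ... while Q-learning bootstraps on the best action, regardless of what is actually taken.
q_learning_target = reward + GAMMA * np.max(Q[next_obs, :])  # -1 + 0.95 * (-2.0) = -2.9

print(sarsa_target, q_learning_target)
```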
(3) Training and testing

```python
def run_episode(env, agent, render=False):
    done, total_steps, total_reward = False, 0, 0
    obs = env.reset()
    while not done:
        action = agent.sample(obs)               # choose a[t] with the behaviour policy
        next_obs, reward, done, _ = env.step(action)
        agent.learn(obs, action, reward, next_obs, done)
        obs = next_obs
        total_reward += reward
        total_steps += 1
        if render:
            env.render()
    return total_reward, total_steps


def test_episode(env, agent, render=False):
    agent.restore(MODEL_PATH)
    done, total_reward = False, 0
    obs = env.reset()
    while not done:
        action = agent.predict(obs)              # evaluate with the greedy policy only
        next_obs, reward, done, _ = env.step(action)
        total_reward += reward
        obs = next_obs
        if render:
            env.render()
    return total_reward
```
```python
env = gym.make("CliffWalking-v0")
agent = QLearningAgent(
    env.observation_space.n,
    env.action_space.n,
    learning_rate=LEARNING_RATE,
    gamma=GAMMA,
    epsilon=EPSILON,
)

for ep in range(TRAIN_EPOCHS + 1):
    ep_reward, ep_steps = run_episode(env, agent, False)
    if ep % LOG_GAP == 0:
        print("Episode: %3d; Steps: %3d; Reward: %.1f" % (ep, ep_steps, ep_reward))

agent.save(MODEL_PATH)
test_reward = test_episode(env, agent, False)
print("【Eval】\t Reward: %.1f" % test_reward)
```
The results are as follows (a larger reward indicates better learning):
```
Episode:   0; Steps: 519; Reward: -1608.0
Episode:  50; Steps:  20; Reward: -20.0
Episode: 100; Steps:  21; Reward: -21.0
Episode: 150; Steps:  47; Reward: -146.0
Episode: 200; Steps:  18; Reward: -18.0
Episode: 250; Steps:  30; Reward: -228.0
Episode: 300; Steps:  20; Reward: -20.0
Episode: 350; Steps:  13; Reward: -13.0
Episode: 400; Steps:  17; Reward: -17.0
Episode: 450; Steps:  14; Reward: -14.0
Episode: 500; Steps:  50; Reward: -248.0
【Eval】  Reward: -13.0
```
4. Conclusions
While solving the Cliff Walking problem, we found that:
- Q-learning explores the environment boldly and converges to the optimal route that hugs the cliff edge;
- SARSA explores more cautiously and prefers the safer route that stays away from the cliff (see the visualization sketch below).
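One way to see this difference is to print the greedy policy stored in each saved Q-table. The sketch below is illustrative rather than definitive: it assumes gym's 4 x 12 CliffWalking grid in row-major order, the action order 0=up, 1=right, 2=down, 3=left, and the Q-table files saved by the two training scripts above.

```python
import numpy as np

ARROWS = "↑→↓←"   # assumed action order in CliffWalking-v0: 0=up, 1=right, 2=down, 3=left

def print_greedy_policy(q_table_path):
    """Print the greedy action for each cell of the 4 x 12 Cliff Walking grid."""
    Q = np.load(q_table_path)
    for row in range(4):
        line = ""
        for col in range(12):
            state = row * 12 + col
            if row == 3 and 1 <= col <= 10:
                line += " C "                                  # cliff cells
            elif row == 3 and col == 11:
                line += " G "                                  # goal cell
            else:
                line += " %s " % ARROWS[int(np.argmax(Q[state]))]
        print(line)

print("SARSA greedy policy:")
print_greedy_policy("./sarsa.npy")
print("Q-learning greedy policy:")
print_greedy_policy("./q_learning.npy")
```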
Closing Remarks