Reinforcement Learning: Solving a Maze with the Q-learning Algorithm
阿新 · Published 2018-11-26
This example comes from Morvan (莫凡)'s reinforcement learning tutorial; today I worked through the little maze example. The tutorial page is: https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/2-2-A-q-learning/
The code is below; I have added comments in a few places.
It is split into three files.
1. maze_env.py — implements the maze layout with tkinter:
```python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
Reinforcement learning maze example.

Red rectangle:      explorer.
Black rectangles:   hells     [reward = -1].
Yellow circle:      paradise  [reward = +1].
All other states:   ground    [reward = 0].

This script is the environment part of this example. The RL is in RL_brain.py.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import sys

if sys.version_info.major == 2:  # if the Python major version is 2
    import Tkinter as tk
else:
    import tkinter as tk

UNIT = 40    # pixels per grid cell
MAZE_H = 4   # grid height
MAZE_W = 4   # grid width


class Maze(tk.Tk, object):
    def __init__(self):
        super(Maze, self).__init__()
        self.action_space = ['u', 'd', 'l', 'r']  # actions
        self.n_actions = len(self.action_space)   # number of actions
        self.title('maze')
        self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
        self._build_maze()

    def _build_maze(self):
        self.canvas = tk.Canvas(self, bg='white',
                                height=MAZE_H * UNIT,
                                width=MAZE_W * UNIT)

        # create grids
        for c in range(0, MAZE_W * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, MAZE_W * UNIT
            self.canvas.create_line(x0, y0, x1, y1)  # draw a line from (x0, y0) to (x1, y1)
        for r in range(0, MAZE_H * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, MAZE_H * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)

        # create origin
        origin = np.array([20, 20])

        # hell: draw the first black square
        hell1_center = origin + np.array([UNIT * 2, UNIT])
        self.hell1 = self.canvas.create_rectangle(
            hell1_center[0] - 15, hell1_center[1] - 15,
            hell1_center[0] + 15, hell1_center[1] + 15,
            fill='black')
        # hell: draw the second black square
        hell2_center = origin + np.array([UNIT, UNIT * 2])
        self.hell2 = self.canvas.create_rectangle(
            hell2_center[0] - 15, hell2_center[1] - 15,
            hell2_center[0] + 15, hell2_center[1] + 15,
            fill='black')

        # create oval: draw the yellow circle (the goal)
        oval_center = origin + UNIT * 2
        self.oval = self.canvas.create_oval(
            oval_center[0] - 15, oval_center[1] - 15,
            oval_center[0] + 15, oval_center[1] + 15,
            fill='yellow')

        # create red rect: draw the red square (the explorer)
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')

        # pack all
        self.canvas.pack()

    def reset(self):
        self.update()
        time.sleep(0.5)
        self.canvas.delete(self.rect)
        origin = np.array([20, 20])
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')
        # return observation
        return self.canvas.coords(self.rect)

    def step(self, action):
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if action == 0:    # up
            if s[1] > UNIT:
                base_action[1] -= UNIT  # move up 40 px
        elif action == 1:  # down
            if s[1] < (MAZE_H - 1) * UNIT:
                base_action[1] += UNIT  # move down 40 px
        elif action == 2:  # right
            if s[0] < (MAZE_W - 1) * UNIT:
                base_action[0] += UNIT  # move right 40 px
        elif action == 3:  # left
            if s[0] > UNIT:
                base_action[0] -= UNIT  # move left 40 px

        self.canvas.move(self.rect, base_action[0], base_action[1])  # move agent

        s_ = self.canvas.coords(self.rect)  # next state

        # reward function
        if s_ == self.canvas.coords(self.oval):
            reward = 1
            done = True
            s_ = 'terminal'
        elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
            reward = -1
            done = True
            s_ = 'terminal'
        else:
            reward = 0
            done = False

        return s_, reward, done

    def render(self):
        time.sleep(0.1)
        self.update()
```
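Before wiring in any learning, the environment can be sanity-checked on its own by driving it with random actions. This loop is my own sketch, not part of the original post; it only uses the `Maze`, `reset`, `step`, and `render` defined above, and follows the same `after`/`mainloop` scheduling pattern that run_this.py uses below.

```python
# Minimal sketch (not in the original post): drive the Maze with random
# actions to check that reset/step/render behave as expected.
import numpy as np
from maze_env import Maze

env = Maze()

def random_walk():
    observation = env.reset()
    done = False
    while not done:
        env.render()
        action = np.random.randint(env.n_actions)  # pick one of 0..3 uniformly
        observation, reward, done = env.step(action)
    print('episode finished, last reward =', reward)
    env.destroy()

env.after(100, random_walk)  # tkinter needs the loop scheduled on its event loop
env.mainloop()
```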
2. RL_brain.py — the core of the Q-learning algorithm:
```python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
This part of code is the Q learning brain, which is a brain of the agent.
All decisions are made in here.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import pandas as pd


class QLearningTable:
    def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
        '''
        :param actions: the list of actions
        :param learning_rate: learning rate; determines how much of this step's error is learned
        :param reward_decay: the discount factor; controls how strongly rewards further in the future are weighted. 0 means only the immediate reward of the current action matters.
        :param e_greedy: the decision strategy. With epsilon = 0.9, 90% of the time the action with the best value in the Q table is chosen, and 10% of the time a random action is taken.
        '''
        self.actions = actions  # a list
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon = e_greedy
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)

    def choose_action(self, observation):
        self.check_state_exist(observation)
        # action selection
        if np.random.uniform() < self.epsilon:
            # choose best action
            state_action = self.q_table.loc[observation, :]
            # some actions may have the same value; randomly choose one of them
            action = np.random.choice(state_action[state_action == np.max(state_action)].index)
        else:
            # choose random action
            action = np.random.choice(self.actions)
        return action

    def learn(self, s, a, r, s_):
        self.check_state_exist(s_)
        q_predict = self.q_table.loc[s, a]
        if s_ != 'terminal':
            q_target = r + self.gamma * self.q_table.loc[s_, :].max()  # next state is not terminal
        else:
            q_target = r  # next state is terminal
        self.q_table.loc[s, a] += self.lr * (q_target - q_predict)  # update

    def check_state_exist(self, state):
        if state not in self.q_table.index:
            # append new state to q table
            # (DataFrame.append was removed in pandas 2.0, so build the row and concat it)
            new_row = pd.Series(
                [0] * len(self.actions),
                index=self.q_table.columns,
                name=state,
            )
            self.q_table = pd.concat([self.q_table, new_row.to_frame().T])
```
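The heart of `learn()` is the standard tabular Q-learning update: Q(s,a) ← Q(s,a) + lr · (r + gamma · max_a' Q(s',a') − Q(s,a)). As a sanity check, here is a tiny worked example of my own (the state label 's0' and the numbers are made up for illustration; only `QLearningTable` itself comes from the code above):

```python
# Tiny worked example of the update rule
#     Q(s,a) <- Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
# 's0' is a made-up state label, not one the maze produces.
from RL_brain import QLearningTable

RL = QLearningTable(actions=[0, 1, 2, 3], learning_rate=0.5, reward_decay=0.9)
RL.check_state_exist('s0')        # make sure the row exists, as choose_action would
RL.learn('s0', 0, 1, 'terminal')  # reward 1, episode ends, so q_target = r = 1
print(RL.q_table.loc['s0', 0])    # 0 + 0.5 * (1 - 0) = 0.5
RL.learn('s0', 0, 1, 'terminal')
print(RL.q_table.loc['s0', 0])    # 0.5 + 0.5 * (1 - 0.5) = 0.75
```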
3. run_this.py — runs the training loop:
```python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from maze_env import Maze
from RL_brain import QLearningTable


def update():
    for episode in range(100):
        # initial observation
        observation = env.reset()

        while True:
            # fresh env
            env.render()

            # RL choose action based on observation
            action = RL.choose_action(str(observation))

            # RL take action and get next observation and reward
            observation_, reward, done = env.step(action)

            # RL learn from this transition
            RL.learn(str(observation), action, reward, str(observation_))

            # swap observation
            observation = observation_

            # break while loop when end of this episode
            if done:
                break

    print(RL.q_table)
    RL.q_table.to_csv("./1.csv")

    # end of game
    print('game over')
    env.destroy()


if __name__ == "__main__":
    env = Maze()
    RL = QLearningTable(actions=list(range(env.n_actions)))
    # print(RL.q_table)
    env.after(100, update)
    # print("hahah")
    # print(RL.q_table)
    env.mainloop()
```
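Since `update()` saves the learned table to ./1.csv, the greedy policy can be inspected after training. The snippet below is my own sketch, not from the post; it only assumes the CSV that run_this.py writes above.

```python
# Sketch (not from the original post): load the Q table saved by run_this.py
# and print the greedy action for each visited state.
import pandas as pd

q_table = pd.read_csv("./1.csv", index_col=0)
q_table.columns = q_table.columns.astype(int)  # column names come back as strings

for state, row in q_table.iterrows():
    if state == 'terminal':
        continue
    # action indices: 0 = up, 1 = down, 2 = right, 3 = left (see maze_env.step)
    print(state, '-> best action:', row.idxmax())
```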
To see Morvan's GitHub version of this example, go to: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/tree/master/contents/2_Q_Learning_maze