
Analysis of the baselines/bench/monitor.py module in the baselines library

 

Source code of the baselines/bench/monitor.py module:

__all__ = ['Monitor', 'get_monitor_files', 'load_results']

from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json

class Monitor(Wrapper):
    EXT = "monitor.csv"
    f = None

    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename:
            self.results_writer = ResultsWriter(filename,
                header={"t_start": time.time(), 'env_id': env.spec and env.spec.id},
                extra_keys=reset_keywords + info_keywords)
        else:
            self.results_writer = None
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()

    def reset(self, **kwargs):
        self.reset_state()
        for k in self.reset_keywords:
            v = kwargs.get(k)
            if v is None:
                raise ValueError('Expected you to pass kwarg %s into reset'%k)
            self.current_reset_info[k] = v
        return self.env.reset(**kwargs)

    def reset_state(self):
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False

    def step(self, action):
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.update(ob, rew, done, info)
        return (ob, rew, done, info)

    def update(self, ob, rew, done, info):
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
            for k in self.info_keywords:
                epinfo[k] = info[k]
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            self.episode_times.append(time.time() - self.tstart)
            epinfo.update(self.current_reset_info)
            if self.results_writer:
                self.results_writer.write_row(epinfo)
            assert isinstance(info, dict)
            if isinstance(info, dict):
                info['episode'] = epinfo
        self.total_steps += 1

    def close(self):
        super(Monitor, self).close()
        if self.f is not None:
            self.f.close()

    def get_total_steps(self):
        return self.total_steps

    def get_episode_rewards(self):
        return self.episode_rewards

    def get_episode_lengths(self):
        return self.episode_lengths

    def get_episode_times(self):
        return self.episode_times

class LoadMonitorResultsError(Exception):
    pass

class ResultsWriter(object):
    def __init__(self, filename, header='', extra_keys=()):
        self.extra_keys = extra_keys
        assert filename is not None
        if not filename.endswith(Monitor.EXT):
            if osp.isdir(filename):
                filename = osp.join(filename, Monitor.EXT)
            else:
                filename = filename + "." + Monitor.EXT
        self.f = open(filename, "wt")
        if isinstance(header, dict):
            header = '# {} \n'.format(json.dumps(header))
        self.f.write(header)
        self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't') + tuple(extra_keys))
        self.logger.writeheader()
        self.f.flush()

    def write_row(self, epinfo):
        if self.logger:
            self.logger.writerow(epinfo)
            self.f.flush()

def get_monitor_files(dir):
    return glob(osp.join(dir, "*" + Monitor.EXT))

def load_results(dir):
    import pandas
    monitor_files = (
        glob(osp.join(dir, "*monitor.json")) +
        glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
    if not monitor_files:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
    dfs = []
    headers = []
    for fname in monitor_files:
        with open(fname, 'rt') as fh:
            if fname.endswith('csv'):
                firstline = fh.readline()
                if not firstline:
                    continue
                assert firstline[0] == '#'
                header = json.loads(firstline[1:])
                df = pandas.read_csv(fh, index_col=None)
                headers.append(header)
            elif fname.endswith('json'): # Deprecated json format
                episodes = []
                lines = fh.readlines()
                header = json.loads(lines[0])
                headers.append(header)
                for line in lines[1:]:
                    episode = json.loads(line)
                    episodes.append(episode)
                df = pandas.DataFrame(episodes)
            else:
                assert 0, 'unreachable'
            df['t'] += header['t_start']
        dfs.append(df)
    df = pandas.concat(dfs)
    df.sort_values('t', inplace=True)
    df.reset_index(inplace=True)
    df['t'] -= min(header['t_start'] for header in headers)
    df.headers = headers # HACK to preserve backwards compatibility
    return df
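For reference, the file that ResultsWriter produces starts with a single '#'-prefixed JSON header line recording t_start and env_id, followed by ordinary CSV rows with columns r (episode reward), l (episode length) and t (seconds since t_start), plus any extra_keys. A sample file might look like this (the timestamp, env id and episode values below are made up purely for illustration):

# {"t_start": 1650000000.0, "env_id": "CartPole-v1"}
r,l,t
21.0,21,0.73261
56.0,56,2.154392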

 

baselines is an open-source reinforcement learning library. It was one of the earliest and most authoritative open-source RL libraries, but it has not been maintained for years, so few new algorithms have been added to it. The libraries that followed it are still maintained, yet each goes its own way and well-tested code bases are hard to come by, so in my view baselines still has real value. During the stay-at-home period of the pandemic I did some source-code analysis of this library. In the process I have to say that, because it was written collaboratively by many people, the coding style varies greatly from module to module even though the community accepts and recognizes the library; many modules look like reinvented wheels, and quite a few are poorly optimized and logically messy, which I can only complain about.

 

 

===============================================

 

 

The core of this module is the class:

class Monitor(Wrapper):

 

 

The purpose of this module is to wrap a gym env and record, for each episode, its length, the per-step rewards (summed into the episode reward), and the episode's elapsed time, then write this information to a file.

Since the code is fairly long but the implementation itself is simple and of limited interest, and the code is rather messy, I will not analyze it in detail.
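Still, a minimal usage sketch may make the workflow clearer. The snippet below is an illustration only: it assumes gym and pandas are installed, that the 'CartPole-v1' environment is available, that a ./logs directory already exists, and it uses the old 4-tuple gym step API that this module was written against.

import gym
from baselines.bench.monitor import Monitor, load_results

# Wrap the env; episode statistics are appended to ./logs/0.monitor.csv
env = Monitor(gym.make('CartPole-v1'), './logs/0', allow_early_resets=True)

ob = env.reset()
done = False
while not done:
    # old gym API: step returns a 4-tuple
    ob, rew, done, info = env.step(env.action_space.sample())

# when an episode ends, Monitor attaches its summary to info['episode']
print(info['episode'])            # {'r': ..., 'l': ..., 't': ...}
print(env.get_episode_rewards())  # per-episode total rewards kept in memory

# load_results merges every *.monitor.csv under a directory into one DataFrame
df = load_results('./logs')
print(df[['r', 'l', 't']])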

 

 

 

 

 

=================================================