-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path experiment_2.py
More file actions
108 lines (82 loc) · 2.37 KB
/
experiment_2.py
File metadata and controls
108 lines (82 loc) · 2.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# Experiment 2:
# -------------
#
# We further optimize the DQN agent through parameter tuning to improve its performance.
#
import gym
import time
import tensorflow as tf
from stable_baselines.deepq.policies import CnnPolicy, MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import DQN
from stable_baselines.common.evaluation import evaluate_policy
class Agent:
    """Plain record bundling a display name with a trained model and its algorithm label."""

    def __init__(self, name, model, algo):
        # Pure data holder — no behavior, just attribute storage.
        self.algo = algo
        self.model = model
        self.name = name
class Evaluation:
    """Plain record holding an evaluation result: agent name, mean reward, reward std-dev."""

    def __init__(self, name, mean, std):
        # Pure data holder — no behavior, just attribute storage.
        self.std = std
        self.mean = mean
        self.name = name
# ---------------------------------------------------------------------------
# Experiment script body: train a tuned DQN agent on MsPacman (RAM input),
# evaluate it, save/reload it, then render it playing. Runs at import time.
# ---------------------------------------------------------------------------

# Create environment (RAM observations, not pixels).
env = gym.make('MsPacman-ram-v0')

# Silence TF1's deprecation chatter so training logs stay readable.
tf.logging.set_verbosity(tf.logging.ERROR)

# Instantiate the agent. MlpPolicy fits RAM-vector observations
# (CnnPolicy would expect image input).
dqn_model = DQN(
    MlpPolicy,
    env,
    buffer_size=10000,                # replay-buffer capacity
    learning_rate=1e-4,               # literal is already a float; no cast needed
    learning_starts=10000,            # pure-exploration steps before updates begin
    target_network_update_freq=1000,  # sync target network every 1000 steps
    train_freq=4,                     # one gradient update per 4 env steps
    exploration_final_eps=0.01,       # final epsilon for epsilon-greedy
    exploration_fraction=0.1,         # fraction of training spent annealing epsilon
    prioritized_replay_alpha=0.6,
    prioritized_replay=True,          # prioritized experience replay
    verbose=1,
)
print('-----------------')

# Train agent.
print("Training DQN agent")
dqn_model.learn(
    total_timesteps=int(1e6),
    log_interval=10,
)

# Save the trained weights so they can be reloaded below.
save_name = 'DQN_trained_v2'
print('Saving', save_name)
dqn_model.save(save_name)
print('')
print('-----------------')

# Evaluate the trained agent over 50 episodes.
print('Evaluating DQN...')
mean_reward, std_reward = evaluate_policy(dqn_model, env, n_eval_episodes=50)
evaluation = Evaluation('DQN', mean_reward, std_reward)
print('Evaluation:')
print('-----------------')
print(evaluation.name, ':', evaluation.mean, ',', evaluation.std)
print('-----------------')

# Reload the saved agent — also sanity-checks the save/load round trip.
best_model = DQN.load('DQN_trained_v2', env=env)
game_speed = 1  # 1 is real-time; increase for faster playback

# Wrap in a VecEnv, the interface stable-baselines models expect.
# NOTE(review): the lambda captures `env` late, but DummyVecEnv invokes it
# during construction (before `env` is rebound), so it wraps the gym env.
env = DummyVecEnv([lambda: env])

# Watch the trained agent play 10 episodes (each capped at 1000 steps).
# The per-episode reset below makes a pre-loop reset unnecessary.
for i_episode in range(10):
    observation = env.reset()
    for t in range(1000):
        time.sleep(1 / 24 / game_speed)  # throttle playback to ~24 FPS
        action, _states = best_model.predict(observation)
        observation, reward, done, info = env.step(action)
        env.render()
        # DummyVecEnv returns a length-1 done array; its truthiness is valid here.
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            break
env.close()