main.py
import sys
import numpy as np
import matplotlib.pyplot as plt
from multi_agent_env import MultiAgentPathFollowingEnv
from ddpg import DDPGAgent
from utils import Memory
'''
Main script: runs the path-following environment and trains a DDPG agent on it.
'''
# Create the environment and the DDPG agent
env = MultiAgentPathFollowingEnv(num_agents=1)
agent = DDPGAgent(state_dim=4, action_dim=2, max_action=1.0)
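# Assumed interfaces (see multi_agent_env.py and ddpg.py for the actual code):
# - env follows the Gym-style API used below: reset() returns (state, info)
#   and step(action) returns (next_state, reward, done, info).
# - agent exposes get_action(state), a replay buffer at agent.memory with
#   push(...) and __len__, update(batch_size), and save_model(path); the
#   update step is expected to perform the usual DDPG actor-critic update
#   (critic regression to a bootstrapped target, actor gradient through the
#   critic, soft target-network updates).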
# agent = DDPGAgent(env)
# Training history and hyperparameters
rewards = []
avg_rewards = []
episodes = 1000     # number of episodes to run
batch_size = 128    # transitions sampled from the replay buffer per update
for episode in range(episodes):
    state, _ = env.reset()
    episode_reward = 0
    for step in range(500):
        action = agent.get_action(state)
        new_state, reward, done, _ = env.step(action)
        agent.memory.push(state, action, reward, new_state, done)
        # Start learning once enough transitions have been collected
        if len(agent.memory) > batch_size:
            agent.update(batch_size)
        state = new_state
        episode_reward += reward
        if done:
            sys.stdout.write("episode: {}, reward: {}, average_reward: {}\n".format(
                episode, np.round(episode_reward, decimals=2), np.mean(rewards[-10:])))
            break
        env.render()
    rewards.append(episode_reward)
    avg_rewards.append(np.mean(rewards[-10:]))
# Save the trained model and plot the learning curves
agent.save_model("ddpg_model.pth")

plt.plot(rewards, label='episode reward')
plt.plot(avg_rewards, label='average reward (last 10 episodes)')
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.legend()
plt.show()
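# ---------------------------------------------------------------------------
# For reference only: a minimal sketch of the replay-buffer interface this
# script relies on through agent.memory (push and __len__), plus the sample()
# method a DDPG update typically needs. This sketches what utils.Memory is
# assumed to provide; it is not the repository's actual implementation and is
# not used above.
# ---------------------------------------------------------------------------
import random
from collections import deque


class ReplayBufferSketch:
    """Fixed-size FIFO buffer of (state, action, reward, next_state, done)."""

    def __init__(self, max_size=100_000):
        self.buffer = deque(maxlen=max_size)

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)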