Source code for pettingzoo.mpe.simple_reference.simple_reference

# noqa: D212, D415
"""
# Simple Reference

```{figure} mpe_simple_reference.gif
:width: 140px
:name: simple_reference
```

This environment is part of the <a href='..'>MPE environments</a>. Please read that page first for general information.

| Import             | `from pettingzoo.mpe import simple_reference_v3` |
|--------------------|--------------------------------------------------|
| Actions            | Discrete/Continuous                              |
| Parallel API       | Yes                                              |
| Manual Control     | No                                               |
| Agents             | `agents= [agent_0, agent_1]`                     |
| Agents             | 2                                                |
| Action Shape       | (50)/(15)                                        |
| Action Values      | Discrete(50)/Box(0.0, 1.0, (15))                 |
| Observation Shape  | (21)                                             |
| Observation Values | (-inf,inf)                                       |
| State Shape        | (42,)                                            |
| State Values       | (-inf,inf)                                       |


This environment has 2 agents and 3 landmarks of different colors. Each agent wants to get closer to its target landmark, which is known only by the other agent. Both agents are simultaneous speakers and listeners.

Locally, each agent is rewarded with the negative distance to its target landmark. Globally, all agents share the negative average distance of the agents to their respective target landmarks. The relative weight of these two terms is controlled by the `local_ratio` parameter.
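
A minimal sketch of how this weighting plays out per agent (the function below is illustrative only and not part of the PettingZoo API):

``` python
def mixed_reward(local_reward, global_reward, local_ratio=0.5):
    # Illustrative only: each agent's reward is a convex combination of its
    # own (local) reward and the shared (global) reward, weighted by local_ratio.
    return local_ratio * local_reward + (1 - local_ratio) * global_reward
```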

Agent observation space: `[self_vel, all_landmark_rel_positions, goal_color, communication]`, where `goal_color` is the color of the landmark the *other* agent must reach.

Agent discrete action space: `[say_0, say_1, say_2, say_3, say_4, say_5, say_6, say_7, say_8, say_9] X [no_action, move_left, move_right, move_down, move_up]`

Where X is the Cartesian product (giving a total action space of 50).

Agent continuous action space: `[no_action, move_left, move_right, move_down, move_up, say_0, say_1, say_2, say_3, say_4, say_5, say_6, say_7, say_8, say_9]`
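
To make the Cartesian-product structure concrete, the sketch below enumerates the 50 discrete (communication, movement) pairs; the ordering is illustrative and not necessarily the index encoding used internally:

``` python
# Illustrative enumeration of the 10 x 5 = 50 discrete actions.
moves = ["no_action", "move_left", "move_right", "move_down", "move_up"]
says = [f"say_{i}" for i in range(10)]
discrete_actions = [(say, move) for say in says for move in moves]
assert len(discrete_actions) == 50
```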

### Arguments


``` python
simple_reference_v3.env(local_ratio=0.5, max_cycles=25, continuous_actions=False)
```



`local_ratio`: Weight applied to the local (per-agent) reward; the global reward is always weighted by `1 - local_ratio`.

`max_cycles`: Number of frames (a step for each agent) until the game terminates.

`continuous_actions`: Whether agent action spaces are discrete (default) or continuous.
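
A minimal random-policy rollout using the standard PettingZoo AEC loop (assuming a recent PettingZoo release where `reset` accepts a seed):

``` python
from pettingzoo.mpe import simple_reference_v3

env = simple_reference_v3.env(local_ratio=0.5, max_cycles=25, continuous_actions=False)
env.reset(seed=42)
for agent in env.agent_iter():
    observation, reward, termination, truncation, info = env.last()
    # Sample a random action; terminated or truncated agents must receive None.
    action = None if termination or truncation else env.action_space(agent).sample()
    env.step(action)
env.close()
```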

"""

import numpy as np
from gymnasium.utils import EzPickle

from pettingzoo.mpe._mpe_utils.core import Agent, Landmark, World
from pettingzoo.mpe._mpe_utils.scenario import BaseScenario
from pettingzoo.mpe._mpe_utils.simple_env import SimpleEnv, make_env
from pettingzoo.utils.conversions import parallel_wrapper_fn


class raw_env(SimpleEnv, EzPickle):
    def __init__(
        self, local_ratio=0.5, max_cycles=25, continuous_actions=False, render_mode=None
    ):
        EzPickle.__init__(
            self,
            local_ratio=local_ratio,
            max_cycles=max_cycles,
            continuous_actions=continuous_actions,
            render_mode=render_mode,
        )
        assert (
            0.0 <= local_ratio <= 1.0
        ), "local_ratio is a proportion. Must be between 0 and 1."
        scenario = Scenario()
        world = scenario.make_world()
        SimpleEnv.__init__(
            self,
            scenario=scenario,
            world=world,
            render_mode=render_mode,
            max_cycles=max_cycles,
            continuous_actions=continuous_actions,
            local_ratio=local_ratio,
        )
        self.metadata["name"] = "simple_reference_v3"


env = make_env(raw_env)
parallel_env = parallel_wrapper_fn(env)


class Scenario(BaseScenario):
    def make_world(self):
        world = World()
        # set any world properties first
        world.dim_c = 10
        world.collaborative = True  # whether agents share rewards
        # add agents
        world.agents = [Agent() for i in range(2)]
        for i, agent in enumerate(world.agents):
            agent.name = f"agent_{i}"
            agent.collide = False
        # add landmarks
        world.landmarks = [Landmark() for i in range(3)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = "landmark %d" % i
            landmark.collide = False
            landmark.movable = False
        return world

    def reset_world(self, world, np_random):
        # assign goals to agents
        for agent in world.agents:
            agent.goal_a = None
            agent.goal_b = None
        # want other agent to go to the goal landmark
        world.agents[0].goal_a = world.agents[1]
        world.agents[0].goal_b = np_random.choice(world.landmarks)
        world.agents[1].goal_a = world.agents[0]
        world.agents[1].goal_b = np_random.choice(world.landmarks)
        # random properties for agents
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.25, 0.25, 0.25])
        # random properties for landmarks
        world.landmarks[0].color = np.array([0.75, 0.25, 0.25])
        world.landmarks[1].color = np.array([0.25, 0.75, 0.25])
        world.landmarks[2].color = np.array([0.25, 0.25, 0.75])
        # special colors for goals
        world.agents[0].goal_a.color = world.agents[0].goal_b.color
        world.agents[1].goal_a.color = world.agents[1].goal_b.color
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np_random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np_random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def reward(self, agent, world):
        if agent.goal_a is None or agent.goal_b is None:
            agent_reward = 0.0
        else:
            agent_reward = np.sqrt(
                np.sum(np.square(agent.goal_a.state.p_pos - agent.goal_b.state.p_pos))
            )
        return -agent_reward

    def global_reward(self, world):
        all_rewards = sum(self.reward(agent, world) for agent in world.agents)
        return all_rewards / len(world.agents)

    def observation(self, agent, world):
        # goal color
        goal_color = [np.zeros(world.dim_color), np.zeros(world.dim_color)]
        if agent.goal_b is not None:
            goal_color[1] = agent.goal_b.color
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # entity colors
        entity_color = []
        for entity in world.landmarks:
            entity_color.append(entity.color)
        # communication of all other agents
        comm = []
        for other in world.agents:
            if other is agent:
                continue
            comm.append(other.state.c)
        return np.concatenate([agent.state.p_vel] + entity_pos + [goal_color[1]] + comm)