import random
import time
from copy import deepcopy

import gym
import networkx as nx
import numpy as np
from gym.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple


class MyTestEnv(gym.Env):
    """This is a "going right" task. The task is to go right ``size`` steps."""

    def __init__(
        self,
        size,
        sleep=0,
        dict_state=False,
        recurse_state=False,
        ma_rew=0,
        multidiscrete_action=False,
        random_sleep=False,
        array_state=False
    ):
        assert dict_state + recurse_state + array_state <= 1, \
            "at most one of dict_state / recurse_state / array_state may be true"
        self.size = size
        self.sleep = sleep
        self.random_sleep = random_sleep
        self.dict_state = dict_state
        self.recurse_state = recurse_state
        self.array_state = array_state
        self.ma_rew = ma_rew
        self._md_action = multidiscrete_action
        # how many steps this env has stepped
        self.steps = 0
        if dict_state:
            self.observation_space = Dict(
                {
                    "index": Box(shape=(1, ), low=0, high=size - 1),
                    "rand": Box(shape=(1, ), low=0, high=1, dtype=np.float64)
                }
            )
        elif recurse_state:
            # nested Dict/Tuple spaces, useful for exercising recursive handling
            self.observation_space = Dict(
                {
                    "index": Box(shape=(1, ), low=0, high=size - 1),
                    "dict": Dict(
                        {
                            "tuple": Tuple(
                                (
                                    Discrete(2),
                                    Box(shape=(2, ), low=0, high=1, dtype=np.float64)
                                )
                            ),
                            "rand": Box(shape=(1, 2), low=0, high=1, dtype=np.float64)
                        }
                    )
                }
            )
        elif array_state:
            # image-like observation: a stack of four 84x84 frames
            self.observation_space = Box(shape=(4, 84, 84), low=0, high=255)
        else:
            self.observation_space = Box(shape=(1, ), low=0, high=size - 1)
        if multidiscrete_action:
            self.action_space = MultiDiscrete([2, 2])
        else:
            self.action_space = Discrete(2)
        self.terminated = False
        self.index = 0

    def reset(self, state=0, seed=None):
        super().reset(seed=seed)
        self.terminated = False
        self.do_sleep()
        self.index = state
        return self._get_state(), {'key': 1, 'env': self}

    def _get_reward(self):
        """Return a reward list of length ``ma_rew`` if ``ma_rew > 0``, else a scalar."""
        end_flag = int(self.terminated)
        if self.ma_rew > 0:
            return [end_flag] * self.ma_rew
        return end_flag

    def _get_state(self):
        """Generate the state (observation) of MyTestEnv."""
        if self.dict_state:
            return {
                'index': np.array([self.index], dtype=np.float32),
                'rand': self.np_random.random(1)
            }
        elif self.recurse_state:
            return {
                'index': np.array([self.index], dtype=np.float32),
                'dict': {
                    "tuple": (np.array([1], dtype=int), self.np_random.random(2)),
                    "rand": self.np_random.random((1, 2))
                }
            }
        elif self.array_state:
            # write ``index`` into the frame stack via several indexing patterns
            img = np.zeros([4, 84, 84], int)
            img[3, np.arange(84), np.arange(84)] = self.index  # diagonal only
            img[2, np.arange(84)] = self.index
            img[1, :, np.arange(84)] = self.index
            img[0] = self.index
            return img
        else:
            return np.array([self.index], dtype=np.float32)

    def do_sleep(self):
        if self.sleep > 0:
            # sleep a random fraction of ``sleep`` seconds if random_sleep is
            # set, otherwise exactly ``sleep`` seconds
            sleep_time = random.random() if self.random_sleep else 1
            sleep_time *= self.sleep
            time.sleep(sleep_time)

    def step(self, action):
        self.steps += 1
        if self._md_action:
            action = action[0]
        if self.terminated:
            raise ValueError('step after the episode has terminated')
        self.do_sleep()
        if self.index == self.size:
            # already at the goal, e.g. reset directly into the terminal state
            self.terminated = True
            return self._get_state(), self._get_reward(), self.terminated, False, {}
        if action == 0:
            self.index = max(self.index - 1, 0)
            return self._get_state(), self._get_reward(), self.terminated, False, \
                {'key': 1, 'env': self} if self.dict_state else {}
        elif action == 1:
            self.index += 1
            self.terminated = self.index == self.size
            return self._get_state(), self._get_reward(), \
                self.terminated, False, {'key': 1, 'env': self}
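

# A minimal usage sketch (the helper name ``_demo_going_right`` is ours, not
# part of the original test suite): always step right until the episode
# terminates; the final observation is [size] and the terminal reward is 1.
def _demo_going_right():
    env = MyTestEnv(size=5)
    obs, info = env.reset()
    terminated = False
    while not terminated:
        obs, rew, terminated, truncated, info = env.step(1)
    assert obs[0] == 5 and rew == 1

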
class NXEnv(gym.Env):
    """An env whose observation is a networkx graph, or the stacked node
    features of that graph when ``obs_type == "array"``."""

    def __init__(self, size, obs_type, feat_dim=32):
        self.size = size
        self.feat_dim = feat_dim
        self.graph = nx.Graph()
        self.graph.add_nodes_from(list(range(size)))
        assert obs_type in ["array", "object"]
        self.obs_type = obs_type

    def _encode_obs(self):
        if self.obs_type == "array":
            # stack per-node feature vectors into a (size, feat_dim) array;
            # uses the public nodes API instead of the private graph._node dict
            return np.stack([self.graph.nodes[i]["data"] for i in self.graph.nodes])
        return deepcopy(self.graph)

    def reset(self):
        graph_state = np.random.rand(self.size, self.feat_dim)
        for i in range(self.size):
            self.graph.nodes[i]["data"] = graph_state[i]
        return self._encode_obs(), {}

    def step(self, action):
        next_graph_state = np.random.rand(self.size, self.feat_dim)
        for i in range(self.size):
            self.graph.nodes[i]["data"] = next_graph_state[i]
        # never terminates or truncates; reward is a constant 1.0
        return self._encode_obs(), 1.0, False, False, {}
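

# Usage sketch for NXEnv (the helper name ``_demo_nx_env`` is illustrative):
# with obs_type="array" each observation stacks the per-node feature vectors
# into a (size, feat_dim) array; with "object" a deep copy of the underlying
# networkx graph is returned instead.
def _demo_nx_env():
    env = NXEnv(size=4, obs_type="array", feat_dim=8)
    obs, info = env.reset()
    assert obs.shape == (4, 8)
    obs, rew, terminated, truncated, info = env.step(0)
    assert rew == 1.0 and not terminated

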
class MyGoalEnv(MyTestEnv):
    """A goal-based variant of MyTestEnv whose observations are dicts of
    observation / achieved_goal / desired_goal."""

    def __init__(self, *args, **kwargs):
        assert kwargs.get("dict_state", 0) + kwargs.get("recurse_state", 0) == 0, \
            "dict_state / recurse_state not supported"
        super().__init__(*args, **kwargs)
        # derive the goal observation: the observation one step right of the
        # start, scaled by ``size``, equals the terminal-state observation
        obs, _ = super().reset(state=0)
        obs, _, _, _, _ = super().step(1)
        self._goal = obs * self.size
        super_obsv = self.observation_space
        self.observation_space = gym.spaces.Dict(
            {
                'observation': super_obsv,
                'achieved_goal': super_obsv,
                'desired_goal': super_obsv,
            }
        )

    def reset(self, *args, **kwargs):
        obs, info = super().reset(*args, **kwargs)
        new_obs = {
            'observation': obs,
            'achieved_goal': obs,
            'desired_goal': self._goal
        }
        return new_obs, info

    def step(self, *args, **kwargs):
        obs_next, rew, terminated, truncated, info = super().step(*args, **kwargs)
        new_obs_next = {
            'observation': obs_next,
            'achieved_goal': obs_next,
            'desired_goal': self._goal
        }
        return new_obs_next, rew, terminated, truncated, info

    def compute_reward_fn(
        self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: dict
    ) -> np.ndarray:
        # reward is True where the achieved goal exactly matches the desired
        # goal; image goals are compared over the last three axes
        axis = -1
        if self.array_state:
            axis = (-3, -2, -1)
        return (achieved_goal == desired_goal).all(axis=axis)
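

# Usage sketch for MyGoalEnv (the helper name ``_demo_goal_env`` is ours):
# observations become {observation, achieved_goal, desired_goal} dicts, and
# compute_reward_fn compares achieved against desired goals elementwise.
def _demo_goal_env():
    env = MyGoalEnv(size=3)
    obs, info = env.reset()
    assert set(obs) == {"observation", "achieved_goal", "desired_goal"}
    rew = env.compute_reward_fn(obs["achieved_goal"], obs["desired_goal"], info)
    assert not rew  # goal not yet achieved right after reset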