diff --git a/README.md b/README.md
index d92c8e1..fdbb6e2 100644
--- a/README.md
+++ b/README.md
@@ -204,7 +204,7 @@
 data/outputs/2023.03.01/22.13.58_train_diffusion_unet_hybrid_pusht_image
 ```
 
 ## 🦾 Demo, Training and Eval on a Real Robot
-Make sure your UR5 robot is running and accepting command from its network interface (emergency stop button within reach at all time), your RealSense cameras plugged in to your workstation (tested with `realsense-viewer`) and your SpaceMouse connected with the `spacenavd` deamon running (verify with `systemctl status spacenavd`).
+Make sure your UR5 robot is running and accepting commands from its network interface (emergency stop button within reach at all times), your RealSense cameras plugged into your workstation (tested with `realsense-viewer`), and your SpaceMouse connected with the `spacenavd` daemon running (verify with `systemctl status spacenavd`).
 Start the demonstration collection script. Press "C" to start recording. Use SpaceMouse to move the robot. Press "S" to stop recording.
 ```console
diff --git a/diffusion_policy/common/pose_trajectory_interpolator.py b/diffusion_policy/common/pose_trajectory_interpolator.py
index 063943f..3e37397 100644
--- a/diffusion_policy/common/pose_trajectory_interpolator.py
+++ b/diffusion_policy/common/pose_trajectory_interpolator.py
@@ -28,7 +28,7 @@ class PoseTrajectoryInterpolator:
         poses = np.array(poses)
 
         if len(times) == 1:
-            # special treatement for single step interpolation
+            # special treatment for single step interpolation
             self.single_step = True
             self._times = times
             self._poses = poses
@@ -69,7 +69,7 @@ class PoseTrajectoryInterpolator:
         should_keep = (start_t < times) & (times < end_t)
         keep_times = times[should_keep]
         all_times = np.concatenate([[start_t], keep_times, [end_t]])
-        # remove duplicates, Slerp requries strictly increasing x
+        # remove duplicates, Slerp requires strictly increasing x
         all_times = np.unique(all_times)
         # interpolate
         all_poses = self(all_times)
diff --git a/diffusion_policy/common/pymunk_override.py b/diffusion_policy/common/pymunk_override.py
index 78ecaa2..2439020 100644
--- a/diffusion_policy/common/pymunk_override.py
+++ b/diffusion_policy/common/pymunk_override.py
@@ -96,7 +96,7 @@ class DrawOptions(pymunk.SpaceDebugDrawOptions):
 
         See pygame_util.demo.py for a full example
 
-        Since pygame uses a coordiante system where y points down (in contrast
+        Since pygame uses a coordinate system where y points down (in contrast
         to many other cases), you either have to make the physics simulation
         with Pymunk also behave in that way, or flip everything when you draw.
 
@@ -226,7 +226,7 @@ def to_pygame(p: Tuple[float, float], surface: pygame.Surface) -> Tuple[int, int
     """Convenience method to convert pymunk coordinates to pygame surface
     local coordinates.
 
-    Note that in case positive_y_is_up is False, this function wont actually do
+    Note that in case positive_y_is_up is False, this function won't actually do
     anything except converting the point to integers.
     """
    if positive_y_is_up:
diff --git a/diffusion_policy/common/replay_buffer.py b/diffusion_policy/common/replay_buffer.py
index 09dda9b..022a704 100644
--- a/diffusion_policy/common/replay_buffer.py
+++ b/diffusion_policy/common/replay_buffer.py
@@ -84,7 +84,7 @@ def get_optimal_chunks(shape, dtype,
 class ReplayBuffer:
     """
     Zarr-based temporal datastructure.
-    Assumes first dimention to be time. Only chunk in time dimension.
+    Assumes first dimension to be time. Only chunk in time dimension.
""" def __init__(self, root: Union[zarr.Group, @@ -216,7 +216,7 @@ class ReplayBuffer: Recommended """ if backend == 'numpy': - print('backend argument is depreacted!') + print('backend argument is deprecated!') store = None group = zarr.open(os.path.expanduser(zarr_path), 'r') return cls.copy_from_store(src_store=group.store, store=store, diff --git a/diffusion_policy/common/timestamp_accumulator.py b/diffusion_policy/common/timestamp_accumulator.py index e1eb285..2e72672 100644 --- a/diffusion_policy/common/timestamp_accumulator.py +++ b/diffusion_policy/common/timestamp_accumulator.py @@ -13,7 +13,7 @@ def get_accumulate_timestamp_idxs( ) -> Tuple[List[int], List[int], int]: """ For each dt window, choose the first timestamp in the window. - Assumes timestamps sorted. One timestamp might be chosen mulitple times due to dropped frames. + Assumes timestamps sorted. One timestamp might be chosen multiple times due to dropped frames. next_global_idx should start at 0 normally, and then use the returned next_global_idx. However, when overwiting previous values are desired, set last_global_idx to None. @@ -60,7 +60,7 @@ def align_timestamps( allow_negative=True ) if len(global_idxs) > len(target_global_idxs): - # if more steps avaliable, truncate + # if more steps available, truncate global_idxs = global_idxs[:len(target_global_idxs)] local_idxs = local_idxs[:len(target_global_idxs)] @@ -216,7 +216,7 @@ class TimestampActionAccumulator: self.action_buffer = np.resize(self.action_buffer, new_shape) self.timestamp_buffer = np.resize(self.timestamp_buffer, (new_size,)) - # potentally rewrite old data (as expected) + # potentially rewrite old data (as expected) self.action_buffer[global_idxs] = actions[local_idxs] self.timestamp_buffer[global_idxs] = timestamps[local_idxs] self.size = max(self.size, this_max_size) diff --git a/diffusion_policy/config/train_diffusion_unet_video_workspace.yaml b/diffusion_policy/config/train_diffusion_unet_video_workspace.yaml index 6abe7b6..4af5f1f 100644 --- a/diffusion_policy/config/train_diffusion_unet_video_workspace.yaml +++ b/diffusion_policy/config/train_diffusion_unet_video_workspace.yaml @@ -39,7 +39,7 @@ policy: norm_groups: 8 input_channel: 3 - model_depth: 50 # ResNet 50 (18,34 not yet avaliable) + model_depth: 50 # ResNet 50 (18,34 not yet available) pool: _target_: diffusion_policy.model.ibc.global_avgpool.GlobalAvgpool diff --git a/diffusion_policy/dataset/real_pusht_image_dataset.py b/diffusion_policy/dataset/real_pusht_image_dataset.py index 85017fa..c5a238f 100644 --- a/diffusion_policy/dataset/real_pusht_image_dataset.py +++ b/diffusion_policy/dataset/real_pusht_image_dataset.py @@ -84,7 +84,7 @@ class RealPushTImageDataset(BaseImageDataset): if delta_action: # replace action as relative to previous frame actions = replay_buffer['action'][:] - # suport positions only at this time + # support positions only at this time assert actions.shape[1] <= 3 actions_diff = np.zeros_like(actions) episode_ends = replay_buffer.episode_ends[:] @@ -93,8 +93,8 @@ class RealPushTImageDataset(BaseImageDataset): if i > 0: start = episode_ends[i-1] end = episode_ends[i] - # delta action is the difference between previous desired postion and the current - # it should be scheduled at the previous timestep for the curren timestep + # delta action is the difference between previous desired position and the current + # it should be scheduled at the previous timestep for the current timestep # to ensure consistency with positional mode actions_diff[start+1:end] = 
             replay_buffer['action'][:] = actions_diff
diff --git a/diffusion_policy/env/block_pushing/block_pushing.py b/diffusion_policy/env/block_pushing/block_pushing.py
index 8cc52ba..ed2310f 100644
--- a/diffusion_policy/env/block_pushing/block_pushing.py
+++ b/diffusion_policy/env/block_pushing/block_pushing.py
@@ -745,7 +745,7 @@ class BlockPush(gym.Env):
 
         WARNING: py_environment wrapper assumes environments aren't reset in their
         constructor and will often reset the environment unintentionally. It is
-        always recommeneded that you call env.reset on the tfagents wrapper before
+        always recommended that you call env.reset on the tfagents wrapper before
         playback (replaying pybullet_state).
 
         Args:
diff --git a/diffusion_policy/env/block_pushing/block_pushing_multimodal.py b/diffusion_policy/env/block_pushing/block_pushing_multimodal.py
index 3ca9212..6ec4942 100644
--- a/diffusion_policy/env/block_pushing/block_pushing_multimodal.py
+++ b/diffusion_policy/env/block_pushing/block_pushing_multimodal.py
@@ -615,7 +615,7 @@ class BlockPushMultimodal(block_pushing.BlockPush):
 
         WARNING: py_environment wrapper assumes environments aren't reset in their
         constructor and will often reset the environment unintentionally. It is
-        always recommeneded that you call env.reset on the tfagents wrapper before
+        always recommended that you call env.reset on the tfagents wrapper before
         playback (replaying pybullet_state).
 
         Args:
diff --git a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/franka/robot/franka_robot.py b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/franka/robot/franka_robot.py
index 087b9dd..3c6cfc3 100644
--- a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/franka/robot/franka_robot.py
+++ b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/franka/robot/franka_robot.py
@@ -24,7 +24,7 @@ import click
 from adept_envs import base_robot
 from adept_envs.utils.config import (get_config_root_node, read_config_from_node)
 
-# obervations structure
+# observations structure
 from collections import namedtuple
 observation = namedtuple('observation',
         ['time', 'qpos_robot', 'qvel_robot', 'qpos_object', 'qvel_object'])
diff --git a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/robot_env.py b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/robot_env.py
index 76f11fa..c50c2ee 100644
--- a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/robot_env.py
+++ b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/robot_env.py
@@ -40,7 +40,7 @@ class RobotEnv(mujoco_env.MujocoEnv):
 
     # Mapping of device path to the calibration file to use. If the device path
     # is not found, the 'default' key is used.
-    # This can be overriden by subclasses.
+    # This can be overridden by subclasses.
     CALIBRATION_PATHS = {}
 
     def __init__(self,
diff --git a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/utils/config.py b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/utils/config.py
index e6aea13..6444bf7 100644
--- a/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/utils/config.py
+++ b/diffusion_policy/env/kitchen/relay_policy_learning/adept_envs/adept_envs/utils/config.py
@@ -87,7 +87,7 @@ if __name__ == '__main__':
                                                 float))
     print("data:type \t", read_config_from_node(root, "data", "type", str))
 
-    # read straight from xml (dum the XML data as duh.xml for this test)
+    # read straight from xml (dump the XML data as duh.xml for this test)
     root, root_name = get_config_root_node(config_file_name="duh.xml")
     print("Read from xml --------------------------------")
     print("limit:low \t", read_config_from_xml("duh.xml", "limits", "low",
diff --git a/diffusion_policy/env/kitchen/relay_policy_learning/third_party/franka/README.md b/diffusion_policy/env/kitchen/relay_policy_learning/third_party/franka/README.md
index d96eaf1..45d2257 100644
--- a/diffusion_policy/env/kitchen/relay_policy_learning/third_party/franka/README.md
+++ b/diffusion_policy/env/kitchen/relay_policy_learning/third_party/franka/README.md
@@ -4,6 +4,6 @@ Franka panda mujoco models
 
 # Environment
 
-franka_panda.xml | comming soon
+franka_panda.xml | coming soon
 :-------------------------:|:-------------------------:
-![Alt text](franka_panda.png?raw=false "sawyer") | comming soon
+![Alt text](franka_panda.png?raw=false "sawyer") | coming soon
diff --git a/diffusion_policy/env/pusht/pusht_env.py b/diffusion_policy/env/pusht/pusht_env.py
index 45be598..9a25cd6 100644
--- a/diffusion_policy/env/pusht/pusht_env.py
+++ b/diffusion_policy/env/pusht/pusht_env.py
@@ -44,7 +44,7 @@ class PushTEnv(gym.Env):
         # Local controller params.
         self.k_p, self.k_v = 100, 20  # PD control.z
         self.control_hz = self.metadata['video.frames_per_second']
-        # legcay set_state for data compatiblity
+        # legacy set_state for data compatibility
         self.legacy = legacy
 
         # agent_pos, block_pos, block_angle
@@ -92,7 +92,7 @@ class PushTEnv(gym.Env):
         if self.damping is not None:
             self.space.damping = self.damping
 
-        # use legacy RandomState for compatiblity
+        # use legacy RandomState for compatibility
         state = self.reset_to_state
         if state is None:
             rs = np.random.RandomState(seed=seed)
@@ -163,7 +163,7 @@ class PushTEnv(gym.Env):
         inertia = pymunk.moment_for_box(mass, (50, 100))
         body = pymunk.Body(mass, inertia)
         # preserving the legacy assignment order for compatibility
-        # the order here dosn't matter somehow, maybe because CoM is aligned with body origin
+        # the order here doesn't matter somehow, maybe because CoM is aligned with body origin
         body.position = pose[:2].tolist()
         body.angle = pose[2]
         return body
@@ -210,7 +210,7 @@ class PushTEnv(gym.Env):
             pygame.event.pump()
             pygame.display.update()
 
-            # the clock is aleady ticked during in step for "human"
+            # the clock is already ticked in step() for "human"
 
         img = np.transpose(
@@ -255,7 +255,7 @@ class PushTEnv(gym.Env):
         # if not the same as CoM
         # therefore should be modified first.
         if self.legacy:
-            # for compatiblity with legacy data
+            # for compatibility with legacy data
             self.block.position = pos_block
             self.block.angle = rot_block
         else:
@@ -307,7 +307,7 @@ class PushTEnv(gym.Env):
         self.goal_color = pygame.Color('LightGreen')
         self.goal_pose = np.array([256,256,np.pi/4])  # x, y, theta (in radians)
 
-        # Add collision handeling
+        # Add collision handling
         self.collision_handeler = self.space.add_collision_handler(0, 0)
         self.collision_handeler.post_solve = self._handle_collision
         self.n_contact_points = 0
diff --git a/diffusion_policy/env/pusht/pusht_keypoints_env.py b/diffusion_policy/env/pusht/pusht_keypoints_env.py
index 0efea6d..eb42686 100644
--- a/diffusion_policy/env/pusht/pusht_keypoints_env.py
+++ b/diffusion_policy/env/pusht/pusht_keypoints_env.py
@@ -27,7 +27,7 @@ class PushTKeypointsEnv(PushTEnv):
         ws = self.window_size
 
         if local_keypoint_map is None:
-            # create default keypoint definiton
+            # create default keypoint definition
             kp_kwargs = self.genenerate_keypoint_manager_params()
             local_keypoint_map = kp_kwargs['local_keypoint_map']
             color_map = kp_kwargs['color_map']
@@ -107,7 +107,7 @@ class PushTKeypointsEnv(PushTEnv):
         obs = kps.flatten()
         obs_mask = kps_mask.flatten()
         if not self.agent_keypoints:
-            # passing agent position when keypoints are not avaiable
+            # passing agent position when keypoints are not available
             agent_pos = np.array(self.agent.position)
             obs = np.concatenate([
                 obs, agent_pos
diff --git a/diffusion_policy/env/pusht/pymunk_override.py b/diffusion_policy/env/pusht/pymunk_override.py
index 78ecaa2..2439020 100644
--- a/diffusion_policy/env/pusht/pymunk_override.py
+++ b/diffusion_policy/env/pusht/pymunk_override.py
@@ -96,7 +96,7 @@ class DrawOptions(pymunk.SpaceDebugDrawOptions):
 
         See pygame_util.demo.py for a full example
 
-        Since pygame uses a coordiante system where y points down (in contrast
+        Since pygame uses a coordinate system where y points down (in contrast
         to many other cases), you either have to make the physics simulation
         with Pymunk also behave in that way, or flip everything when you draw.
 
@@ -226,7 +226,7 @@ def to_pygame(p: Tuple[float, float], surface: pygame.Surface) -> Tuple[int, int
     """Convenience method to convert pymunk coordinates to pygame surface
     local coordinates.
 
-    Note that in case positive_y_is_up is False, this function wont actually do
+    Note that in case positive_y_is_up is False, this function won't actually do
     anything except converting the point to integers.
""" if positive_y_is_up: diff --git a/diffusion_policy/env_runner/pusht_keypoints_runner.py b/diffusion_policy/env_runner/pusht_keypoints_runner.py index 9056c40..b9c107c 100644 --- a/diffusion_policy/env_runner/pusht_keypoints_runner.py +++ b/diffusion_policy/env_runner/pusht_keypoints_runner.py @@ -45,7 +45,7 @@ class PushTKeypointsRunner(BaseLowdimRunner): n_envs = n_train + n_test # handle latency step - # to mimic latency, we request n_latency_steps addtional steps + # to mimic latency, we request n_latency_steps additional steps # of past observations, and the discard the last n_latency_steps env_n_obs_steps = n_obs_steps + n_latency_steps env_n_action_steps = n_action_steps diff --git a/diffusion_policy/env_runner/robomimic_lowdim_runner.py b/diffusion_policy/env_runner/robomimic_lowdim_runner.py index cd28c8b..7246943 100644 --- a/diffusion_policy/env_runner/robomimic_lowdim_runner.py +++ b/diffusion_policy/env_runner/robomimic_lowdim_runner.py @@ -91,7 +91,7 @@ class RobomimicLowdimRunner(BaseLowdimRunner): n_envs = n_train + n_test # handle latency step - # to mimic latency, we request n_latency_steps addtional steps + # to mimic latency, we request n_latency_steps additional steps # of past observations, and the discard the last n_latency_steps env_n_obs_steps = n_obs_steps + n_latency_steps env_n_action_steps = n_action_steps diff --git a/diffusion_policy/gym_util/multistep_wrapper.py b/diffusion_policy/gym_util/multistep_wrapper.py index aae6149..cf2cfda 100644 --- a/diffusion_policy/gym_util/multistep_wrapper.py +++ b/diffusion_policy/gym_util/multistep_wrapper.py @@ -39,10 +39,10 @@ def dict_take_last_n(x, n): def aggregate(data, method='max'): if method == 'max': - # equivelent to any + # equivalent to any return np.max(data) elif method == 'min': - # equivelent to all + # equivalent to all return np.min(data) elif method == 'mean': return np.mean(data) diff --git a/diffusion_policy/model/bet/action_ae/discretizers/k_means.py b/diffusion_policy/model/bet/action_ae/discretizers/k_means.py index 534e588..9051df0 100644 --- a/diffusion_policy/model/bet/action_ae/discretizers/k_means.py +++ b/diffusion_policy/model/bet/action_ae/discretizers/k_means.py @@ -41,7 +41,7 @@ class KMeansDiscretizer(DictOfTensorMixin): @classmethod def _kmeans(cls, x: torch.Tensor, ncluster: int = 512, niter: int = 50): """ - Simple k-means clustering algorithm adapted from Karpathy's minGPT libary + Simple k-means clustering algorithm adapted from Karpathy's minGPT library https://github.com/karpathy/minGPT/blob/master/play_image.ipynb """ N, D = x.size() diff --git a/diffusion_policy/model/common/rotation_transformer.py b/diffusion_policy/model/common/rotation_transformer.py index 788928b..0697d98 100644 --- a/diffusion_policy/model/common/rotation_transformer.py +++ b/diffusion_policy/model/common/rotation_transformer.py @@ -21,7 +21,7 @@ class RotationTransformer: """ Valid representations - Alwasy use matrix as intermediate representation. + Always use matrix as intermediate representation. 
""" assert from_rep != to_rep assert from_rep in self.valid_reps diff --git a/diffusion_policy/real_world/rtde_interpolation_controller.py b/diffusion_policy/real_world/rtde_interpolation_controller.py index c1bca0c..af27b2e 100644 --- a/diffusion_policy/real_world/rtde_interpolation_controller.py +++ b/diffusion_policy/real_world/rtde_interpolation_controller.py @@ -21,8 +21,8 @@ class Command(enum.Enum): class RTDEInterpolationController(mp.Process): """ - To ensure sending commnd to the robot with predictable latency - this controller need its seperate process (due to python GIL) + To ensure sending command to the robot with predictable latency + this controller need its separate process (due to python GIL) """ @@ -54,7 +54,7 @@ class RTDEInterpolationController(mp.Process): tcp_offset_pose: 6d pose payload_mass: float payload_cog: 3d position, center of gravity - soft_real_time: enables round-robbin scheduling and real-time priority + soft_real_time: enables round-robin scheduling and real-time priority requires running scripts/rtprio_setup.sh before hand. """ diff --git a/diffusion_policy/real_world/spacemouse.py b/diffusion_policy/real_world/spacemouse.py index ddf7c60..c13a505 100644 --- a/diffusion_policy/real_world/spacemouse.py +++ b/diffusion_policy/real_world/spacemouse.py @@ -8,7 +8,7 @@ import time class Spacemouse(Thread): def __init__(self, max_value=500, deadzone=(0,0,0,0,0,0), dtype=np.float32): """ - Continously listen to 3D connecion space naviagtor events + Continuously listen to 3D connection space naviagtor events and update the latest state. max_value: {300, 500} 300 for wired version and 500 for wireless diff --git a/diffusion_policy/real_world/spacemouse_shared_memory.py b/diffusion_policy/real_world/spacemouse_shared_memory.py index 163877f..06102fd 100644 --- a/diffusion_policy/real_world/spacemouse_shared_memory.py +++ b/diffusion_policy/real_world/spacemouse_shared_memory.py @@ -15,7 +15,7 @@ class Spacemouse(mp.Process): n_buttons=2, ): """ - Continously listen to 3D connecion space naviagtor events + Continuously listen to 3D connection space naviagtor events and update the latest state. 
         max_value: {300, 500}
         300 for wired version and 500 for wireless
@@ -36,7 +36,7 @@
         deadzone = np.array(deadzone, dtype=dtype)
         assert (deadzone >= 0).all()
 
-        # copied varaibles
+        # copied variables
         self.frequency = frequency
         self.max_value = max_value
         self.dtype = dtype
diff --git a/diffusion_policy/real_world/video_recorder.py b/diffusion_policy/real_world/video_recorder.py
index 28ea8e0..434605f 100644
--- a/diffusion_policy/real_world/video_recorder.py
+++ b/diffusion_policy/real_world/video_recorder.py
@@ -128,7 +128,7 @@ class VideoRecorder:
             dt=1/self.fps,
             next_global_idx=self.next_global_idx
         )
-        # number of apperance means repeats
+        # the number of appearances gives the number of repeats
        n_repeats = len(local_idxs)
 
         if self.shape is None:
diff --git a/diffusion_policy/scripts/episode_lengths.py b/diffusion_policy/scripts/episode_lengths.py
index 7d80336..91b2dda 100644
--- a/diffusion_policy/scripts/episode_lengths.py
+++ b/diffusion_policy/scripts/episode_lengths.py
@@ -16,8 +16,8 @@ from diffusion_policy.common.replay_buffer import ReplayBuffer
 @click.option('--dt', default=0.1, type=float)
 def main(input, dt):
     buffer = ReplayBuffer.create_from_path(input)
-    lenghts = buffer.episode_lengths
-    durations = lenghts * dt
+    lengths = buffer.episode_lengths
+    durations = lengths * dt
     result = {
         'duration/mean': np.mean(durations)
     }
diff --git a/diffusion_policy/scripts/real_pusht_successrate.py b/diffusion_policy/scripts/real_pusht_successrate.py
index da447f9..a5900fa 100644
--- a/diffusion_policy/scripts/real_pusht_successrate.py
+++ b/diffusion_policy/scripts/real_pusht_successrate.py
@@ -16,7 +16,7 @@ import json
 @click.command()
 @click.option(
     '--reference', '-r', required=True,
-    help='Reference metrics_raw.json from demostration dataset.'
+    help='Reference metrics_raw.json from demonstration dataset.'
 )
 @click.option(
     '--input', '-i', required=True,
diff --git a/diffusion_policy/shared_memory/shared_memory_ring_buffer.py b/diffusion_policy/shared_memory/shared_memory_ring_buffer.py
index 5d59e89..23feef0 100644
--- a/diffusion_policy/shared_memory/shared_memory_ring_buffer.py
+++ b/diffusion_policy/shared_memory/shared_memory_ring_buffer.py
@@ -25,7 +25,7 @@ class SharedMemoryRingBuffer:
             ):
         """
-        shm_manager: Manages the life cycle of share memories
-            across processes. Remeber to run .start() before passing.
+        shm_manager: Manages the life cycle of shared memories
+            across processes. Remember to run .start() before passing.
         array_specs: Name, shape and type of arrays for a single time step.
-        get_max_k: The maxmum number of items can be queried at once.
-        get_time_budget: The maxmum amount of time spent copying data from
+        get_max_k: The maximum number of items that can be queried at once.
+        get_time_budget: The maximum amount of time spent copying data from
diff --git a/ray_exec.py b/ray_exec.py
index ed596cf..22ff39f 100644
--- a/ray_exec.py
+++ b/ray_exec.py
@@ -40,7 +40,7 @@ def worker_fn(command_args, data_src=None, unbuffer_python=False, use_shell=Fals
     if use_shell:
         command_args = ' '.join(command_args)
 
-    # stdout passtrough to ray worker, which is then passed to ray driver
+    # stdout passthrough to ray worker, which is then passed to ray driver
     process = subprocess.Popen(
         args=command_args,
         env=process_env,
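
A few of the comments touched above describe behavior that is easy to misread, so short illustrative sketches follow. First, the dt-window rule documented in `get_accumulate_timestamp_idxs`: the code below is a hypothetical, simplified re-implementation of just that rule as the docstring states it (each dt-wide window gets the first timestamp that can serve it), not the patched function itself.

```python
import numpy as np

def first_timestamp_per_window(timestamps, start_time, dt):
    # Simplified sketch of the documented rule: walk the sorted timestamps
    # and assign each dt-wide window the first timestamp that can serve it,
    # so one timestamp may be chosen multiple times when frames were dropped.
    global_idxs, local_idxs = [], []
    next_global_idx = 0
    for local_idx, ts in enumerate(timestamps):
        # index of the last window this timestamp can serve
        last_window = int((ts - start_time) // dt)
        while next_global_idx <= last_window:
            global_idxs.append(next_global_idx)
            local_idxs.append(local_idx)
            next_global_idx += 1
    return global_idxs, local_idxs

# A frame between t=0.1 and t=0.35 was dropped, so the t=0.35 reading
# is selected for two consecutive windows:
g, l = first_timestamp_per_window(np.array([0.0, 0.1, 0.35]), start_time=0.0, dt=0.1)
print(g, l)  # [0, 1, 2, 3] [0, 1, 2, 2]
```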
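
Second, the delta-action comment in `real_pusht_image_dataset.py`: a minimal numpy sketch of the convention it describes, using made-up absolute positions for a single episode (the array values are illustrative only).

```python
import numpy as np

# Absolute desired positions for one episode, shape (T, 2); values made up.
actions = np.array([[0.0, 0.0],
                    [1.0, 0.0],
                    [1.0, 2.0],
                    [3.0, 2.0]])

# Per the patched comment: the delta stored at timestep t is the difference
# between the desired position at t and at t-1, computed with np.diff and
# shifted by one so the first step of the episode stays zero.
actions_diff = np.zeros_like(actions)
actions_diff[1:] = np.diff(actions, axis=0)

print(actions_diff)
# [[0. 0.]
#  [1. 0.]
#  [0. 2.]
#  [2. 0.]]
```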
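
Finally, the latency comment shared by the two env runners: a sketch of the indexing it describes, assuming observations are stacked oldest-first along the time axis (variable names here are illustrative, not the runners' own).

```python
import numpy as np

n_obs_steps = 2
n_latency_steps = 1

# Request n_latency_steps additional past observations from the env...
env_n_obs_steps = n_obs_steps + n_latency_steps
obs = np.arange(env_n_obs_steps * 4).reshape(env_n_obs_steps, 4)  # (3, D), oldest first

# ...then discard the newest n_latency_steps, so the policy only ever sees
# observations that are at least n_latency_steps old.
policy_obs = obs[:n_obs_steps]
print(policy_obs.shape)  # (2, 4)
```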