NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for environment wrappers to different learning frameworks.

Wrappers allow you to modify the behavior of an environment without modifying the environment itself.
This is useful for modifying the observation space, action space, or reward function. Additionally,
they can be used to cast a given environment into the respective environment class definition used by
different learning frameworks. This operation may include handling of asymmetric actor-critic observations,
casting the data between different backends such as `numpy` and `pytorch`, or organizing the returned data
into the data structure expected by the learning framework.

All wrappers work similarly to the :class:`gymnasium.Wrapper` class. Using a wrapper is as simple as passing
the initialized environment instance to the wrapper constructor. However, since learning frameworks expect
different input and output data structures, their wrapper classes are not compatible with each other.
Thus, they should always be used in conjunction with the respective learning framework.

For instance, to wrap an environment in the `Stable-Baselines3`_ wrapper, you can do the following:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

    env = Sb3VecEnvWrapper(env)

.. _RL-Games: https://github.com/Denys88/rl_games
.. _RSL-RL: https://github.com/leggedrobotics/rsl_rl
.. _skrl: https://github.com/Toni-SM/skrl
.. _Stable-Baselines3: https://github.com/DLR-RM/stable-baselines3
"""
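For context, here is a minimal sketch of how the Stable-Baselines3 wrapper above typically slots into a training script. The task id ``Isaac-Cartpole-v0``, the PPO settings, and the prior app boot-strapping are illustrative assumptions, not part of this package's API; in a real script the Isaac Sim app must be launched via ``AppLauncher`` before any of these imports.

.. code-block:: python

    import gymnasium as gym
    from stable_baselines3 import PPO

    import omni.isaac.orbit_tasks  # noqa: F401  (registers the Isaac-* tasks)
    from omni.isaac.orbit_tasks.utils import parse_env_cfg
    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

    # create the vectorized Orbit environment (task id is a hypothetical example)
    env_cfg = parse_env_cfg("Isaac-Cartpole-v0", num_envs=64)
    env = gym.make("Isaac-Cartpole-v0", cfg=env_cfg)
    # must be the last wrapper in the chain (see the caution note in sb3.py)
    env = Sb3VecEnvWrapper(env)

    agent = PPO("MlpPolicy", env, verbose=1)
    agent.learn(total_timesteps=100_000)
    env.close()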
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/sb3.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapper to configure an :class:`RLTaskEnv` instance to Stable-Baselines3 vectorized environment.

The following example shows how to wrap an environment for Stable-Baselines3:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper

    env = Sb3VecEnvWrapper(env)

"""

from __future__ import annotations

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn  # noqa: F401
from typing import Any

from stable_baselines3.common.utils import constant_fn
from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn

from omni.isaac.orbit.envs import RLTaskEnv

"""
Configuration Parser.
"""


def process_sb3_cfg(cfg: dict) -> dict:
    """Convert simple YAML types to Stable-Baselines classes/components.

    Args:
        cfg: A configuration dictionary.

    Returns:
        A dictionary containing the converted configuration.

    Reference:
        https://github.com/DLR-RM/rl-baselines3-zoo/blob/0e5eb145faefa33e7d79c7f8c179788574b20da5/utils/exp_manager.py#L358
    """

    def update_dict(hyperparams: dict[str, Any]) -> dict[str, Any]:
        for key, value in hyperparams.items():
            if isinstance(value, dict):
                update_dict(value)
            else:
                if key in ["policy_kwargs", "replay_buffer_class", "replay_buffer_kwargs"]:
                    hyperparams[key] = eval(value)
                elif key in ["learning_rate", "clip_range", "clip_range_vf", "delta_std"]:
                    if isinstance(value, str):
                        _, initial_value = value.split("_")
                        initial_value = float(initial_value)
                        # bind the current value as a default argument to avoid the
                        # late-binding closure pitfall when multiple schedules are parsed
                        hyperparams[key] = lambda progress_remaining, v=initial_value: progress_remaining * v
                    elif isinstance(value, (float, int)):
                        # Negative value: ignore (ex: for clipping)
                        if value < 0:
                            continue
                        hyperparams[key] = constant_fn(float(value))
                    else:
                        raise ValueError(f"Invalid value for {key}: {hyperparams[key]}")
        return hyperparams

    # parse agent configuration and convert to classes
    return update_dict(cfg)


"""
Vectorized environment wrapper.
"""


class Sb3VecEnvWrapper(VecEnv):
    """Wraps around Orbit environment for Stable-Baselines3.

    Isaac Sim internally implements a vectorized environment. However, since it is still considered
    a single environment instance, Stable-Baselines tries to wrap around it using the :class:`DummyVecEnv`.
    This is only done if the environment is not inheriting from their :class:`VecEnv`. Thus, this class
    thinly wraps over the environment from :class:`RLTaskEnv`.

    Note:
        While Stable-Baselines3 supports the Gym 0.26+ API, its vectorized environment still uses the old API
        (i.e. it is closer to Gym 0.21). Thus, we implement the old API for the vectorized environment.

    We also add monitoring functionality that computes the un-discounted episode return and length.
    This information is added to the info dicts under the key `episode`.

    In contrast to the Orbit environment, Stable-Baselines3 expects the following:

    1. numpy datatype for MDP signals
    2. a list of info dicts for each sub-environment (instead of a dict)
    3. when an environment has terminated, the observations from the environment should correspond
       to the ones after reset. The "real" final observation is passed using the info dicts
       under the key ``terminal_observation``.

    .. warning::

        By the nature of physics stepping in Isaac Sim, it is not possible to forward the simulation buffers
        without performing a physics step. Thus, reset is performed inside the :meth:`step()` function after
        the actual physics step is taken. Consequently, the returned observations for terminated environments
        are the ones after the reset.

    .. caution::

        This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
        the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
        wrapper.

    Reference:

    1. https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html
    2. https://stable-baselines3.readthedocs.io/en/master/common/monitor.html

    """

    def __init__(self, env: RLTaskEnv):
        """Initialize the wrapper.

        Args:
            env: The environment to wrap around.

        Raises:
            ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
        """
        # check that input is valid
        if not isinstance(env.unwrapped, RLTaskEnv):
            raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
        # initialize the wrapper
        self.env = env
        # collect common information
        self.num_envs = self.unwrapped.num_envs
        self.sim_device = self.unwrapped.device
        self.render_mode = self.unwrapped.render_mode
        # obtain gym spaces
        # note: stable-baselines3 does not like when we have unbounded action space so
        #   we set it to some high value here. Maybe this is not general but something to think about.
        observation_space = self.unwrapped.single_observation_space["policy"]
        action_space = self.unwrapped.single_action_space
        if isinstance(action_space, gym.spaces.Box) and not action_space.is_bounded("both"):
            action_space = gym.spaces.Box(low=-100, high=100, shape=action_space.shape)
        # initialize vec-env
        VecEnv.__init__(self, self.num_envs, observation_space, action_space)
        # add buffer for logging episodic information
        self._ep_rew_buf = torch.zeros(self.num_envs, device=self.sim_device)
        self._ep_len_buf = torch.zeros(self.num_envs, device=self.sim_device)

    def __str__(self):
        """Returns the wrapper name and the :attr:`env` representation string."""
        return f"<{type(self).__name__}{self.env}>"

    def __repr__(self):
        """Returns the string representation of the wrapper."""
        return str(self)

    """
    Properties -- Gym.Wrapper
    """

    @classmethod
    def class_name(cls) -> str:
        """Returns the class name of the wrapper."""
        return cls.__name__

    @property
    def unwrapped(self) -> RLTaskEnv:
        """Returns the base environment of the wrapper.

        This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
        """
        return self.env.unwrapped

    """
    Properties
    """

    def get_episode_rewards(self) -> list[float]:
        """Returns the rewards of all the episodes."""
        return self._ep_rew_buf.cpu().tolist()

    def get_episode_lengths(self) -> list[int]:
        """Returns the number of time-steps of all the episodes."""
        return self._ep_len_buf.cpu().tolist()

    """
    Operations - MDP
    """

    def seed(self, seed: int | None = None) -> list[int | None]:  # noqa: D102
        return [self.unwrapped.seed(seed)] * self.unwrapped.num_envs

    def reset(self) -> VecEnvObs:  # noqa: D102
        obs_dict, _ = self.env.reset()
        # convert data types to numpy depending on backend
        return self._process_obs(obs_dict)

    def step_async(self, actions):  # noqa: D102
        # convert input to numpy array
        if not isinstance(actions, torch.Tensor):
            actions = np.asarray(actions)
            actions = torch.from_numpy(actions).to(device=self.sim_device, dtype=torch.float32)
        else:
            actions = actions.to(device=self.sim_device, dtype=torch.float32)
        # convert to tensor
        self._async_actions = actions

    def step_wait(self) -> VecEnvStepReturn:  # noqa: D102
        # record step information
        obs_dict, rew, terminated, truncated, extras = self.env.step(self._async_actions)
        # update episode un-discounted return and length
        self._ep_rew_buf += rew
        self._ep_len_buf += 1
        # compute reset ids
        dones = terminated | truncated
        reset_ids = (dones > 0).nonzero(as_tuple=False)
        # convert data types to numpy depending on backend
        # note: RLTaskEnv uses torch backend (by default).
        obs = self._process_obs(obs_dict)
        rew = rew.detach().cpu().numpy()
        terminated = terminated.detach().cpu().numpy()
        truncated = truncated.detach().cpu().numpy()
        dones = dones.detach().cpu().numpy()
        # convert extra information to list of dicts
        infos = self._process_extras(obs, terminated, truncated, extras, reset_ids)
        # reset info for terminated environments
        self._ep_rew_buf[reset_ids] = 0
        self._ep_len_buf[reset_ids] = 0

        return obs, rew, dones, infos

    def close(self):  # noqa: D102
        self.env.close()

    def get_attr(self, attr_name, indices=None):  # noqa: D102
        # resolve indices
        if indices is None:
            indices = slice(None)
            num_indices = self.num_envs
        else:
            num_indices = len(indices)
        # obtain attribute value
        attr_val = getattr(self.env, attr_name)
        # return the value
        if not isinstance(attr_val, torch.Tensor):
            return [attr_val] * num_indices
        else:
            return attr_val[indices].detach().cpu().numpy()

    def set_attr(self, attr_name, value, indices=None):  # noqa: D102
        raise NotImplementedError("Setting attributes is not supported.")

    def env_method(self, method_name: str, *method_args, indices=None, **method_kwargs):  # noqa: D102
        if method_name == "render":
            # gymnasium does not support changing render mode at runtime
            return self.env.render()
        else:
            # this isn't properly implemented but it is not necessary.
            # mostly done for completeness.
            env_method = getattr(self.env, method_name)
            return env_method(*method_args, indices=indices, **method_kwargs)

    def env_is_wrapped(self, wrapper_class, indices=None):  # noqa: D102
        raise NotImplementedError("Checking if environment is wrapped is not supported.")

    def get_images(self):  # noqa: D102
        raise NotImplementedError("Getting images is not supported.")

    """
    Helper functions.
    """

    def _process_obs(self, obs_dict: torch.Tensor | dict[str, torch.Tensor]) -> np.ndarray | dict[str, np.ndarray]:
        """Convert observations into NumPy data type."""
        # Sb3 doesn't support asymmetric observation spaces, so we only use "policy"
        obs = obs_dict["policy"]
        # note: RLTaskEnv uses torch backend (by default).
        if isinstance(obs, dict):
            for key, value in obs.items():
                obs[key] = value.detach().cpu().numpy()
        elif isinstance(obs, torch.Tensor):
            obs = obs.detach().cpu().numpy()
        else:
            raise NotImplementedError(f"Unsupported data type: {type(obs)}")
        return obs

    def _process_extras(
        self, obs: np.ndarray, terminated: np.ndarray, truncated: np.ndarray, extras: dict, reset_ids: np.ndarray
    ) -> list[dict[str, Any]]:
        """Convert miscellaneous information into dictionary for each sub-environment."""
        # create empty list of dictionaries to fill
        infos: list[dict[str, Any]] = [dict.fromkeys(extras.keys()) for _ in range(self.num_envs)]
        # fill-in information for each sub-environment
        # note: This loop becomes slow when number of environments is large.
        for idx in range(self.num_envs):
            # fill-in episode monitoring info
            if idx in reset_ids:
                infos[idx]["episode"] = dict()
                infos[idx]["episode"]["r"] = float(self._ep_rew_buf[idx])
                infos[idx]["episode"]["l"] = float(self._ep_len_buf[idx])
            else:
                infos[idx]["episode"] = None
            # fill-in bootstrap information
            infos[idx]["TimeLimit.truncated"] = truncated[idx] and not terminated[idx]
            # fill-in information from extras
            for key, value in extras.items():
                # 1. remap extra episodes information safely
                # 2. for others just store their values
                if key == "log":
                    # only log this data for episodes that are terminated
                    if infos[idx]["episode"] is not None:
                        for sub_key, sub_value in value.items():
                            infos[idx]["episode"][sub_key] = sub_value
                else:
                    infos[idx][key] = value[idx]
            # add information about terminal observation separately
            if idx in reset_ids:
                # extract terminal observations
                if isinstance(obs, dict):
                    terminal_obs = dict.fromkeys(obs.keys())
                    for key, value in obs.items():
                        terminal_obs[key] = value[idx]
                else:
                    terminal_obs = obs[idx]
                # add info to dict
                infos[idx]["terminal_observation"] = terminal_obs
            else:
                infos[idx]["terminal_observation"] = None
        # return list of dictionaries
        return infos
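As a quick illustration of what ``process_sb3_cfg`` produces, the sketch below feeds it a hand-written configuration dictionary; the hyper-parameter values are made up for the example.

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.sb3 import process_sb3_cfg

    cfg = {
        "learning_rate": "lin_0.001",  # "lin_<x>" strings become linear schedules
        "clip_range": 0.2,             # plain numbers become constant functions
        "policy_kwargs": "dict(net_arch=[64, 64])",  # strings for this key are eval'd
    }
    cfg = process_sb3_cfg(cfg)

    # schedules are callables of the remaining training progress (1.0 -> 0.0)
    assert abs(cfg["learning_rate"](0.5) - 0.0005) < 1e-9
    assert cfg["clip_range"](0.5) == 0.2
    assert cfg["policy_kwargs"]["net_arch"] == [64, 64]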
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrappers and utilities to configure an :class:`RLTaskEnv` for RSL-RL library."""

from .exporter import export_policy_as_jit, export_policy_as_onnx
from .rl_cfg import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
from .vecenv_wrapper import RslRlVecEnvWrapper
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/vecenv_wrapper.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapper to configure an :class:`RLTaskEnv` instance to RSL-RL vectorized environment.

The following example shows how to wrap an environment for RSL-RL:

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper

    env = RslRlVecEnvWrapper(env)

"""

from __future__ import annotations

import gymnasium as gym
import torch

from rsl_rl.env import VecEnv

from omni.isaac.orbit.envs import RLTaskEnv


class RslRlVecEnvWrapper(VecEnv):
    """Wraps around Orbit environment for the RSL-RL library.

    To use asymmetric actor-critic, the environment instance must have the attribute
    :attr:`num_privileged_obs` (int). This is used by the learning agent to allocate buffers in the
    trajectory memory. Additionally, the returned observations should have the key "critic" which
    corresponds to the privileged observations. Since this is optional for some environments, the wrapper
    checks if these attributes exist. If they do not, the wrapper defaults to zero privileged observations.

    .. caution::

        This class must be the last wrapper in the wrapper chain. This is because the wrapper does not follow
        the :class:`gym.Wrapper` interface. Any subsequent wrappers will need to be modified to work with this
        wrapper.

    Reference:
        https://github.com/leggedrobotics/rsl_rl/blob/master/rsl_rl/env/vec_env.py
    """

    def __init__(self, env: RLTaskEnv):
        """Initializes the wrapper.

        Note:
            The wrapper calls :meth:`reset` at the start since the RSL-RL runner does not call reset.

        Args:
            env: The environment to wrap around.

        Raises:
            ValueError: When the environment is not an instance of :class:`RLTaskEnv`.
        """
        # check that input is valid
        if not isinstance(env.unwrapped, RLTaskEnv):
            raise ValueError(f"The environment must be inherited from RLTaskEnv. Environment type: {type(env)}")
        # initialize the wrapper
        self.env = env
        # store information required by wrapper
        self.num_envs = self.unwrapped.num_envs
        self.device = self.unwrapped.device
        self.max_episode_length = self.unwrapped.max_episode_length
        self.num_actions = self.unwrapped.action_manager.total_action_dim
        self.num_obs = self.unwrapped.observation_manager.group_obs_dim["policy"][0]
        # -- privileged observations
        if "critic" in self.unwrapped.observation_manager.group_obs_dim:
            self.num_privileged_obs = self.unwrapped.observation_manager.group_obs_dim["critic"][0]
        else:
            self.num_privileged_obs = 0
        # reset at the start since the RSL-RL runner does not call reset
        self.env.reset()

    def __str__(self):
        """Returns the wrapper name and the :attr:`env` representation string."""
        return f"<{type(self).__name__}{self.env}>"

    def __repr__(self):
        """Returns the string representation of the wrapper."""
        return str(self)

    """
    Properties -- Gym.Wrapper
    """

    @property
    def cfg(self) -> object:
        """Returns the configuration class instance of the environment."""
        return self.unwrapped.cfg

    @property
    def render_mode(self) -> str | None:
        """Returns the :attr:`Env` :attr:`render_mode`."""
        return self.env.render_mode

    @property
    def observation_space(self) -> gym.Space:
        """Returns the :attr:`Env` :attr:`observation_space`."""
        return self.env.observation_space

    @property
    def action_space(self) -> gym.Space:
        """Returns the :attr:`Env` :attr:`action_space`."""
        return self.env.action_space

    @classmethod
    def class_name(cls) -> str:
        """Returns the class name of the wrapper."""
        return cls.__name__

    @property
    def unwrapped(self) -> RLTaskEnv:
        """Returns the base environment of the wrapper.

        This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers.
        """
        return self.env.unwrapped

    """
    Properties
    """

    def get_observations(self) -> tuple[torch.Tensor, dict]:
        """Returns the current observations of the environment."""
        obs_dict = self.unwrapped.observation_manager.compute()
        return obs_dict["policy"], {"observations": obs_dict}

    @property
    def episode_length_buf(self) -> torch.Tensor:
        """The episode length buffer."""
        return self.unwrapped.episode_length_buf

    @episode_length_buf.setter
    def episode_length_buf(self, value: torch.Tensor):
        """Set the episode length buffer.

        Note:
            This is needed to perform random initialization of episode lengths in RSL-RL.
        """
        self.unwrapped.episode_length_buf = value

    """
    Operations - MDP
    """

    def seed(self, seed: int = -1) -> int:  # noqa: D102
        return self.unwrapped.seed(seed)

    def reset(self) -> tuple[torch.Tensor, dict]:  # noqa: D102
        # reset the environment
        obs_dict, _ = self.env.reset()
        # return observations
        return obs_dict["policy"], {"observations": obs_dict}

    def step(self, actions: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
        # record step information
        obs_dict, rew, terminated, truncated, extras = self.env.step(actions)
        # compute dones for compatibility with RSL-RL
        dones = (terminated | truncated).to(dtype=torch.long)
        # move extra observations to the extras dict
        obs = obs_dict["policy"]
        extras["observations"] = obs_dict
        # move time out information to the extras dict
        # this is only needed for infinite horizon tasks
        if not self.unwrapped.cfg.is_finite_horizon:
            extras["time_outs"] = truncated
        # return the step information
        return obs, rew, dones, extras

    def close(self):  # noqa: D102
        return self.env.close()
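A rough sketch of how this wrapper is consumed by the RSL-RL runner follows. The task id is hypothetical, ``agent_cfg`` is assumed to be an ``RslRlOnPolicyRunnerCfg`` instance (see ``rl_cfg.py`` below), and the Isaac Sim app is assumed to be launched already via ``AppLauncher``.

.. code-block:: python

    import gymnasium as gym
    from rsl_rl.runners import OnPolicyRunner

    import omni.isaac.orbit_tasks  # noqa: F401  (registers the Isaac-* tasks)
    from omni.isaac.orbit_tasks.utils import parse_env_cfg
    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlVecEnvWrapper

    env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-C-v0")  # hypothetical task id
    env = gym.make("Isaac-Velocity-Flat-Anymal-C-v0", cfg=env_cfg)
    # the wrapper also performs the initial reset, which the runner does not do
    env = RslRlVecEnvWrapper(env)

    # configclass instances expose to_dict() to build the plain dict the runner expects
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir="logs/rsl_rl", device=agent_cfg.device)
    runner.learn(num_learning_iterations=agent_cfg.max_iterations)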
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/exporter.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import copy
import os
import torch


def export_policy_as_jit(actor_critic: object, path: str, filename="policy.pt"):
    """Export policy into a Torch JIT file.

    Args:
        actor_critic: The actor-critic torch module.
        path: The path to the saving directory.
        filename: The name of the exported JIT file. Defaults to "policy.pt".

    Reference:
        https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L180
    """
    policy_exporter = _TorchPolicyExporter(actor_critic)
    policy_exporter.export(path, filename)


def export_policy_as_onnx(actor_critic: object, path: str, filename="policy.onnx", verbose=False):
    """Export policy into a Torch ONNX file.

    Args:
        actor_critic: The actor-critic torch module.
        path: The path to the saving directory.
        filename: The name of the exported ONNX file. Defaults to "policy.onnx".
        verbose: Whether to print the model summary. Defaults to False.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    policy_exporter = _OnnxPolicyExporter(actor_critic, verbose)
    policy_exporter.export(path, filename)


"""
Helper Classes - Private.
"""


class _TorchPolicyExporter(torch.nn.Module):
    """Exporter of actor-critic into JIT file.

    Reference:
        https://github.com/leggedrobotics/legged_gym/blob/master/legged_gym/utils/helpers.py#L193
    """

    def __init__(self, actor_critic):
        super().__init__()
        self.actor = copy.deepcopy(actor_critic.actor)
        self.is_recurrent = actor_critic.is_recurrent
        if self.is_recurrent:
            self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
            self.rnn.cpu()
            self.register_buffer("hidden_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
            self.register_buffer("cell_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
            self.forward = self.forward_lstm
            self.reset = self.reset_memory

    def forward_lstm(self, x):
        x, (h, c) = self.rnn(x.unsqueeze(0), (self.hidden_state, self.cell_state))
        self.hidden_state[:] = h
        self.cell_state[:] = c
        x = x.squeeze(0)
        return self.actor(x)

    def forward(self, x):
        return self.actor(x)

    @torch.jit.export
    def reset(self):
        pass

    def reset_memory(self):
        self.hidden_state[:] = 0.0
        self.cell_state[:] = 0.0

    def export(self, path, filename):
        os.makedirs(path, exist_ok=True)
        path = os.path.join(path, filename)
        self.to("cpu")
        traced_script_module = torch.jit.script(self)
        traced_script_module.save(path)


class _OnnxPolicyExporter(torch.nn.Module):
    """Exporter of actor-critic into ONNX file."""

    def __init__(self, actor_critic, verbose=False):
        super().__init__()
        self.verbose = verbose
        self.actor = copy.deepcopy(actor_critic.actor)
        self.is_recurrent = actor_critic.is_recurrent
        if self.is_recurrent:
            self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
            self.rnn.cpu()
            self.forward = self.forward_lstm

    def forward_lstm(self, x_in, h_in, c_in):
        x, (h, c) = self.rnn(x_in.unsqueeze(0), (h_in, c_in))
        x = x.squeeze(0)
        return self.actor(x), h, c

    def forward(self, x):
        return self.actor(x)

    def export(self, path, filename):
        self.to("cpu")
        if self.is_recurrent:
            obs = torch.zeros(1, self.rnn.input_size)
            h_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
            c_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
            actions, h_out, c_out = self(obs, h_in, c_in)
            torch.onnx.export(
                self,
                (obs, h_in, c_in),
                os.path.join(path, filename),
                export_params=True,
                opset_version=11,
                verbose=self.verbose,
                input_names=["obs", "h_in", "c_in"],
                output_names=["actions", "h_out", "c_out"],
                dynamic_axes={},
            )
        else:
            obs = torch.zeros(1, self.actor[0].in_features)
            torch.onnx.export(
                self,
                obs,
                os.path.join(path, filename),
                export_params=True,
                opset_version=11,
                verbose=self.verbose,
                input_names=["obs"],
                output_names=["actions"],
                dynamic_axes={},
            )
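A short usage sketch for the exporters: here ``runner`` is assumed to be a trained RSL-RL ``OnPolicyRunner`` whose ``alg.actor_critic`` attribute holds the actor-critic module (that attribute path is an assumption about RSL-RL's internals, not something this module defines).

.. code-block:: python

    import os

    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import export_policy_as_jit, export_policy_as_onnx

    # `runner` is assumed to be a trained rsl_rl.runners.OnPolicyRunner
    policy = runner.alg.actor_critic
    export_dir = os.path.join("logs", "exported")

    export_policy_as_jit(policy, export_dir, filename="policy.pt")
    export_policy_as_onnx(policy, export_dir, filename="policy.onnx")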
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING
from typing import Literal

from omni.isaac.orbit.utils import configclass


@configclass
class RslRlPpoActorCriticCfg:
    """Configuration for the PPO actor-critic networks."""

    class_name: str = "ActorCritic"
    """The policy class name. Default is ActorCritic."""

    init_noise_std: float = MISSING
    """The initial noise standard deviation for the policy."""

    actor_hidden_dims: list[int] = MISSING
    """The hidden dimensions of the actor network."""

    critic_hidden_dims: list[int] = MISSING
    """The hidden dimensions of the critic network."""

    activation: str = MISSING
    """The activation function for the actor and critic networks."""


@configclass
class RslRlPpoAlgorithmCfg:
    """Configuration for the PPO algorithm."""

    class_name: str = "PPO"
    """The algorithm class name. Default is PPO."""

    value_loss_coef: float = MISSING
    """The coefficient for the value loss."""

    use_clipped_value_loss: bool = MISSING
    """Whether to use clipped value loss."""

    clip_param: float = MISSING
    """The clipping parameter for the policy."""

    entropy_coef: float = MISSING
    """The coefficient for the entropy loss."""

    num_learning_epochs: int = MISSING
    """The number of learning epochs per update."""

    num_mini_batches: int = MISSING
    """The number of mini-batches per update."""

    learning_rate: float = MISSING
    """The learning rate for the policy."""

    schedule: str = MISSING
    """The learning rate schedule."""

    gamma: float = MISSING
    """The discount factor."""

    lam: float = MISSING
    """The lambda parameter for Generalized Advantage Estimation (GAE)."""

    desired_kl: float = MISSING
    """The desired KL divergence."""

    max_grad_norm: float = MISSING
    """The maximum gradient norm."""


@configclass
class RslRlOnPolicyRunnerCfg:
    """Configuration of the runner for on-policy algorithms."""

    seed: int = 42
    """The seed for the experiment. Default is 42."""

    device: str = "cuda"
    """The device for the rl-agent. Default is cuda."""

    num_steps_per_env: int = MISSING
    """The number of steps per environment per update."""

    max_iterations: int = MISSING
    """The maximum number of iterations."""

    empirical_normalization: bool = MISSING
    """Whether to use empirical normalization."""

    policy: RslRlPpoActorCriticCfg = MISSING
    """The policy configuration."""

    algorithm: RslRlPpoAlgorithmCfg = MISSING
    """The algorithm configuration."""

    ##
    # Checkpointing parameters
    ##

    save_interval: int = MISSING
    """The number of iterations between saves."""

    experiment_name: str = MISSING
    """The experiment name."""

    run_name: str = ""
    """The run name. Default is empty string.

    The name of the run directory is typically the time-stamp at execution. If the run name is not empty,
    then it is appended to the run directory's name, i.e. the logging directory's name will become
    ``{time-stamp}_{run_name}``.
    """

    ##
    # Logging parameters
    ##

    logger: Literal["tensorboard", "neptune", "wandb"] = "tensorboard"
    """The logger to use. Default is tensorboard."""

    neptune_project: str = "orbit"
    """The neptune project name. Default is "orbit"."""

    wandb_project: str = "orbit"
    """The wandb project name. Default is "orbit"."""

    ##
    # Loading parameters
    ##

    resume: bool = False
    """Whether to resume. Default is False."""

    load_run: str = ".*"
    """The run directory to load. Default is ".*" (all).

    If regex expression, the latest (alphabetical order) matching run will be loaded.
    """

    load_checkpoint: str = "model_.*.pt"
    """The checkpoint file to load. Default is ``"model_.*.pt"`` (all).

    If regex expression, the latest (alphabetical order) matching file will be loaded.
    """
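Since every ``MISSING`` field must be supplied before the configuration is usable, a fully populated instance looks roughly like the following; all numeric values are illustrative assumptions, not recommended defaults.

.. code-block:: python

    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
        RslRlOnPolicyRunnerCfg,
        RslRlPpoActorCriticCfg,
        RslRlPpoAlgorithmCfg,
    )

    agent_cfg = RslRlOnPolicyRunnerCfg(
        num_steps_per_env=24,
        max_iterations=1500,
        empirical_normalization=False,
        save_interval=50,
        experiment_name="anymal_flat",  # hypothetical experiment name
        policy=RslRlPpoActorCriticCfg(
            init_noise_std=1.0,
            actor_hidden_dims=[128, 128],
            critic_hidden_dims=[128, 128],
            activation="elu",
        ),
        algorithm=RslRlPpoAlgorithmCfg(
            value_loss_coef=1.0,
            use_clipped_value_loss=True,
            clip_param=0.2,
            entropy_coef=0.005,
            num_learning_epochs=5,
            num_mini_batches=4,
            learning_rate=1e-3,
            schedule="adaptive",
            gamma=0.99,
            lam=0.95,
            desired_kl=0.01,
            max_grad_norm=1.0,
        ),
    )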
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/data_collector/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for data collection utilities.

All post-processed robomimic compatible datasets share the same data structure.
A single dataset is a single HDF5 file. The stored data follows the structure provided
`here <https://robomimic.github.io/docs/datasets/overview.html#dataset-structure>`_.

The collector takes input data in its batched format and stores them as different
demonstrations, each corresponding to a given environment index. The demonstrations are
flushed to disk when the :meth:`RobomimicDataCollector.flush` is called for the
respective environments. All the data is saved when the :meth:`RobomimicDataCollector.close`
is called.

The following sample shows how to use the :class:`RobomimicDataCollector` to store
random data in a dataset.

.. code-block:: python

    import os
    import torch

    from omni.isaac.orbit_tasks.utils.data_collector import RobomimicDataCollector

    # name of the environment (needed by robomimic)
    task_name = "Isaac-Franka-Lift-v0"
    # specify directory for logging experiments
    test_dir = os.path.dirname(os.path.abspath(__file__))
    log_dir = os.path.join(test_dir, "logs", "demos")
    # name of the file to save data
    filename = "hdf_dataset.hdf5"
    # number of episodes to collect
    num_demos = 10
    # number of environments to simulate
    num_envs = 4

    # create data-collector
    collector_interface = RobomimicDataCollector(task_name, log_dir, filename, num_demos)

    # reset the collector
    collector_interface.reset()

    while not collector_interface.is_stopped():
        # generate random data to store
        # -- obs
        obs = {"joint_pos": torch.randn(num_envs, 10), "joint_vel": torch.randn(num_envs, 10)}
        # -- actions
        actions = torch.randn(num_envs, 10)
        # -- rewards
        rewards = torch.randn(num_envs)
        # -- dones
        dones = torch.rand(num_envs) > 0.5

        # store signals
        # -- obs
        for key, value in obs.items():
            collector_interface.add(f"obs/{key}", value)
        # -- actions
        collector_interface.add("actions", actions)
        # -- next_obs
        for key, value in obs.items():
            collector_interface.add(f"next_obs/{key}", value.cpu().numpy())
        # -- rewards
        collector_interface.add("rewards", rewards)
        # -- dones
        collector_interface.add("dones", dones)

        # flush data from collector for successful environments
        # note: in this case we flush all the time
        reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1)
        collector_interface.flush(reset_env_ids)

    # close collector
    collector_interface.close()

"""

from .robomimic_data_collector import RobomimicDataCollector
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/data_collector/robomimic_data_collector.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Interface to collect and store data from the environment using format from `robomimic`."""

from __future__ import annotations

import h5py
import json
import numpy as np
import os
import torch
from collections.abc import Iterable

import carb


class RobomimicDataCollector:
    """Data collection interface for robomimic.

    This class implements a data collector interface for saving simulation states to disk.
    The data is stored in `HDF5`_ binary data format. The class is useful for collecting
    demonstrations. The collected data follows the `structure`_ from robomimic.

    All datasets in `robomimic` require the observations and next observations obtained
    from before and after the environment step. These are stored as a dictionary of
    observations in the keys "obs" and "next_obs" respectively.

    For certain agents in `robomimic`, the episode data should have the following
    additional keys: "actions", "rewards", "dones". This behavior can be altered by changing
    the dataset keys required in the training configuration for the respective learning agent.

    For reference on datasets, please check the robomimic `documentation`_.

    .. _HDF5: https://www.h5py.org/
    .. _structure: https://robomimic.github.io/docs/datasets/overview.html#dataset-structure
    .. _documentation: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/config/base_config.py#L167-L173
    """

    def __init__(
        self,
        env_name: str,
        directory_path: str,
        filename: str = "test",
        num_demos: int = 1,
        flush_freq: int = 1,
        env_config: dict | None = None,
    ):
        """Initializes the data collection wrapper.

        Args:
            env_name: The name of the environment.
            directory_path: The path to store collected data.
            filename: The basename of the saved file. Defaults to "test".
            num_demos: Number of demonstrations to record until stopping. Defaults to 1.
            flush_freq: Frequency to dump data to disk. Defaults to 1.
            env_config: The configuration for the environment. Defaults to None.
        """
        # save input arguments
        self._env_name = env_name
        self._env_config = env_config
        self._directory = os.path.abspath(directory_path)
        self._filename = filename
        self._num_demos = num_demos
        self._flush_freq = flush_freq
        # print info
        print(self.__str__())
        # create directory if it doesn't exist
        if not os.path.isdir(self._directory):
            os.makedirs(self._directory)
        # placeholder for current hdf5 file object
        self._h5_file_stream = None
        self._h5_data_group = None
        self._h5_episode_group = None
        # store count of demos within episode
        self._demo_count = 0
        # flags for setting up
        self._is_first_interaction = True
        self._is_stop = False
        # create buffers to store data
        self._dataset = dict()

    def __del__(self):
        """Destructor for data collector."""
        if not self._is_stop:
            self.close()

    def __str__(self) -> str:
        """Represents the data collector as a string."""
        msg = "Dataset collector <class RobomimicDataCollector> object\n"
        msg += f"\tStoring trajectories in directory: {self._directory}\n"
        msg += f"\tNumber of demos for collection : {self._num_demos}\n"
        msg += f"\tFrequency for saving data to disk: {self._flush_freq}\n"
        return msg

    """
    Properties
    """

    @property
    def demo_count(self) -> int:
        """The number of demos collected so far."""
        return self._demo_count

    """
    Operations.
    """

    def is_stopped(self) -> bool:
        """Whether data collection is stopped or not.

        Returns:
            True if data collection has stopped.
        """
        return self._is_stop

    def reset(self):
        """Reset the internals of data logger."""
        # setup the file to store data in
        if self._is_first_interaction:
            self._demo_count = 0
            self._create_new_file(self._filename)
            self._is_first_interaction = False
        # clear out existing buffers
        self._dataset = dict()

    def add(self, key: str, value: np.ndarray | torch.Tensor):
        """Add a key-value pair to the dataset.

        The key can be nested by using the "/" character. For example: "obs/joint_pos".
        Currently only two-level nesting is supported.

        Args:
            key: The key name.
            value: The corresponding value of shape (N, ...), where `N` is number of environments.

        Raises:
            ValueError: When the provided key has more than two sub-keys.
                Example: "obs/joints/pos" instead of "obs/joint_pos".
        """
        # check if data should be recorded
        if self._is_first_interaction:
            carb.log_warn("Please call reset before adding new data. Calling reset...")
            self.reset()
        if self._is_stop:
            carb.log_warn(f"Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
            return
        # check datatype
        if isinstance(value, torch.Tensor):
            value = value.cpu().numpy()
        else:
            value = np.asarray(value)
        # check if there are sub-keys
        sub_keys = key.split("/")
        num_sub_keys = len(sub_keys)
        if len(sub_keys) > 2:
            raise ValueError(f"Input key '{key}' has {num_sub_keys} elements, which is more than two.")
        # add key to dictionary if it doesn't exist
        for i in range(value.shape[0]):
            # demo index
            if f"env_{i}" not in self._dataset:
                self._dataset[f"env_{i}"] = dict()
            # key index
            if num_sub_keys == 2:
                # create keys
                if sub_keys[0] not in self._dataset[f"env_{i}"]:
                    self._dataset[f"env_{i}"][sub_keys[0]] = dict()
                if sub_keys[1] not in self._dataset[f"env_{i}"][sub_keys[0]]:
                    self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]] = list()
                # add data to key
                self._dataset[f"env_{i}"][sub_keys[0]][sub_keys[1]].append(value[i])
            else:
                # create keys
                if sub_keys[0] not in self._dataset[f"env_{i}"]:
                    self._dataset[f"env_{i}"][sub_keys[0]] = list()
                # add data to key
                self._dataset[f"env_{i}"][sub_keys[0]].append(value[i])

    def flush(self, env_ids: Iterable[int] = (0,)):
        """Flush the episode data based on environment indices.

        Args:
            env_ids: Environment indices to write data for. Defaults to (0,).
        """
        # check that data is being recorded
        if self._h5_file_stream is None or self._h5_data_group is None:
            carb.log_error("No file stream has been opened. Please call reset before flushing data.")
            return
        # iterate over each environment and add their data
        for index in env_ids:
            # data corresponding to demo
            env_dataset = self._dataset[f"env_{index}"]
            # create episode group based on demo count
            h5_episode_group = self._h5_data_group.create_group(f"demo_{self._demo_count}")
            # store number of steps taken
            h5_episode_group.attrs["num_samples"] = len(env_dataset["actions"])
            # store other data from dictionary
            for key, value in env_dataset.items():
                if isinstance(value, dict):
                    # create group
                    key_group = h5_episode_group.create_group(key)
                    # add sub-keys values
                    for sub_key, sub_value in value.items():
                        key_group.create_dataset(sub_key, data=np.array(sub_value))
                else:
                    h5_episode_group.create_dataset(key, data=np.array(value))
            # increment total step counts
            self._h5_data_group.attrs["total"] += h5_episode_group.attrs["num_samples"]
            # increment total demo counts
            self._demo_count += 1
            # reset buffer for environment
            self._dataset[f"env_{index}"] = dict()
            # dump at desired frequency
            if self._demo_count % self._flush_freq == 0:
                self._h5_file_stream.flush()
                print(f">>> Flushing data to disk. Collected demos: {self._demo_count} / {self._num_demos}")
            # if demos collected then stop
            if self._demo_count >= self._num_demos:
                print(f">>> Desired number of demonstrations collected: {self._demo_count} >= {self._num_demos}.")
                self.close()
                # break out of loop
                break

    def close(self):
        """Stop recording and save the file at its current state."""
        if not self._is_stop:
            print(f">>> Closing recording of data. Collected demos: {self._demo_count} / {self._num_demos}")
            # close the file safely
            if self._h5_file_stream is not None:
                self._h5_file_stream.close()
            # mark that data collection is stopped
            self._is_stop = True

    """
    Helper functions.
    """

    def _create_new_file(self, fname: str):
        """Create a new HDF5 file for writing episode info into.

        Reference:
            https://robomimic.github.io/docs/datasets/overview.html

        Args:
            fname: The base name of the file.
        """
        if not fname.endswith(".hdf5"):
            fname += ".hdf5"
        # define path to file
        hdf5_path = os.path.join(self._directory, fname)
        # construct the stream object
        self._h5_file_stream = h5py.File(hdf5_path, "w")
        # create group to store data
        self._h5_data_group = self._h5_file_stream.create_group("data")
        # stores total number of samples accumulated across demonstrations
        self._h5_data_group.attrs["total"] = 0
        # store the environment meta-info
        # -- we use gym environment type
        # Ref: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/envs/env_base.py#L15
        env_type = 2
        # -- check if env config provided
        if self._env_config is None:
            self._env_config = dict()
        # -- add info
        self._h5_data_group.attrs["env_args"] = json.dumps({
            "env_name": self._env_name,
            "type": env_type,
            "env_kwargs": self._env_config,
        })
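To verify what the collector wrote, the resulting file can be inspected directly with ``h5py``. The sketch below assumes the dataset path from the example in the sub-module docstring above; only standard ``h5py`` group/attribute access is used.

.. code-block:: python

    import h5py

    # inspect a dataset written by RobomimicDataCollector (path is an example)
    with h5py.File("logs/demos/hdf_dataset.hdf5", "r") as f:
        data = f["data"]
        # total number of samples accumulated across all demonstrations
        print("total samples:", data.attrs["total"])
        # environment meta-info stored as a JSON string
        print("env_args:", data.attrs["env_args"])
        # one group per demonstration: demo_0, demo_1, ...
        for demo_name, demo in data.items():
            print(demo_name, "num_samples:", demo.attrs["num_samples"])
            print("  actions:", demo["actions"].shape)
            # nested keys such as "obs/joint_pos" become sub-groups
            if "obs" in demo:
                for key in demo["obs"]:
                    print("  obs/" + key, demo["obs"][key].shape)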
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/setup.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Installation script for the 'omni.isaac.orbit' python package."""

import os
import toml

from setuptools import setup

# Obtain the extension data from the extension.toml file
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the extension.toml file
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))

# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
    # generic
    "numpy",
    "torch==2.0.1",
    "prettytable==3.3.0",
    "tensordict",
    # devices
    "hidapi",
    # gym
    "gymnasium==0.29.0",
    # procedural-generation
    "trimesh",
    "pyglet<2",
]

# Installation operation
setup(
    name="omni-isaac-orbit",
    author="ORBIT Project Developers",
    maintainer="Mayank Mittal",
    maintainer_email="mittalma@ethz.ch",
    url=EXTENSION_TOML_DATA["package"]["repository"],
    version=EXTENSION_TOML_DATA["package"]["version"],
    description=EXTENSION_TOML_DATA["package"]["description"],
    keywords=EXTENSION_TOML_DATA["package"]["keywords"],
    license="BSD-3-Clause",
    include_package_data=True,
    python_requires=">=3.10",
    install_requires=INSTALL_REQUIRES,
    packages=["omni.isaac.orbit"],
    classifiers=[
        "Natural Language :: English",
        "Programming Language :: Python :: 3.10",
        "Isaac Sim :: 2023.1.0-hotfix.1",
        "Isaac Sim :: 2023.1.1",
    ],
    zip_safe=False,
)
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/devices/check_keyboard.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""
This script shows how to use a teleoperation device with Isaac Sim.

The teleoperation device is a keyboard device that allows the user to control the robot.
It is possible to add additional callbacks to it for user-defined operations.
"""

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
app_launcher = AppLauncher()
simulation_app = app_launcher.app

"""Rest everything follows."""

import ctypes

from omni.isaac.core.simulation_context import SimulationContext

from omni.isaac.orbit.devices import Se3Keyboard


def print_cb():
    """Dummy callback function executed when the key 'L' is pressed."""
    print("Print callback")


def quit_cb():
    """Dummy callback function executed when the key 'ESC' is pressed."""
    print("Quit callback")
    simulation_app.close()


def main():
    # Load kit helper
    sim = SimulationContext(physics_dt=0.01, rendering_dt=0.01)

    # Create teleoperation interface
    teleop_interface = Se3Keyboard(pos_sensitivity=0.1, rot_sensitivity=0.1)
    # Add teleoperation callbacks
    # available key buttons: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput
    teleop_interface.add_callback("L", print_cb)
    teleop_interface.add_callback("ESCAPE", quit_cb)
    print("Press 'L' to print a message. Press 'ESC' to quit.")

    # Check that the teleoperation interface is referenced by only a single instance
    if ctypes.c_long.from_address(id(teleop_interface)).value != 1:
        raise RuntimeError("Teleoperation interface is not bounded to a single instance.")

    # Reset interface internals
    teleop_interface.reset()

    # Play simulation
    sim.reset()

    # Simulate
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if sim.is_stopped():
            break
        # If simulation is paused, then skip.
        if not sim.is_playing():
            sim.step()
            continue
        # get keyboard command
        delta_pose, gripper_command = teleop_interface.advance()
        # print command
        if gripper_command:
            print(f"Gripper command: {gripper_command}")
        # step simulation
        sim.step()
        # check if simulator is stopped
        if sim.is_stopped():
            break


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/check_contact_sensor.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the contact sensor in Orbit.

.. code-block:: bash

    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/sensors/check_contact_sensor.py --num_robots 2

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Contact Sensor Test Script")
parser.add_argument("--num_robots", type=int, default=64, help="Number of robots to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sensors.contact_sensor import ContactSensor, ContactSensorCfg

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort:skip

"""
Helpers
"""


def design_scene():
    """Add prims to the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.SphereLightCfg()
    cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
    cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))


"""
Main
"""


def main():
    """Spawns the ANYmal robot and clones it using Isaac Sim Cloner API."""
    # Load kit helper
    sim = SimulationContext(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0")
    # Set main camera
    set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

    # Enable hydra scene-graph instancing
    # this is needed to visualize the scene when flatcache is enabled
    set_carb_setting(sim._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)

    # Create interface to clone the scene
    cloner = GridCloner(spacing=2.0)
    cloner.define_base_env("/World/envs")
    # Everything under the namespace "/World/envs/env_0" will be cloned
    prim_utils.define_prim("/World/envs/env_0")

    # Clone the scene
    num_envs = args_cli.num_robots
    cloner.define_base_env("/World/envs")
    envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
    _ = cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
    # Design props
    design_scene()

    # Spawn things into the scene
    robot_cfg = ANYMAL_C_CFG.replace(prim_path="/World/envs/env_.*/Robot")
    robot_cfg.spawn.activate_contact_sensors = True
    robot = Articulation(cfg=robot_cfg)
    # Contact sensor
    contact_sensor_cfg = ContactSensorCfg(
        prim_path="/World/envs/env_.*/Robot/.*_SHANK", track_air_time=True, debug_vis=not args_cli.headless
    )
    contact_sensor = ContactSensor(cfg=contact_sensor_cfg)
    # filter collisions within each environment instance
    physics_scene_path = sim.get_physics_context().prim_path
    cloner.filter_collisions(
        physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"]
    )

    # Play the simulator
    sim.reset()

    # print info
    print(contact_sensor)

    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Define simulation stepping
    decimation = 4
    physics_dt = sim.get_physics_dt()
    sim_dt = decimation * physics_dt
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if sim.is_stopped():
            break
        # If simulation is paused, then skip.
        if not sim.is_playing():
            sim.step(render=False)
            continue
        # reset
        if count % 1000 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset dof state
            joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            robot.reset()
        # perform 4 steps
        for _ in range(decimation):
            # apply actions
            robot.set_joint_position_target(robot.data.default_joint_pos)
            # write commands to sim
            robot.write_data_to_sim()
            # perform step
            sim.step()
            # fetch data
            robot.update(physics_dt)
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update the buffers
        if sim.is_playing():
            contact_sensor.update(sim_dt, force_recompute=True)
            if count % 100 == 0:
                print("Sim-time: ", sim_time)
                print("Number of contacts: ", torch.count_nonzero(contact_sensor.data.current_air_time == 0.0).item())
                print("-" * 80)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/check_ray_caster.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the ray caster from the Orbit framework.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/sensors/check_ray_caster.py --headless

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Ray Caster Test Script")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to clone.")
parser.add_argument(
    "--terrain_type",
    type=str,
    default="generator",
    help="Type of terrain to import. Can be 'generator' or 'usd' or 'plane'.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.terrains as terrain_gen
from omni.isaac.orbit.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.orbit.terrains.terrain_importer import TerrainImporter
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.timer import Timer


def design_scene(sim: SimulationContext, num_envs: int = 2048):
    """Design the scene."""
    # Create interface to clone the scene
    cloner = GridCloner(spacing=2.0)
    cloner.define_base_env("/World/envs")
    # Everything under the namespace "/World/envs/env_0" will be cloned
    prim_utils.define_prim("/World/envs/env_0")
    # Define the scene
    # -- Light
    cfg = sim_utils.DistantLightCfg(intensity=2000)
    cfg.func("/World/light", cfg)
    # -- Balls
    cfg = sim_utils.SphereCfg(
        radius=0.25,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(),
        mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
        collision_props=sim_utils.CollisionPropertiesCfg(),
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
    )
    cfg.func("/World/envs/env_0/ball", cfg, translation=(0.0, 0.0, 5.0))
    # Clone the scene
    cloner.define_base_env("/World/envs")
    envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
    cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
    physics_scene_path = sim.get_physics_context().prim_path
    cloner.filter_collisions(
        physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
    )


def main():
    """Main function."""
    # Load kit helper
    sim_params = {
        "use_gpu": True,
        "use_gpu_pipeline": True,
        "use_flatcache": True,  # deprecated from Isaac Sim 2023.1 onwards
        "use_fabric": True,  # used from Isaac Sim 2023.1 onwards
        "enable_scene_query_support": True,
    }
    sim = SimulationContext(
        physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0"
    )
    # Set main camera
    set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5])

    # Parameters
    num_envs = args_cli.num_envs
    # Design the scene
    design_scene(sim=sim, num_envs=num_envs)

    # Handler for terrains importing
    terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type=args_cli.terrain_type,
        terrain_generator=ROUGH_TERRAINS_CFG,
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
        max_init_terrain_level=None,
        num_envs=1,
    )
    _ = TerrainImporter(terrain_importer_cfg)

    # Create a ray-caster sensor
    ray_caster_cfg = RayCasterCfg(
        prim_path="/World/envs/env_.*/ball",
        mesh_prim_paths=["/World/ground"],
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
        attach_yaw_only=True,
        debug_vis=not args_cli.headless,
    )
    ray_caster = RayCaster(cfg=ray_caster_cfg)

    # Create a view over all the balls
    ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)

    # Play simulator
    sim.reset()
    # Initialize the views
    # -- balls
    ball_view.initialize()

    # Print the sensor information
    print(ray_caster)

    # Get the initial positions of the balls
    ball_initial_positions, ball_initial_orientations = ball_view.get_world_poses()
    ball_initial_velocities = ball_view.get_velocities()

    # Create a counter for resetting the scene
    step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if sim.is_stopped():
            break
        # If simulation is paused, then skip.
        if not sim.is_playing():
            sim.step(render=False)
            continue
        # Reset the scene
        if step_count % 500 == 0:
            # sample random indices to reset
            reset_indices = torch.randint(0, num_envs, (num_envs // 2,))
            # reset the balls
            ball_view.set_world_poses(
                ball_initial_positions[reset_indices], ball_initial_orientations[reset_indices], indices=reset_indices
            )
            ball_view.set_velocities(ball_initial_velocities[reset_indices], indices=reset_indices)
            # reset the sensor
            ray_caster.reset(reset_indices)
            # reset the counter
            step_count = 0
        # Step simulation
        sim.step()
        # Update the ray-caster
        with Timer(f"Ray-caster update with {num_envs} x {ray_caster.num_rays} rays"):
            ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
        # Update counter
        step_count += 1


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/test_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # ignore private usage of variables warning # pyright: reportPrivateUsage=none from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app app_launcher = AppLauncher(headless=True, offscreen_render=True) simulation_app = app_launcher.app """Rest everything follows.""" import copy import numpy as np import os import random import scipy.spatial.transform as tf import torch import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils import omni.replicator.core as rep from omni.isaac.core.prims import GeometryPrim, RigidPrim from omni.isaac.core.simulation_context import SimulationContext from pxr import Gf, Usd, UsdGeom import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.sensors.camera import Camera, CameraCfg from omni.isaac.orbit.utils import convert_dict_to_backend from omni.isaac.orbit.utils.math import convert_quat from omni.isaac.orbit.utils.timer import Timer # sample camera poses POSITION = [2.5, 2.5, 2.5] QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819] QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324] QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623] class TestCamera(unittest.TestCase): """Test for USD Camera sensor.""" def setUp(self): """Create a blank new stage for each test.""" self.camera_cfg = CameraCfg( height=128, width=128, prim_path="/World/Camera", update_period=0, data_types=["distance_to_image_plane"], spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), ) # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.01 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="torch", device="cpu") # populate scene self._populate_scene() # load stage stage_utils.update_stage() def tearDown(self): """Stops simulator after each test.""" # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( self.sim._timeline.stop() # clear the stage self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() """ Tests """ def test_camera_init(self): """Test camera initialization.""" # Create camera camera = Camera(self.camera_cfg) # Play sim self.sim.reset() # Check if camera is initialized self.assertTrue(camera._is_initialized) # Check if camera prim is set correctly and that it is a camera prim self.assertTrue(camera._sensor_prims[0].GetPath().pathString == self.camera_cfg.prim_path) self.assertTrue(isinstance(camera._sensor_prims[0], UsdGeom.Camera)) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. 
for _ in range(5): self.sim.step() # Check buffers that exists and have correct shapes self.assertTrue(camera.data.pos_w.shape == (1, 3)) self.assertTrue(camera.data.quat_w_ros.shape == (1, 4)) self.assertTrue(camera.data.quat_w_world.shape == (1, 4)) self.assertTrue(camera.data.quat_w_opengl.shape == (1, 4)) self.assertTrue(camera.data.intrinsic_matrices.shape == (1, 3, 3)) self.assertTrue(camera.data.image_shape == (self.camera_cfg.height, self.camera_cfg.width)) self.assertTrue(camera.data.info == [{self.camera_cfg.data_types[0]: None}]) # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera camera.update(self.dt) # check image data for im_data in camera.data.output.to_dict().values(): self.assertTrue(im_data.shape == (1, self.camera_cfg.height, self.camera_cfg.width)) def test_camera_init_offset(self): """Test camera initialization with offset using different conventions.""" # define the same offset in all conventions # -- ROS convention cam_cfg_offset_ros = copy.deepcopy(self.camera_cfg) cam_cfg_offset_ros.offset = CameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_ROS, convention="ros", ) cam_cfg_offset_ros.prim_path = "/World/CameraOffsetRos" camera_ros = Camera(cam_cfg_offset_ros) # -- OpenGL convention cam_cfg_offset_opengl = copy.deepcopy(self.camera_cfg) cam_cfg_offset_opengl.offset = CameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_OPENGL, convention="opengl", ) cam_cfg_offset_opengl.prim_path = "/World/CameraOffsetOpengl" camera_opengl = Camera(cam_cfg_offset_opengl) # -- World convention cam_cfg_offset_world = copy.deepcopy(self.camera_cfg) cam_cfg_offset_world.offset = CameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_WORLD, convention="world", ) cam_cfg_offset_world.prim_path = "/World/CameraOffsetWorld" camera_world = Camera(cam_cfg_offset_world) # play sim self.sim.reset() # retrieve camera pose prim_tf_ros = camera_ros._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default()) prim_tf_opengl = camera_opengl._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default()) prim_tf_world = camera_world._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default()) prim_tf_ros = np.transpose(prim_tf_ros) prim_tf_opengl = np.transpose(prim_tf_opengl) prim_tf_world = np.transpose(prim_tf_world) # check that all transforms are set correctly np.testing.assert_allclose(prim_tf_ros[0:3, 3], cam_cfg_offset_ros.offset.pos) np.testing.assert_allclose(prim_tf_opengl[0:3, 3], cam_cfg_offset_opengl.offset.pos) np.testing.assert_allclose(prim_tf_world[0:3, 3], cam_cfg_offset_world.offset.pos) np.testing.assert_allclose( convert_quat(tf.Rotation.from_matrix(prim_tf_ros[:3, :3]).as_quat(), "wxyz"), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) np.testing.assert_allclose( convert_quat(tf.Rotation.from_matrix(prim_tf_opengl[:3, :3]).as_quat(), "wxyz"), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) np.testing.assert_allclose( convert_quat(tf.Rotation.from_matrix(prim_tf_world[:3, :3]).as_quat(), "wxyz"), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) # check if transform correctly set in output np.testing.assert_allclose(camera_ros.data.pos_w[0], cam_cfg_offset_ros.offset.pos, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_ros[0], QUAT_ROS, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_opengl[0], QUAT_OPENGL, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_world[0], QUAT_WORLD, rtol=1e-5) def test_multi_camera_init(self): """Test multi-camera initialization.""" # create two cameras with different prim paths # 
-- camera 1 cam_cfg_1 = copy.deepcopy(self.camera_cfg) cam_cfg_1.prim_path = "/World/Camera_1" cam_1 = Camera(cam_cfg_1) # -- camera 2 cam_cfg_2 = copy.deepcopy(self.camera_cfg) cam_cfg_2.prim_path = "/World/Camera_2" cam_2 = Camera(cam_cfg_2) # play sim self.sim.reset() # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera cam_1.update(self.dt) cam_2.update(self.dt) # check image data for cam in [cam_1, cam_2]: for im_data in cam.data.output.to_dict().values(): self.assertTrue(im_data.shape == (1, self.camera_cfg.height, self.camera_cfg.width)) def test_camera_set_world_poses(self): """Test camera function to set specific world pose.""" camera = Camera(self.camera_cfg) # play sim self.sim.reset() # set new pose camera.set_world_poses(torch.tensor([POSITION]), torch.tensor([QUAT_WORLD]), convention="world") np.testing.assert_allclose(camera.data.pos_w, [POSITION], rtol=1e-5) np.testing.assert_allclose(camera.data.quat_w_world, [QUAT_WORLD], rtol=1e-5) def test_camera_set_world_poses_from_view(self): """Test camera function to set specific world pose from view.""" camera = Camera(self.camera_cfg) # play sim self.sim.reset() # set new pose camera.set_world_poses_from_view(torch.tensor([POSITION]), torch.tensor([[0.0, 0.0, 0.0]])) np.testing.assert_allclose(camera.data.pos_w, [POSITION], rtol=1e-5) np.testing.assert_allclose(camera.data.quat_w_ros, [QUAT_ROS], rtol=1e-5) def test_intrinsic_matrix(self): """Checks that the camera's set and retrieve methods work for intrinsic matrix.""" camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.height = 240 camera_cfg.width = 320 camera = Camera(camera_cfg) # play sim self.sim.reset() # Desired properties (obtained from realsense camera at 320x240 resolution) rs_intrinsic_matrix = [229.31640625, 0.0, 164.810546875, 0.0, 229.826171875, 122.1650390625, 0.0, 0.0, 1.0] rs_intrinsic_matrix = np.array(rs_intrinsic_matrix).reshape(3, 3) # Set matrix into simulator camera.set_intrinsic_matrices([rs_intrinsic_matrix]) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera camera.update(self.dt) # Check that matrix is correct K = camera.data.intrinsic_matrices[0].numpy() # TODO: This is not correctly setting all values in the matrix since the # vertical aperture and aperture offsets are not being set correctly # This is a bug in the simulator. 
self.assertAlmostEqual(rs_intrinsic_matrix[0, 0], K[0, 0], 4) # self.assertAlmostEqual(rs_intrinsic_matrix[1, 1], K[1, 1], 4) def test_camera_resolution_all_colorize(self): """Test camera resolution is correctly set for all types with colorization enabled.""" # Add all types camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.data_types = [ "rgb", "distance_to_image_plane", "normals", "semantic_segmentation", "instance_segmentation_fast", "instance_id_segmentation_fast", ] camera_cfg.colorize_instance_id_segmentation = True camera_cfg.colorize_instance_segmentation = True camera_cfg.colorize_semantic_segmentation = True # Create camera camera = Camera(camera_cfg) # Play sim self.sim.reset() # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(12): self.sim.step() camera.update(self.dt) # expected sizes hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 4) hw_1c_shape = (1, camera_cfg.height, camera_cfg.width) # access image data and compare shapes output = camera.data.output self.assertEqual(output["rgb"].shape, hw_3c_shape) self.assertEqual(output["distance_to_image_plane"].shape, hw_1c_shape) self.assertEqual(output["normals"].shape, hw_3c_shape) # FIXME: No idea why it does not work here. The raw buffers are of type int64 rather than int32 -> need to investigate # It works fine when run_usd_camera.py tutorial is run. # self.assertEqual(output["semantic_segmentation"].shape, hw_3c_shape) # self.assertEqual(output["instance_segmentation_fast"].shape, hw_3c_shape) # self.assertEqual(output["instance_id_segmentation_fast"].shape, hw_3c_shape) # access image data and compare dtype output = camera.data.output self.assertEqual(output["rgb"].dtype, torch.uint8) self.assertEqual(output["distance_to_image_plane"].dtype, torch.float) self.assertEqual(output["normals"].dtype, torch.float) self.assertEqual(output["semantic_segmentation"].dtype, torch.uint8) self.assertEqual(output["instance_segmentation_fast"].dtype, torch.uint8) self.assertEqual(output["instance_id_segmentation_fast"].dtype, torch.uint8) def test_camera_resolution_no_colorize(self): """Test camera resolution is correctly set for all types with no colorization enabled.""" # Add all types camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.data_types = [ "rgb", "distance_to_image_plane", "normals", "semantic_segmentation", "instance_segmentation_fast", "instance_id_segmentation_fast", ] camera_cfg.colorize_instance_id_segmentation = False camera_cfg.colorize_instance_segmentation = False camera_cfg.colorize_semantic_segmentation = False # Create camera camera = Camera(camera_cfg) # Play sim self.sim.reset() # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details.
for _ in range(12): self.sim.step() camera.update(self.dt) # expected sizes hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 4) hw_1c_shape = (1, camera_cfg.height, camera_cfg.width) # access image data and compare shapes output = camera.data.output self.assertEqual(output["rgb"].shape, hw_3c_shape) self.assertEqual(output["distance_to_image_plane"].shape, hw_1c_shape) self.assertEqual(output["normals"].shape, hw_3c_shape) self.assertEqual(output["semantic_segmentation"].shape, hw_1c_shape) self.assertEqual(output["instance_segmentation_fast"].shape, hw_1c_shape) self.assertEqual(output["instance_id_segmentation_fast"].shape, hw_1c_shape) # access image data and compare dtype output = camera.data.output self.assertEqual(output["rgb"].dtype, torch.uint8) self.assertEqual(output["distance_to_image_plane"].dtype, torch.float) self.assertEqual(output["normals"].dtype, torch.float) # FIXME: No idea why it does not work here. The raw buffers are of type int64 rather than int32 -> need to investigate # It works fine when run_usd_camera.py tutorial is run. # self.assertEqual(output["semantic_segmentation"].dtype, torch.int32) # self.assertEqual(output["instance_segmentation_fast"].dtype, torch.int32) # self.assertEqual(output["instance_id_segmentation_fast"].dtype, torch.int32) def test_throughput(self): """Checks that the single camera gets created properly with a rig.""" # Create temp directory to dump the results file_dir = os.path.dirname(os.path.realpath(__file__)) temp_dir = os.path.join(file_dir, "output", "camera", "throughput") os.makedirs(temp_dir, exist_ok=True) # Create replicator writer rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3) # create camera camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.height = 480 camera_cfg.width = 640 camera = Camera(camera_cfg) # Play simulator self.sim.reset() # Set camera pose camera.set_world_poses_from_view(torch.tensor([[2.5, 2.5, 2.5]]), torch.tensor([[0.0, 0.0, 0.0]])) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(5): # perform rendering self.sim.step() # update camera with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): camera.update(self.dt) # Save images with Timer(f"Time taken for writing data with shape {camera.image_shape} "): # Pack data back into replicator format to save them using its writer rep_output = dict() camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy") for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()): if info is not None: rep_output[key] = {"data": data, "info": info} else: rep_output[key] = data # Save images rep_output["trigger_outputs"] = {"on_time": camera.frame[0]} rep_writer.write(rep_output) print("----------------------------------------") # Check image data for im_data in camera.data.output.values(): self.assertTrue(im_data.shape == (1, camera_cfg.height, camera_cfg.width)) """ Helper functions.
""" @staticmethod def _populate_scene(): """Add prims to the scene.""" # Ground-plane cfg = sim_utils.GroundPlaneCfg() cfg.func("/World/defaultGroundPlane", cfg) # Lights cfg = sim_utils.SphereLightCfg() cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0)) cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0)) # Random objects random.seed(0) for i in range(10): # sample random position position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0]) position *= np.asarray([1.5, 1.5, 0.5]) # create prim prim_type = random.choice(["Cube", "Sphere", "Cylinder"]) prim = prim_utils.create_prim( f"/World/Objects/Obj_{i:02d}", prim_type, translation=position, scale=(0.25, 0.25, 0.25), semantic_label=prim_type, ) # cast to geom prim geom_prim = getattr(UsdGeom, prim_type)(prim) # set random color color = Gf.Vec3f(random.random(), random.random(), random.random()) geom_prim.CreateDisplayColorAttr() geom_prim.GetDisplayColorAttr().Set([color]) # add rigid properties GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) if __name__ == "__main__": run_tests()
20,322
Python
42.611588
119
0.613522
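The intrinsic-matrix test above hinges on how pinhole spawn parameters map to K. A minimal standalone sketch of that mapping (plain NumPy; the helper name and the square-pixel assumption are illustrative, not the simulator's API):

# Sketch: build K from pinhole parameters, as the intrinsic-matrix test assumes.
import numpy as np

def intrinsic_matrix(focal_length: float, horizontal_aperture: float, width: int, height: int) -> np.ndarray:
    # fx is the focal length expressed in pixels; fy = fx is a square-pixel
    # assumption (vertical aperture matching the image aspect ratio)
    fx = width * focal_length / horizontal_aperture
    fy = fx
    cx, cy = width / 2.0, height / 2.0
    return np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

# with the test's spawn parameters at 320x240: fx = 320 * 24.0 / 20.955 ~ 366.5
K = intrinsic_matrix(focal_length=24.0, horizontal_aperture=20.955, width=320, height=240)
print(K)

This also shows why the realsense matrix cannot be reproduced exactly under these assumptions: its fx and fy differ slightly, which requires the vertical aperture and aperture offsets that the TODO in the test flags as buggy.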
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/test_frame_transformer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script checks the FrameTransformer sensor by visualizing the frames that it creates. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import math import scipy.spatial.transform as tf import torch import unittest import omni.isaac.core.utils.stage as stage_utils import omni.isaac.orbit.sim as sim_utils import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg from omni.isaac.orbit.sensors import FrameTransformerCfg, OffsetCfg from omni.isaac.orbit.terrains import TerrainImporterCfg from omni.isaac.orbit.utils import configclass ## # Pre-defined configs ## from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort:skip def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): """Converts Euler XYZ to Quaternion (w, x, y, z).""" quat = tf.Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() return tuple(quat[[3, 0, 1, 2]].tolist()) def euler_rpy_apply(rpy, xyz, degrees=False): """Applies rotation from Euler XYZ on position vector.""" rot = tf.Rotation.from_euler("xyz", rpy, degrees=degrees) return tuple(rot.apply(xyz).tolist()) @configclass class MySceneCfg(InteractiveSceneCfg): """Example scene configuration.""" # terrain - flat terrain plane terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") # articulation - robot robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") # sensors - frame transformer (filled inside unit test) frame_transformer: FrameTransformerCfg = None class TestFrameTransformer(unittest.TestCase): """Test for frame transformer sensor.""" def setUp(self): """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Load kit helper self.sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005)) # Set main camera self.sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0]) def tearDown(self): """Stops simulator after each test.""" # stop simulation # self.sim.stop() # clear the stage self.sim.clear_all_callbacks() self.sim.clear_instance() """ Tests """ def test_frame_transformer_feet_wrt_base(self): """Test feet transformations w.r.t. base source frame. In this test, the source frame is the robot base. This frame is at index 0, when the frame bodies are sorted in the order of the regex matching in the frame transformer. 
""" # Spawn things into stage scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) scene_cfg.frame_transformer = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/base", target_frames=[ FrameTransformerCfg.FrameCfg( name="LF_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, -math.pi / 2), ), ), FrameTransformerCfg.FrameCfg( name="RF_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, math.pi / 2), ), ), FrameTransformerCfg.FrameCfg( name="LH_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, -math.pi / 2), ), ), FrameTransformerCfg.FrameCfg( name="RH_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, math.pi / 2), ), ), ], ) scene = InteractiveScene(scene_cfg) # Play the simulator self.sim.reset() # Acquire the index of ground truth bodies feet_indices, feet_names = scene.articulations["robot"].find_bodies( ["LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT"] ) # Check names are parsed the same order user_feet_names = [f"{name}_USER" for name in feet_names] self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names) # default joint targets default_actions = scene.articulations["robot"].data.default_joint_pos.clone() # Define simulation stepping sim_dt = self.sim.get_physics_dt() # Simulate physics for count in range(100): # # reset if count % 25 == 0: # reset root state root_state = scene.articulations["robot"].data.default_root_state.clone() root_state[:, :3] += scene.env_origins joint_pos = scene.articulations["robot"].data.default_joint_pos joint_vel = scene.articulations["robot"].data.default_joint_vel # -- set root state # -- robot scene.articulations["robot"].write_root_state_to_sim(root_state) scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) # reset buffers scene.reset() # set joint targets robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) scene.articulations["robot"].set_joint_position_target(robot_actions) # write data to sim scene.write_data_to_sim() # perform step self.sim.step() # read data from sim scene.update(sim_dt) # check absolute frame transforms in world frame # -- ground-truth root_pose_w = scene.articulations["robot"].data.root_state_w[:, :7] feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] # -- frame transformer source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w # check if they are same torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, 
rtol=1e-3, atol=1e-3) # check if relative transforms are same feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source for index in range(len(feet_indices)): # ground-truth foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( root_pose_w[:, :3], root_pose_w[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] ) # check if they are same torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) def test_frame_transformer_feet_wrt_thigh(self): """Test feet transformation w.r.t. thigh source frame. In this test, the source frame is the LF leg's thigh frame. This frame is not at index 0, when the frame bodies are sorted in the order of the regex matching in the frame transformer. """ # Spawn things into stage scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) scene_cfg.frame_transformer = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/LF_THIGH", target_frames=[ FrameTransformerCfg.FrameCfg( name="LF_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, -math.pi / 2), ), ), FrameTransformerCfg.FrameCfg( name="RF_FOOT_USER", prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", offset=OffsetCfg( pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), rot=quat_from_euler_rpy(0, 0, math.pi / 2), ), ), ], ) scene = InteractiveScene(scene_cfg) # Play the simulator self.sim.reset() # Acquire the index of ground truth bodies source_frame_index = scene.articulations["robot"].find_bodies("LF_THIGH")[0][0] feet_indices, feet_names = scene.articulations["robot"].find_bodies(["LF_FOOT", "RF_FOOT"]) # Check names are parsed the same order user_feet_names = [f"{name}_USER" for name in feet_names] self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names) # default joint targets default_actions = scene.articulations["robot"].data.default_joint_pos.clone() # Define simulation stepping sim_dt = self.sim.get_physics_dt() # Simulate physics for count in range(100): # # reset if count % 25 == 0: # reset root state root_state = scene.articulations["robot"].data.default_root_state.clone() root_state[:, :3] += scene.env_origins joint_pos = scene.articulations["robot"].data.default_joint_pos joint_vel = scene.articulations["robot"].data.default_joint_vel # -- set root state # -- robot scene.articulations["robot"].write_root_state_to_sim(root_state) scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) # reset buffers scene.reset() # set joint targets robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) scene.articulations["robot"].set_joint_position_target(robot_actions) # write data to sim scene.write_data_to_sim() # perform step self.sim.step() # read data from sim scene.update(sim_dt) # check absolute frame transforms in world frame # -- ground-truth source_pose_w_gt = scene.articulations["robot"].data.body_state_w[:, source_frame_index, :7] feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] # -- frame transformer source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w source_quat_w_tf = 
scene.sensors["frame_transformer"].data.source_quat_w feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w # check if they are same torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3) # check if relative transforms are same feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source for index in range(len(feet_indices)): # ground-truth foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( source_pose_w_gt[:, :3], source_pose_w_gt[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] ) # check if they are same torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) if __name__ == "__main__": run_tests()
13,964
Python
44.048387
119
0.578416
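Both frame-transformer tests reduce their relative-pose checks to expressing a target pose in the source frame. A self-contained sketch of that operation (SciPy quaternions in (x, y, z, w) order; this illustrates what math_utils.subtract_frame_transforms is used for above, not its actual implementation):

# Sketch: express a target pose in a source frame.
import numpy as np
from scipy.spatial.transform import Rotation as R

def relative_pose(p_source, q_source, p_target, q_target):
    # pos_b = R_s^{-1} @ (p_t - p_s), q_b = q_s^{-1} * q_t
    r_s = R.from_quat(q_source)
    pos_b = r_s.inv().apply(p_target - p_source)
    quat_b = (r_s.inv() * R.from_quat(q_target)).as_quat()
    return pos_b, quat_b

# sanity check: a frame expressed relative to itself is the identity transform
p = np.array([1.0, 2.0, 3.0])
q = R.from_euler("xyz", [0.1, 0.2, 0.3]).as_quat()
pos_b, quat_b = relative_pose(p, q, p, q)
assert np.allclose(pos_b, 0.0)
assert np.isclose(R.from_quat(quat_b).magnitude(), 0.0)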
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/test_contact_sensor.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Tests to verify contact sensor functionality on rigid object prims.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests HEADLESS = True # launch omniverse app app_launcher = AppLauncher(headless=HEADLESS) simulation_app = app_launcher.app """Rest everything follows.""" import torch import unittest from dataclasses import MISSING from enum import Enum import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg from omni.isaac.orbit.sensors import ContactSensor, ContactSensorCfg from omni.isaac.orbit.sim import build_simulation_context from omni.isaac.orbit.terrains import HfRandomUniformTerrainCfg, TerrainGeneratorCfg, TerrainImporterCfg from omni.isaac.orbit.utils import configclass ## # Custom helper classes. ## class ContactTestMode(Enum): """Enum to declare the type of contact sensor test to execute.""" IN_CONTACT = 0 """Enum to test the condition where the test object is in contact with the ground plane.""" NON_CONTACT = 1 """Enum to test the condition where the test object is not in contact with the ground plane (air time).""" @configclass class TestContactSensorRigidObjectCfg(RigidObjectCfg): """Configuration for rigid objects used for the contact sensor test. This contains the expected values in the configuration to simplify test fixtures. """ contact_pose: torch.Tensor = MISSING """6D pose of the rigid object under test when it is in contact with the ground surface.""" non_contact_pose: torch.Tensor = MISSING """6D pose of the rigid object under test when it is not in contact.""" @configclass class ContactSensorSceneCfg(InteractiveSceneCfg): """Configuration of the scene used by the contact sensor test.""" terrain: TerrainImporterCfg = MISSING """Terrain configuration within the scene.""" shape: TestContactSensorRigidObjectCfg = MISSING """RigidObject contact prim configuration.""" contact_sensor: ContactSensorCfg = MISSING """Contact sensor configuration.""" ## # Scene entity configurations. 
## CUBE_CFG = TestContactSensorRigidObjectCfg( prim_path="/World/Objects/Cube", spawn=sim_utils.CuboidCfg( size=(0.5, 0.5, 0.5), rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, ), collision_props=sim_utils.CollisionPropertiesCfg( collision_enabled=True, ), activate_contact_sensors=True, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.6, 0.4)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0, -1.0, 1.0)), contact_pose=torch.tensor([0, -1.0, 0, 1, 0, 0, 0]), non_contact_pose=torch.tensor([0, -1.0, 1.0, 1, 0, 0, 0]), ) """Configuration of the cube prim.""" SPHERE_CFG = TestContactSensorRigidObjectCfg( prim_path="/World/Objects/Sphere", spawn=sim_utils.SphereCfg( radius=0.25, rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, ), collision_props=sim_utils.CollisionPropertiesCfg( collision_enabled=True, ), activate_contact_sensors=True, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.4, 0.6)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0, 1.0, 1.0)), contact_pose=torch.tensor([0, 1.0, 0.0, 1, 0, 0, 0]), non_contact_pose=torch.tensor([0, 1.0, 1.0, 1, 0, 0, 0]), ) """Configuration of the sphere prim.""" CYLINDER_CFG = TestContactSensorRigidObjectCfg( prim_path="/World/Objects/Cylinder", spawn=sim_utils.CylinderCfg( radius=0.5, height=0.01, axis="Y", rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, ), collision_props=sim_utils.CollisionPropertiesCfg( collision_enabled=True, ), activate_contact_sensors=True, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.6, 0.4, 0.4)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0, 0.0, 1.0)), contact_pose=torch.tensor([0, 0, 0.0, 1, 0, 0, 0]), non_contact_pose=torch.tensor([0, 0, 1.0, 1, 0, 0, 0]), ) """Configuration of the cylinder prim.""" CAPSULE_CFG = TestContactSensorRigidObjectCfg( prim_path="/World/Objects/Capsule", spawn=sim_utils.CapsuleCfg( radius=0.25, height=0.5, axis="Z", rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, ), collision_props=sim_utils.CollisionPropertiesCfg( collision_enabled=True, ), activate_contact_sensors=True, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.2, 0.4, 0.4)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(1.0, 0.0, 1.5)), contact_pose=torch.tensor([1.0, 0.0, 0.0, 1, 0, 0, 0]), non_contact_pose=torch.tensor([1.0, 0.0, 1.5, 1, 0, 0, 0]), ) """Configuration of the capsule prim.""" CONE_CFG = TestContactSensorRigidObjectCfg( prim_path="/World/Objects/Cone", spawn=sim_utils.ConeCfg( radius=0.5, height=0.5, axis="Z", rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, ), collision_props=sim_utils.CollisionPropertiesCfg( collision_enabled=True, ), activate_contact_sensors=True, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.2, 0.4)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0, 0.0, 1.0)), contact_pose=torch.tensor([-1.0, 0.0, 0.0, 1, 0, 0, 0]), non_contact_pose=torch.tensor([-1.0, 0.0, 1.0, 1, 0, 0, 0]), ) """Configuration of the cone prim.""" FLAT_TERRAIN_CFG = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") """Configuration of the flat ground plane.""" COBBLESTONE_TERRAIN_CFG = TerrainImporterCfg( prim_path="/World/ground", terrain_type="generator", terrain_generator=TerrainGeneratorCfg( seed=0, size=(3.0, 3.0), border_width=0.0, num_rows=1, num_cols=1, sub_terrains={ "random_rough": HfRandomUniformTerrainCfg( proportion=1.0, noise_range=(0.0, 0.05), noise_step=0.01, border_width=0.25 ), }, ), ) 
"""Configuration of the generated mesh terrain.""" class TestContactSensor(unittest.TestCase): """Unittest class for testing the contact sensor. This class includes test cases for the available rigid object primitives, and tests that the the contact sensor is reporting correct results for various contact durations, terrain types, and evaluation devices. """ @classmethod def setUpClass(cls): """Contact sensor test suite init.""" cls.sim_dt = 0.0025 cls.durations = [cls.sim_dt, cls.sim_dt * 2, cls.sim_dt * 32, cls.sim_dt * 128] cls.terrains = [FLAT_TERRAIN_CFG, COBBLESTONE_TERRAIN_CFG] cls.devices = ["cuda:0", "cpu"] def test_cube_contact_time(self): """Checks contact sensor values for contact time and air time for a cube collision primitive.""" self._run_contact_sensor_test(shape_cfg=CUBE_CFG) def test_sphere_contact_time(self): """Checks contact sensor values for contact time and air time for a sphere collision primitive.""" self._run_contact_sensor_test(shape_cfg=SPHERE_CFG) """ Internal helpers. """ def _run_contact_sensor_test(self, shape_cfg: TestContactSensorRigidObjectCfg): """Runs a rigid body test for a given contact primitive configuration. This method iterates through each device and terrain combination in the simulation environment, running tests for contact sensors. Args: shape_cfg: The configuration parameters for the shape to be tested. """ for device in self.devices: for terrain in self.terrains: with self.subTest(device=device, terrain=terrain): with build_simulation_context(device=device, dt=self.sim_dt, add_lighting=True) as sim: # Instance new scene for the current terrain and contact prim. scene_cfg = ContactSensorSceneCfg(num_envs=1, env_spacing=1.0, lazy_sensor_update=False) scene_cfg.terrain = terrain scene_cfg.shape = shape_cfg scene_cfg.contact_sensor = ContactSensorCfg( prim_path=shape_cfg.prim_path, track_pose=True, debug_vis=False, update_period=0.0, track_air_time=True, history_length=3, ) scene = InteractiveScene(scene_cfg) # Set variables internally for reference self.sim = sim self.scene = scene # Play the simulation self.sim.reset() # Run contact time and air time tests. self._test_sensor_contact( shape=self.scene["shape"], sensor=self.scene["contact_sensor"], mode=ContactTestMode.IN_CONTACT, ) self._test_sensor_contact( shape=self.scene["shape"], sensor=self.scene["contact_sensor"], mode=ContactTestMode.NON_CONTACT, ) def _test_sensor_contact(self, shape: RigidObject, sensor: ContactSensor, mode: ContactTestMode): """Test for the contact sensor. This test sets the contact prim to a pose either in contact or out of contact with the ground plane for a known duration. Once the contact duration has elapsed, the data stored inside the contact sensor associated with the contact prim is checked against the expected values. This process is repeated for all elements in :attr:`TestContactSensor.durations`, where each successive contact timing test is punctuated by setting the contact prim to the complement of the desired contact mode for 1 sim time-step. Args: shape: The contact prim used for the contact sensor test. sensor: The sensor reporting data to be verified by the contact sensor test. mode: The contact test mode: either contact with ground plane or air time. """ # reset tge test state sensor.reset() expected_last_test_contact_time = 0 expected_last_reset_contact_time = 0 # set poses for shape for a given contact sensor test mode. # desired contact mode to set for a given duration. 
test_pose = None # complement of the desired contact mode used to reset the contact sensor. reset_pose = None if mode == ContactTestMode.IN_CONTACT: test_pose = shape.cfg.contact_pose reset_pose = shape.cfg.non_contact_pose elif mode == ContactTestMode.NON_CONTACT: test_pose = shape.cfg.non_contact_pose reset_pose = shape.cfg.contact_pose else: raise ValueError("Received incompatible contact sensor test mode") for idx in range(len(self.durations)): current_test_time = 0 duration = self.durations[idx] while current_test_time < duration: # set object states to contact the ground plane shape.write_root_pose_to_sim(root_pose=test_pose) # perform simulation step self._perform_sim_step() # increment contact time current_test_time += self.sim_dt # set last contact time to the previous desired contact duration plus the extra dt allowance. expected_last_test_contact_time = self.durations[idx - 1] + self.sim_dt if idx > 0 else 0 # Check the data inside the contact sensor if mode == ContactTestMode.IN_CONTACT: self._check_prim_contact_state_times( sensor=sensor, expected_air_time=0.0, expected_contact_time=self.durations[idx], expected_last_contact_time=expected_last_test_contact_time, expected_last_air_time=expected_last_reset_contact_time, dt=duration + self.sim_dt, ) elif mode == ContactTestMode.NON_CONTACT: self._check_prim_contact_state_times( sensor=sensor, expected_air_time=self.durations[idx], expected_contact_time=0.0, expected_last_contact_time=expected_last_reset_contact_time, expected_last_air_time=expected_last_test_contact_time, dt=duration + self.sim_dt, ) # switch the contact mode for 1 dt step before the next contact test begins. shape.write_root_pose_to_sim(root_pose=reset_pose) # perform simulation step self._perform_sim_step() # set the last air time to 2 sim_dt steps, because last_air_time and last_contact_time # adds an additional sim_dt to the total time spent in the previous contact mode for uncertainty in # when the contact switch happened in between a dt step. expected_last_reset_contact_time = 2 * self.sim_dt def _check_prim_contact_state_times( self, sensor: ContactSensor, expected_air_time: float, expected_contact_time: float, expected_last_air_time: float, expected_last_contact_time: float, dt: float, ) -> None: """Checks contact sensor data matches expected values. Args: sensor: Instance of ContactSensor containing data to be tested. expected_air_time: Air time ground truth. expected_contact_time: Contact time ground truth. expected_last_air_time: Last air time ground truth. expected_last_contact_time: Last contact time ground truth. dt: Time since previous contact mode switch. If the contact prim left contact 0.1 seconds ago, dt should be 0.1 + simulation dt seconds. 
""" # store current state of the contact prim in_air = False in_contact = False if expected_air_time > 0.0: in_air = True if expected_contact_time > 0.0: in_contact = True measured_contact_time = sensor.data.current_contact_time measured_air_time = sensor.data.current_air_time measured_last_contact_time = sensor.data.last_contact_time measured_last_air_time = sensor.data.last_air_time # check current contact state self.assertAlmostEqual(measured_contact_time.item(), expected_contact_time, places=2) self.assertAlmostEqual(measured_air_time.item(), expected_air_time, places=2) # check last contact state self.assertAlmostEqual(measured_last_contact_time.item(), expected_last_contact_time, places=2) self.assertAlmostEqual(measured_last_air_time.item(), expected_last_air_time, places=2) # check current contact mode self.assertEqual(sensor.compute_first_contact(dt=dt).item(), in_contact) self.assertEqual(sensor.compute_first_air(dt=dt).item(), in_air) def _perform_sim_step(self) -> None: """Updates sensors and steps the contact sensor test scene.""" # write data to simulation self.scene.write_data_to_sim() # simulate self.sim.step(render=not HEADLESS) # update buffers at sim dt self.scene.update(dt=self.sim_dt) if __name__ == "__main__": run_tests()
16,350
Python
39.273399
115
0.624343
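The duration checks in _test_sensor_contact assume simple accumulator semantics: the current contact/air time grows by dt each step, and on a mode switch the accumulated value plus one dt of switch-timing uncertainty becomes the "last" time. A hedged standalone sketch of that bookkeeping (not the real ContactSensor implementation):

# Sketch of the contact/air time bookkeeping that the test above verifies.
class ContactTimer:
    def __init__(self, dt: float):
        self.dt = dt
        self.current_contact_time = 0.0
        self.current_air_time = 0.0
        self.last_contact_time = 0.0
        self.last_air_time = 0.0

    def update(self, in_contact: bool):
        if in_contact:
            if self.current_air_time > 0.0:
                # add one dt for uncertainty in when the switch happened
                self.last_air_time = self.current_air_time + self.dt
                self.current_air_time = 0.0
            self.current_contact_time += self.dt
        else:
            if self.current_contact_time > 0.0:
                self.last_contact_time = self.current_contact_time + self.dt
                self.current_contact_time = 0.0
            self.current_air_time += self.dt

timer = ContactTimer(dt=0.0025)
for _ in range(32):
    timer.update(in_contact=True)
timer.update(in_contact=False)
assert abs(timer.last_contact_time - 33 * 0.0025) < 1e-9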
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sensors/test_ray_caster_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # ignore private usage of variables warning # pyright: reportPrivateUsage=none """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app app_launcher = AppLauncher(headless=True, offscreen_render=True) simulation_app = app_launcher.app """Rest everything follows.""" import copy import numpy as np import os import torch import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils import omni.replicator.core as rep from omni.isaac.core.simulation_context import SimulationContext from pxr import Gf from omni.isaac.orbit.sensors.camera import Camera, CameraCfg from omni.isaac.orbit.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns from omni.isaac.orbit.sim import PinholeCameraCfg from omni.isaac.orbit.terrains.trimesh.utils import make_plane from omni.isaac.orbit.terrains.utils import create_prim_from_mesh from omni.isaac.orbit.utils import convert_dict_to_backend from omni.isaac.orbit.utils.timer import Timer # sample camera poses POSITION = [2.5, 2.5, 2.5] QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819] QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324] QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623] class TestWarpCamera(unittest.TestCase): """Test for orbit camera sensor""" """ Test Setup and Teardown """ def setUp(self): """Create a blank new stage for each test.""" camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, horizontal_aperture=20.955, height=480, width=640, ) self.camera_cfg = RayCasterCameraCfg( prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=[ "distance_to_image_plane", ], ) # Create a new stage stage_utils.create_new_stage() # create xform because placement of camera directly under world is not supported prim_utils.create_prim("/World/Camera", "Xform") # Simulation time-step self.dt = 0.01 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="torch", device="cpu") # Ground-plane mesh = make_plane(size=(2e1, 2e1), height=0.0, center_zero=True) create_prim_from_mesh("/World/defaultGroundPlane", mesh) # load stage stage_utils.update_stage() def tearDown(self): """Stops simulator after each test.""" # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( self.sim._timeline.stop() # clear the stage self.sim.clear() self.sim.clear_instance() """ Tests """ def test_camera_init(self): """Test camera initialization.""" # Create camera camera = RayCasterCamera(cfg=self.camera_cfg) # Play sim self.sim.reset() # Check if camera is initialized self.assertTrue(camera._is_initialized) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. 
for _ in range(5): self.sim.step() # Check buffers that exist and have correct shapes self.assertTrue(camera.data.pos_w.shape == (1, 3)) self.assertTrue(camera.data.quat_w_ros.shape == (1, 4)) self.assertTrue(camera.data.quat_w_world.shape == (1, 4)) self.assertTrue(camera.data.quat_w_opengl.shape == (1, 4)) self.assertTrue(camera.data.intrinsic_matrices.shape == (1, 3, 3)) self.assertTrue( camera.data.image_shape == (self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width) ) self.assertTrue(camera.data.info == [{self.camera_cfg.data_types[0]: None}]) # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera camera.update(self.dt) # check image data for im_data in camera.data.output.to_dict().values(): self.assertTrue( im_data.shape == (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width) ) def test_camera_resolution(self): """Test camera resolution is correctly set.""" # Create camera camera = RayCasterCamera(cfg=self.camera_cfg) # Play sim self.sim.reset() # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() camera.update(self.dt) # access image data and compare shapes for im_data in camera.data.output.to_dict().values(): self.assertTrue(im_data.shape == (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width)) def test_camera_init_offset(self): """Test camera initialization with offset using different conventions.""" # define the same offset in all conventions # -- ROS convention cam_cfg_offset_ros = copy.deepcopy(self.camera_cfg) cam_cfg_offset_ros.offset = RayCasterCameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_ROS, convention="ros", ) prim_utils.create_prim("/World/CameraOffsetRos", "Xform") cam_cfg_offset_ros.prim_path = "/World/CameraOffsetRos" camera_ros = RayCasterCamera(cam_cfg_offset_ros) # -- OpenGL convention cam_cfg_offset_opengl = copy.deepcopy(self.camera_cfg) cam_cfg_offset_opengl.offset = RayCasterCameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_OPENGL, convention="opengl", ) prim_utils.create_prim("/World/CameraOffsetOpengl", "Xform") cam_cfg_offset_opengl.prim_path = "/World/CameraOffsetOpengl" camera_opengl = RayCasterCamera(cam_cfg_offset_opengl) # -- World convention cam_cfg_offset_world = copy.deepcopy(self.camera_cfg) cam_cfg_offset_world.offset = RayCasterCameraCfg.OffsetCfg( pos=POSITION, rot=QUAT_WORLD, convention="world", ) prim_utils.create_prim("/World/CameraOffsetWorld", "Xform") cam_cfg_offset_world.prim_path = "/World/CameraOffsetWorld" camera_world = RayCasterCamera(cam_cfg_offset_world) # play sim self.sim.reset() # update cameras camera_world.update(self.dt) camera_opengl.update(self.dt) camera_ros.update(self.dt) # check that all transforms are set correctly np.testing.assert_allclose(camera_ros.data.pos_w[0].numpy(), cam_cfg_offset_ros.offset.pos) np.testing.assert_allclose(camera_opengl.data.pos_w[0].numpy(), cam_cfg_offset_opengl.offset.pos) np.testing.assert_allclose(camera_world.data.pos_w[0].numpy(), cam_cfg_offset_world.offset.pos) # check if transform correctly set in output np.testing.assert_allclose(camera_ros.data.pos_w[0], cam_cfg_offset_ros.offset.pos, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_ros[0], QUAT_ROS, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_opengl[0], QUAT_OPENGL, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_world[0], QUAT_WORLD, rtol=1e-5) def
test_multi_camera_init(self): """Test multi-camera initialization.""" # create two cameras with different prim paths # -- camera 1 cam_cfg_1 = copy.deepcopy(self.camera_cfg) cam_cfg_1.prim_path = "/World/Camera_1" prim_utils.create_prim("/World/Camera_1", "Xform") # Create camera cam_1 = RayCasterCamera(cam_cfg_1) # -- camera 2 cam_cfg_2 = copy.deepcopy(self.camera_cfg) cam_cfg_2.prim_path = "/World/Camera_2" prim_utils.create_prim("/World/Camera_2", "Xform") cam_2 = RayCasterCamera(cam_cfg_2) # check that the loaded meshes are equal self.assertTrue(cam_1.meshes == cam_2.meshes) # play sim self.sim.reset() # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera cam_1.update(self.dt) cam_2.update(self.dt) # check image data for cam in [cam_1, cam_2]: for im_data in cam.data.output.to_dict().values(): self.assertTrue( im_data.shape == (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width) ) def test_camera_set_world_poses(self): """Test camera function to set specific world pose.""" camera = RayCasterCamera(self.camera_cfg) # play sim self.sim.reset() # set new pose camera.set_world_poses(torch.tensor([POSITION]), torch.tensor([QUAT_WORLD]), convention="world") np.testing.assert_allclose(camera.data.pos_w, [POSITION], rtol=1e-5) np.testing.assert_allclose(camera.data.quat_w_world, [QUAT_WORLD], rtol=1e-5) def test_camera_set_world_poses_from_view(self): """Test camera function to set specific world pose from view.""" camera = RayCasterCamera(self.camera_cfg) # play sim self.sim.reset() # set new pose camera.set_world_poses_from_view(torch.tensor([POSITION]), torch.tensor([[0.0, 0.0, 0.0]])) np.testing.assert_allclose(camera.data.pos_w, [POSITION], rtol=1e-5) np.testing.assert_allclose(camera.data.quat_w_ros, [QUAT_ROS], rtol=1e-5) def test_intrinsic_matrix(self): """Checks that the camera's set and retrieve methods work for intrinsic matrix.""" camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.pattern_cfg.height = 240 camera_cfg.pattern_cfg.width = 320 camera = RayCasterCamera(camera_cfg) # play sim self.sim.reset() # Desired properties (obtained from realsense camera at 320x240 resolution) rs_intrinsic_matrix = [229.31640625, 0.0, 164.810546875, 0.0, 229.826171875, 122.1650390625, 0.0, 0.0, 1.0] rs_intrinsic_matrix = torch.tensor(rs_intrinsic_matrix).reshape(3, 3).unsqueeze(0) # Set matrix into simulator camera.set_intrinsic_matrices(rs_intrinsic_matrix) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(10): # perform rendering self.sim.step() # update camera camera.update(self.dt) # Check that matrix is correct K = camera.data.intrinsic_matrices[0].numpy() # TODO: This is not correctly setting all values in the matrix since the # vertical aperture and aperture offsets are not being set correctly # This is a bug in the simulator. 
self.assertAlmostEqual(rs_intrinsic_matrix[0, 0, 0].numpy(), K[0, 0], 4) # self.assertAlmostEqual(rs_intrinsic_matrix[1, 1], K[1, 1], 4) def test_throughput(self): """Checks that the single camera gets created properly with a rig.""" # Create temp directory to dump the results file_dir = os.path.dirname(os.path.realpath(__file__)) temp_dir = os.path.join(file_dir, "output", "camera", "throughput") os.makedirs(temp_dir, exist_ok=True) # Create replicator writer rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3) # create camera camera_cfg = copy.deepcopy(self.camera_cfg) camera_cfg.pattern_cfg.height = 480 camera_cfg.pattern_cfg.width = 640 camera = RayCasterCamera(camera_cfg) # Play simulator self.sim.reset() # Set camera pose camera.set_world_poses_from_view(torch.tensor([[2.5, 2.5, 2.5]]), torch.tensor([[0.0, 0.0, 0.0]])) # Simulate for a few steps # note: This is a workaround to ensure that the textures are loaded. # Check "Known Issues" section in the documentation for more details. for _ in range(5): self.sim.step() # Simulate physics for _ in range(5): # perform rendering self.sim.step() # update camera with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): camera.update(self.dt) # Save images with Timer(f"Time taken for writing data with shape {camera.image_shape} "): # Pack data back into replicator format to save them using its writer rep_output = dict() camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy") for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()): if info is not None: rep_output[key] = {"data": data, "info": info} else: rep_output[key] = data # Save images rep_output["trigger_outputs"] = {"on_time": camera.frame[0]} rep_writer.write(rep_output) print("----------------------------------------") # Check image data for im_data in camera.data.output.values(): self.assertTrue(im_data.shape == (1, camera_cfg.pattern_cfg.height, camera_cfg.pattern_cfg.width)) def test_output_equal_to_usdcamera(self): camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, horizontal_aperture=20.955, height=240, width=320, ) prim_utils.create_prim("/World/Camera_warp", "Xform") camera_cfg_warp = RayCasterCameraCfg( prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], ) camera_warp = RayCasterCamera(camera_cfg_warp) # create usd camera camera_cfg_usd = CameraCfg( height=240, width=320, prim_path="/World/Camera_usd", update_period=0, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], spawn=PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-4, 1.0e5) ), ) camera_usd = Camera(camera_cfg_usd) # play sim self.sim.reset() self.sim.play() # set views camera_warp.set_world_poses_from_view(torch.tensor([[2.5, 2.5, 4.5]]), torch.tensor([[0.0, 0.0, 0.0]])) camera_usd.set_world_poses_from_view(torch.tensor([[2.5, 2.5, 4.5]]), torch.tensor([[0.0, 0.0, 0.0]])) # perform steps for _ in range(5): self.sim.step() # update camera camera_usd.update(self.dt) camera_warp.update(self.dt) # check image data np.testing.assert_allclose( camera_usd.data.output["distance_to_image_plane"].numpy(), camera_warp.data.output["distance_to_image_plane"].numpy(), rtol=5e-3,
) np.testing.assert_allclose( camera_usd.data.output["distance_to_camera"].numpy(), camera_warp.data.output["distance_to_camera"].numpy(), rtol=5e-3, ) np.testing.assert_allclose( camera_usd.data.output["normals"].numpy()[..., :3], camera_warp.data.output["normals"].numpy(), rtol=1e-5, atol=1e-4, ) def test_output_equal_to_usdcamera_offset(self): offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020] camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, horizontal_aperture=20.955, height=240, width=320, ) prim_utils.create_prim("/World/Camera_warp", "Xform") camera_cfg_warp = RayCasterCameraCfg( prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, offset=RayCasterCameraCfg.OffsetCfg(pos=(2.5, 2.5, 4.0), rot=offset_rot, convention="ros"), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], ) camera_warp = RayCasterCamera(camera_cfg_warp) # create usd camera camera_cfg_usd = CameraCfg( height=240, width=320, prim_path="/World/Camera_usd", update_period=0, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], spawn=PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-6, 1.0e5) ), offset=CameraCfg.OffsetCfg(pos=(2.5, 2.5, 4.0), rot=offset_rot, convention="ros"), ) camera_usd = Camera(camera_cfg_usd) # play sim self.sim.reset() self.sim.play() # perform steps for _ in range(5): self.sim.step() # update camera camera_usd.update(self.dt) camera_warp.update(self.dt) # check image data np.testing.assert_allclose( camera_usd.data.output["distance_to_image_plane"].numpy(), camera_warp.data.output["distance_to_image_plane"].numpy(), rtol=5e-3, ) np.testing.assert_allclose( camera_usd.data.output["distance_to_camera"].numpy(), camera_warp.data.output["distance_to_camera"].numpy(), rtol=5e-3, ) np.testing.assert_allclose( camera_usd.data.output["normals"].numpy()[..., :3], camera_warp.data.output["normals"].numpy(), rtol=1e-5, atol=1e-4, ) def test_output_equal_to_usdcamera_prim_offset(self): """Test that the output of the ray caster camera is equal to the output of the usd camera when both are placed under an XForm prim that is translated and rotated from the world origin.""" offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020] # gf quat gf_quatf = Gf.Quatd() gf_quatf.SetReal(QUAT_OPENGL[0]) gf_quatf.SetImaginary(tuple(QUAT_OPENGL[1:])) camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, horizontal_aperture=20.955, height=240, width=320, ) prim_raycast_cam = prim_utils.create_prim("/World/Camera_warp", "Xform") prim_raycast_cam.GetAttribute("xformOp:translate").Set(tuple(POSITION)) prim_raycast_cam.GetAttribute("xformOp:orient").Set(gf_quatf) camera_cfg_warp = RayCasterCameraCfg( prim_path="/World/Camera_warp", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, offset=RayCasterCameraCfg.OffsetCfg(pos=(0, 0, 2.0), rot=offset_rot, convention="ros"), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], ) camera_warp = RayCasterCamera(camera_cfg_warp) # create usd camera camera_cfg_usd = CameraCfg( height=240, width=320, prim_path="/World/Camera_usd/camera", update_period=0, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], spawn=PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-6, 1.0e5) ), offset=CameraCfg.OffsetCfg(pos=(0, 0, 2.0),
rot=offset_rot, convention="ros"), ) prim_usd = prim_utils.create_prim("/World/Camera_usd", "Xform") prim_usd.GetAttribute("xformOp:translate").Set(tuple(POSITION)) prim_usd.GetAttribute("xformOp:orient").Set(gf_quatf) camera_usd = Camera(camera_cfg_usd) # play sim self.sim.reset() self.sim.play() # perform steps for _ in range(5): self.sim.step() # update camera camera_usd.update(self.dt) camera_warp.update(self.dt) # check if pos and orientation are correct np.testing.assert_allclose(camera_warp.data.pos_w[0].numpy(), camera_usd.data.pos_w[0].numpy(), rtol=1e-5) np.testing.assert_allclose( camera_warp.data.quat_w_ros[0].numpy(), camera_usd.data.quat_w_ros[0].numpy(), rtol=1e-5 ) # check image data np.testing.assert_allclose( camera_usd.data.output["distance_to_image_plane"].numpy(), camera_warp.data.output["distance_to_image_plane"].numpy(), rtol=5e-3, ) np.testing.assert_allclose( camera_usd.data.output["distance_to_camera"].numpy(), camera_warp.data.output["distance_to_camera"].numpy(), rtol=5e-3, ) np.testing.assert_allclose( camera_usd.data.output["normals"].numpy()[..., :3], camera_warp.data.output["normals"].numpy(), rtol=1e-5, atol=1e-4, ) if __name__ == "__main__": run_tests()
23,013
Python
39.660777
120
0.595272
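The ray-caster camera tests compare warp-based ray casting against the USD render camera; the shared core is back-projecting pixel coordinates through K into per-pixel ray directions. A standalone sketch of that back-projection (illustrative only, not orbit's pattern API):

# Sketch: per-pixel ray directions in the camera frame from an intrinsic matrix.
import numpy as np

def pixel_ray_directions(K: np.ndarray, width: int, height: int) -> np.ndarray:
    # pixel centers in homogeneous coordinates, shape (height, width, 3)
    u, v = np.meshgrid(np.arange(width) + 0.5, np.arange(height) + 0.5)
    pix = np.stack([u, v, np.ones_like(u)], axis=-1)
    # back-project: d = K^{-1} [u, v, 1]^T, then normalize to unit length
    rays = pix @ np.linalg.inv(K).T
    return rays / np.linalg.norm(rays, axis=-1, keepdims=True)

K = np.array([[229.3, 0.0, 164.8], [0.0, 229.8, 122.2], [0.0, 0.0, 1.0]])
dirs = pixel_ray_directions(K, width=320, height=240)
# the pixel nearest the principal point looks almost straight down the optical axis
print(dirs[122, 164])  # ~ [0, 0, 1]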
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/envs/check_base_env_floating_cube.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script demonstrates the base environment concept that combines a scene with an action, observation and event manager for a floating cube. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="This script demonstrates how to use the concept of an Environment.") parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import torch import omni.isaac.orbit.envs.mdp as mdp import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import AssetBaseCfg, RigidObject, RigidObjectCfg from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg from omni.isaac.orbit.managers import EventTermCfg as EventTerm from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm from omni.isaac.orbit.managers import SceneEntityCfg from omni.isaac.orbit.managers.action_manager import ActionTerm, ActionTermCfg from omni.isaac.orbit.scene import InteractiveSceneCfg from omni.isaac.orbit.terrains import TerrainImporterCfg from omni.isaac.orbit.utils import configclass ## # Scene definition ## @configclass class MySceneCfg(InteractiveSceneCfg): """Example scene configuration.""" # add terrain terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False) # add cube cube: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/cube", spawn=sim_utils.CuboidCfg( size=(0.2, 0.2, 0.2), rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0), mass_props=sim_utils.MassPropertiesCfg(mass=1.0), physics_material=sim_utils.RigidBodyMaterialCfg(), visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5)), ) # lights light = AssetBaseCfg( prim_path="/World/light", spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), ) ## # Action Term ## class CubeActionTerm(ActionTerm): """Simple action term that implements a PD controller to track a target position.""" _asset: RigidObject """The articulation asset on which the action term is applied.""" def __init__(self, cfg: ActionTermCfg, env: BaseEnv): # call super constructor super().__init__(cfg, env) # create buffers self._raw_actions = torch.zeros(env.num_envs, 3, device=self.device) self._processed_actions = torch.zeros(env.num_envs, 3, device=self.device) self._vel_command = torch.zeros(self.num_envs, 6, device=self.device) # gains of controller self.p_gain = 5.0 self.d_gain = 0.5 """ Properties. 
""" @property def action_dim(self) -> int: return self._raw_actions.shape[1] @property def raw_actions(self) -> torch.Tensor: # desired: (x, y, z) return self._raw_actions @property def processed_actions(self) -> torch.Tensor: return self._processed_actions """ Operations """ def process_actions(self, actions: torch.Tensor): # store the raw actions self._raw_actions[:] = actions # no-processing of actions self._processed_actions[:] = self._raw_actions[:] def apply_actions(self): # implement a PD controller to track the target position pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins) vel_error = -self._asset.data.root_lin_vel_w # set velocity targets self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error self._asset.write_root_velocity_to_sim(self._vel_command) @configclass class CubeActionTermCfg(ActionTermCfg): """Configuration for the cube action term.""" class_type: type = CubeActionTerm ## # Observation Term ## def base_position(env: BaseEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Root linear velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] return asset.data.root_pos_w - env.scene.env_origins ## # Environment settings ## @configclass class ActionsCfg: """Action specifications for the MDP.""" joint_pos = CubeActionTermCfg(asset_name="cube") @configclass class ObservationsCfg: """Observation specifications for the MDP.""" @configclass class PolicyCfg(ObsGroup): """Observations for policy group.""" # cube velocity position = ObsTerm(func=base_position, params={"asset_cfg": SceneEntityCfg("cube")}) def __post_init__(self): self.enable_corruption = True self.concatenate_terms = True # observation groups policy: PolicyCfg = PolicyCfg() @configclass class EventCfg: """Configuration for events.""" reset_base = EventTerm( func=mdp.reset_root_state_uniform, mode="reset", params={ "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, "velocity_range": { "x": (-0.5, 0.5), "y": (-0.5, 0.5), "z": (-0.5, 0.5), }, "asset_cfg": SceneEntityCfg("cube"), }, ) ## # Environment configuration ## @configclass class CubeEnvCfg(BaseEnvCfg): """Configuration for the locomotion velocity-tracking environment.""" # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5, replicate_physics=True) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() events: EventCfg = EventCfg() def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 # simulation settings self.sim.dt = 0.01 self.sim.physics_material = self.scene.terrain.physics_material def main(): """Main function.""" # setup base environment env = BaseEnv(cfg=CubeEnvCfg()) # setup target position commands target_position = torch.rand(env.num_envs, 3, device=env.device) * 2 target_position[:, 2] += 2.0 # offset all targets so that they move to the world origin target_position -= env.scene.env_origins # simulate physics count = 0 while simulation_app.is_running(): with torch.inference_mode(): # reset if count % 300 == 0: env.reset() count = 0 # step env obs, _ = env.step(target_position) # print mean squared position error between target and current position error = torch.norm(obs["policy"] - target_position).mean().item() print(f"[Step: {count:04d}]: Mean position error: {error:.4f}") # update counter count += 1 if __name__ == "__main__": # run the main function 
main() # close sim app simulation_app.close()
7,663
Python
27.490706
114
0.650268
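A minimal, self-contained sketch (not part of the ORBIT sources) of why the PD law in `CubeActionTerm.apply_actions` above drives the cube to its target. It integrates a point mass under the same control law, assuming the simulator tracks the commanded velocity within one step; the gains and step size are copied from the script.

import torch

p_gain, d_gain, dt = 5.0, 0.5, 0.01
pos = torch.zeros(4, 3)          # positions of 4 hypothetical cubes
vel = torch.zeros(4, 3)          # their linear velocities
target = torch.rand(4, 3) * 2.0  # random targets, as in main()

for _ in range(1000):
    # same control law as apply_actions: v = p * (target - pos) + d * (-vel)
    cmd = p_gain * (target - pos) + d_gain * (-vel)
    # assume the sim follows the velocity command exactly for one step
    vel = cmd
    pos = pos + vel * dt

print("final mean error:", torch.norm(pos - target, dim=-1).mean().item())  # ~0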
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/envs/test_null_command_term.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import unittest from collections import namedtuple from omni.isaac.orbit.envs.mdp import NullCommandCfg class TestNullCommandTerm(unittest.TestCase): """Test cases for null command generator.""" def setUp(self) -> None: self.env = namedtuple("RLTaskEnv", ["num_envs", "dt", "device"])(20, 0.1, "cpu") def test_str(self): """Test the string representation of the command manager.""" cfg = NullCommandCfg() command_term = cfg.class_type(cfg, self.env) # print the expected string print() print(command_term) def test_compute(self): """Test the compute function. For null command generator, it does nothing.""" cfg = NullCommandCfg() command_term = cfg.class_type(cfg, self.env) # test the reset function command_term.reset() # test the compute function command_term.compute(dt=self.env.dt) # expect error with self.assertRaises(RuntimeError): command_term.command if __name__ == "__main__": run_tests()
1,459
Python
26.037037
88
0.660041
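A tiny sketch of the mock-environment pattern this test (and the manager tests below) relies on: a namedtuple stands in for the real environment, exposing only the attributes the term under test reads (`num_envs`, `dt`, `device`); anything the manager never touches can simply be omitted.

from collections import namedtuple

MockEnv = namedtuple("RLTaskEnv", ["num_envs", "dt", "device"])
env = MockEnv(20, 0.1, "cpu")
print(env.num_envs, env.dt, env.device)  # 20 0.1 cpu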
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/envs/check_base_env_anymal_locomotion.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates the environment concept that combines a scene with an action,
observation and event manager for a quadruped robot.

A locomotion policy is loaded and used to control the robot. This shows how to use the
environment with a policy.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the concept of an Environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import torch

import omni.isaac.orbit.envs.mdp as mdp
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import RayCasterCfg, patterns
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR, check_file_path, read_file
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise

##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG  # isort: skip
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort: skip


##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Example scene configuration."""

    # add terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="generator",
        terrain_generator=ROUGH_TERRAINS_CFG,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="multiply",
            restitution_combine_mode="multiply",
            static_friction=1.0,
            dynamic_friction=1.0,
        ),
        debug_vis=False,
    )

    # add robot
    robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # sensors
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/ground"],
    )

    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )


##
# MDP settings
##


def constant_commands(env: BaseEnv) -> torch.Tensor:
    """A constant velocity command: walk forward at 1 m/s."""
    return torch.tensor([[1, 0, 0]], device=env.device).repeat(env.num_envs, 1)


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
        projected_gravity = ObsTerm(
            func=mdp.projected_gravity,
            noise=Unoise(n_min=-0.05, n_max=0.05),
        )
        velocity_commands = ObsTerm(func=constant_commands)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
        actions = ObsTerm(func=mdp.last_action)
        height_scan = ObsTerm(
            func=mdp.height_scan,
            params={"sensor_cfg": SceneEntityCfg("height_scanner")},
            noise=Unoise(n_min=-0.1, n_max=0.1),
            clip=(-1.0, 1.0),
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    reset_base = EventTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (-0.5, 0.5),
                "y": (-0.5, 0.5),
                "z": (-0.5, 0.5),
                "roll": (-0.5, 0.5),
                "pitch": (-0.5, 0.5),
                "yaw": (-0.5, 0.5),
            },
        },
    )


##
# Environment configuration
##


@configclass
class QuadrupedEnvCfg(BaseEnvCfg):
    """Configuration for the locomotion velocity-tracking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5, replicate_physics=True)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    events: EventCfg = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 4
        self.episode_length_s = 20.0
        # simulation settings
        self.sim.dt = 0.005
        # update sensor update periods
        # we tick all the sensors based on the smallest update period (physics update period)
        if self.scene.height_scanner is not None:
            self.scene.height_scanner.update_period = self.decimation * self.sim.dt


def main():
    """Main function."""
    # setup base environment
    env = BaseEnv(cfg=QuadrupedEnvCfg())
    obs, _ = env.reset()

    # load the pre-trained locomotion policy
    policy_path = os.path.join(ISAAC_ORBIT_NUCLEUS_DIR, "Policies", "ANYmal-C", "policy.pt")
    # check if policy file exists
    if not check_file_path(policy_path):
        raise FileNotFoundError(f"Policy file '{policy_path}' does not exist.")
    file_bytes = read_file(policy_path)
    # jit load the policy
    locomotion_policy = torch.jit.load(file_bytes)
    locomotion_policy.to(env.device)
    locomotion_policy.eval()

    # simulate physics
    count = 0
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 1000 == 0:
                obs, _ = env.reset()
                count = 0
                print("[INFO]: Resetting robot state...")
            # infer action
            action = locomotion_policy(obs["policy"])
            # step env
            obs, _ = env.step(action)
            # update counter
            count += 1


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,531
Python
29.248996
118
0.644536
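A minimal sketch (independent of Isaac Sim) of the TorchScript loading pattern used in `main()` above. The tiny policy and its dimensions (235 observations, 12 actions) are hypothetical stand-ins for the ANYmal checkpoint on Nucleus; `torch.jit.load` accepts a file path or any file-like object, which is why the script can pass the bytes returned by `read_file` directly.

import io

import torch
import torch.nn as nn

# stand-in for a trained policy (hypothetical, for illustration only)
policy = torch.jit.script(nn.Sequential(nn.Linear(235, 64), nn.ELU(), nn.Linear(64, 12)))

# round-trip through an in-memory buffer, mimicking read_file() -> torch.jit.load()
buffer = io.BytesIO()
torch.jit.save(policy, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
loaded.eval()

with torch.inference_mode():
    action = loaded(torch.zeros(4, 235))  # batch of 4 observations
print(action.shape)  # torch.Size([4, 12])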
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/managers/test_observation_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import torch import unittest from collections import namedtuple from omni.isaac.orbit.managers import ManagerTermBase, ObservationGroupCfg, ObservationManager, ObservationTermCfg from omni.isaac.orbit.utils import configclass def grilled_chicken(env): return torch.ones(env.num_envs, 4, device=env.device) def grilled_chicken_with_bbq(env, bbq: bool): return bbq * torch.ones(env.num_envs, 1, device=env.device) def grilled_chicken_with_curry(env, hot: bool): return hot * 2 * torch.ones(env.num_envs, 1, device=env.device) def grilled_chicken_with_yoghurt(env, hot: bool, bland: float): return hot * bland * torch.ones(env.num_envs, 5, device=env.device) def grilled_chicken_with_yoghurt_and_bbq(env, hot: bool, bland: float, bbq: bool = False): return hot * bland * bbq * torch.ones(env.num_envs, 3, device=env.device) class complex_function_class(ManagerTermBase): def __init__(self, cfg: ObservationTermCfg, env: object): self.cfg = cfg self.env = env # define some variables self._time_passed = torch.zeros(env.num_envs, device=env.device) def reset(self, env_ids: torch.Tensor | None = None): if env_ids is None: env_ids = slice(None) self._time_passed[env_ids] = 0.0 def __call__(self, env: object, interval: float) -> torch.Tensor: self._time_passed += interval return self._time_passed.clone().unsqueeze(-1) class non_callable_complex_function_class(ManagerTermBase): def __init__(self, cfg: ObservationTermCfg, env: object): self.cfg = cfg self.env = env # define some variables self._cost = 2 * self.env.num_envs def call_me(self, env: object) -> torch.Tensor: return torch.ones(env.num_envs, 2, device=env.device) * self._cost class MyDataClass: def __init__(self, num_envs: int, device: str): self.pos_w = torch.rand((num_envs, 3), device=device) self.lin_vel_w = torch.rand((num_envs, 3), device=device) def pos_w_data(env) -> torch.Tensor: return env.data.pos_w def lin_vel_w_data(env) -> torch.Tensor: return env.data.lin_vel_w class TestObservationManager(unittest.TestCase): """Test cases for various situations with observation manager.""" def setUp(self) -> None: # set up the environment self.num_envs = 20 self.device = "cuda:0" # create dummy environment self.env = namedtuple("BaseEnv", ["num_envs", "device", "data"])( self.num_envs, self.device, MyDataClass(self.num_envs, self.device) ) def test_str(self): """Test the string representation of the observation manager.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class SampleGroupCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = ObservationTermCfg(func="__main__:grilled_chicken", scale=10) term_2 = ObservationTermCfg(func=grilled_chicken, scale=2) term_3 = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=5, params={"bbq": True}) term_4 = ObservationTermCfg( func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0} ) term_5 = ObservationTermCfg( func=grilled_chicken_with_yoghurt_and_bbq, scale=1.0, params={"hot": False, "bland": 2.0} ) policy: ObservationGroupCfg = SampleGroupCfg() # create observation manager cfg = MyObservationManagerCfg() self.obs_man = 
ObservationManager(cfg, self.env) self.assertEqual(len(self.obs_man.active_terms["policy"]), 5) # print the expected string print() print(self.obs_man) def test_config_equivalence(self): """Test the equivalence of observation manager created from different config types.""" # create from config class @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class SampleGroupCfg(ObservationGroupCfg): """Test config class for policy observation group.""" your_term = ObservationTermCfg(func="__main__:grilled_chicken", scale=10) his_term = ObservationTermCfg(func=grilled_chicken, scale=2) my_term = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=5, params={"bbq": True}) her_term = ObservationTermCfg( func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0} ) policy = SampleGroupCfg() critic = SampleGroupCfg(concatenate_terms=False, her_term=None) cfg = MyObservationManagerCfg() obs_man_from_cfg = ObservationManager(cfg, self.env) # create from config class @configclass class MyObservationManagerAnnotatedCfg: """Test config class for observation manager with annotations on terms.""" @configclass class SampleGroupCfg(ObservationGroupCfg): """Test config class for policy observation group.""" your_term: ObservationTermCfg = ObservationTermCfg(func="__main__:grilled_chicken", scale=10) his_term: ObservationTermCfg = ObservationTermCfg(func=grilled_chicken, scale=2) my_term: ObservationTermCfg = ObservationTermCfg( func=grilled_chicken_with_bbq, scale=5, params={"bbq": True} ) her_term: ObservationTermCfg = ObservationTermCfg( func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0} ) policy: ObservationGroupCfg = SampleGroupCfg() critic: ObservationGroupCfg = SampleGroupCfg(concatenate_terms=False, her_term=None) cfg = MyObservationManagerAnnotatedCfg() obs_man_from_annotated_cfg = ObservationManager(cfg, self.env) # check equivalence # parsed terms self.assertEqual(obs_man_from_cfg.active_terms, obs_man_from_annotated_cfg.active_terms) self.assertEqual(obs_man_from_cfg.group_obs_term_dim, obs_man_from_annotated_cfg.group_obs_term_dim) self.assertEqual(obs_man_from_cfg.group_obs_dim, obs_man_from_annotated_cfg.group_obs_dim) # parsed term configs self.assertEqual(obs_man_from_cfg._group_obs_term_cfgs, obs_man_from_annotated_cfg._group_obs_term_cfgs) self.assertEqual(obs_man_from_cfg._group_obs_concatenate, obs_man_from_annotated_cfg._group_obs_concatenate) def test_config_terms(self): """Test the number of terms in the observation manager.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class SampleGroupCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = ObservationTermCfg(func=grilled_chicken, scale=10) term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False}) policy: ObservationGroupCfg = SampleGroupCfg() critic: ObservationGroupCfg = SampleGroupCfg(term_2=None) # create observation manager cfg = MyObservationManagerCfg() self.obs_man = ObservationManager(cfg, self.env) self.assertEqual(len(self.obs_man.active_terms["policy"]), 2) self.assertEqual(len(self.obs_man.active_terms["critic"]), 1) def test_compute(self): """Test the observation computation.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class PolicyCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = 
ObservationTermCfg(func=grilled_chicken, scale=10) term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False}) term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0) term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) @configclass class CriticCfg(ObservationGroupCfg): term_1 = ObservationTermCfg(func=pos_w_data, scale=2.0) term_2 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0) term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) policy: ObservationGroupCfg = PolicyCfg() critic: ObservationGroupCfg = CriticCfg() # create observation manager cfg = MyObservationManagerCfg() self.obs_man = ObservationManager(cfg, self.env) # compute observation using manager observations = self.obs_man.compute() # obtain the group observations obs_policy: torch.Tensor = observations["policy"] obs_critic: torch.Tensor = observations["critic"] # check the observation shape self.assertEqual((self.env.num_envs, 11), obs_policy.shape) self.assertEqual((self.env.num_envs, 12), obs_critic.shape) # make sure that the data are the same for same terms # -- within group torch.testing.assert_close(obs_critic[:, 0:3], obs_critic[:, 6:9]) torch.testing.assert_close(obs_critic[:, 3:6], obs_critic[:, 9:12]) # -- between groups torch.testing.assert_close(obs_policy[:, 5:8], obs_critic[:, 0:3]) torch.testing.assert_close(obs_policy[:, 8:11], obs_critic[:, 3:6]) def test_invalid_observation_config(self): """Test the invalid observation config.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class PolicyCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=0.1, params={"hot": False}) term_2 = ObservationTermCfg(func=grilled_chicken_with_yoghurt, scale=2.0, params={"hot": False}) policy: ObservationGroupCfg = PolicyCfg() # create observation manager cfg = MyObservationManagerCfg() # check the invalid config with self.assertRaises(ValueError): self.obs_man = ObservationManager(cfg, self.env) def test_callable_class_term(self): """Test the observation computation with callable class term.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class PolicyCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = ObservationTermCfg(func=grilled_chicken, scale=10) term_2 = ObservationTermCfg(func=complex_function_class, scale=0.2, params={"interval": 0.5}) policy: ObservationGroupCfg = PolicyCfg() # create observation manager cfg = MyObservationManagerCfg() self.obs_man = ObservationManager(cfg, self.env) # compute observation using manager observations = self.obs_man.compute() # check the observation self.assertEqual((self.env.num_envs, 5), observations["policy"].shape) self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5) # check memory in term num_exec_count = 10 for _ in range(num_exec_count): observations = self.obs_man.compute() self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5 * (num_exec_count + 1)) # check reset works self.obs_man.reset(env_ids=[0, 4, 9, 14, 19]) observations = self.obs_man.compute() self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5) self.assertAlmostEqual(observations["policy"][1, -1].item(), 0.2 * 0.5 * (num_exec_count + 2)) def test_non_callable_class_term(self): """Test the observation computation with 
non-callable class term.""" @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @configclass class PolicyCfg(ObservationGroupCfg): """Test config class for policy observation group.""" term_1 = ObservationTermCfg(func=grilled_chicken, scale=10) term_2 = ObservationTermCfg(func=non_callable_complex_function_class, scale=0.2) policy: ObservationGroupCfg = PolicyCfg() # create observation manager config cfg = MyObservationManagerCfg() # create observation manager with self.assertRaises(NotImplementedError): self.obs_man = ObservationManager(cfg, self.env) if __name__ == "__main__": run_tests()
13,651
Python
38.686046
116
0.631456
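A small sketch (plain torch, no ORBIT imports) of what the observation manager does for a concatenated group in `test_compute` above: evaluate each term, apply its scale, and concatenate along the last dimension, so the policy group's width is 4 (grilled_chicken) + 1 (curry) + 3 (pos_w) + 3 (lin_vel_w) = 11.

import torch

num_envs = 20
terms = {
    "term_1": (torch.ones(num_envs, 4), 10.0),
    "term_2": (torch.zeros(num_envs, 1), 0.0),
    "term_3": (torch.rand(num_envs, 3), 2.0),
    "term_4": (torch.rand(num_envs, 3), 1.5),
}
# scale each term and concatenate, mirroring a group with concatenate_terms=True
obs = torch.cat([value * scale for value, scale in terms.values()], dim=-1)
print(obs.shape)  # torch.Size([20, 11])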
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/managers/test_reward_manager.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import unittest from collections import namedtuple from omni.isaac.orbit.managers import RewardManager, RewardTermCfg from omni.isaac.orbit.utils import configclass def grilled_chicken(env): return 1 def grilled_chicken_with_bbq(env, bbq: bool): return 0 def grilled_chicken_with_curry(env, hot: bool): return 0 def grilled_chicken_with_yoghurt(env, hot: bool, bland: float): return 0 class TestRewardManager(unittest.TestCase): """Test cases for various situations with reward manager.""" def setUp(self) -> None: self.env = namedtuple("RLTaskEnv", ["num_envs", "dt", "device"])(20, 0.1, "cpu") def test_str(self): """Test the string representation of the reward manager.""" cfg = { "term_1": RewardTermCfg(func=grilled_chicken, weight=10), "term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}), "term_3": RewardTermCfg( func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0}, ), } self.rew_man = RewardManager(cfg, self.env) self.assertEqual(len(self.rew_man.active_terms), 3) # print the expected string print() print(self.rew_man) def test_config_equivalence(self): """Test the equivalence of reward manager created from different config types.""" # create from dictionary cfg = { "my_term": RewardTermCfg(func=grilled_chicken, weight=10), "your_term": RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True}), "his_term": RewardTermCfg( func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0}, ), } rew_man_from_dict = RewardManager(cfg, self.env) # create from config class @configclass class MyRewardManagerCfg: """Reward manager config with no type annotations.""" my_term = RewardTermCfg(func=grilled_chicken, weight=10.0) your_term = RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True}) his_term = RewardTermCfg(func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0}) cfg = MyRewardManagerCfg() rew_man_from_cfg = RewardManager(cfg, self.env) # create from config class @configclass class MyRewardManagerAnnotatedCfg: """Reward manager config with type annotations.""" my_term: RewardTermCfg = RewardTermCfg(func=grilled_chicken, weight=10.0) your_term: RewardTermCfg = RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True}) his_term: RewardTermCfg = RewardTermCfg( func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0} ) cfg = MyRewardManagerAnnotatedCfg() rew_man_from_annotated_cfg = RewardManager(cfg, self.env) # check equivalence # parsed terms self.assertEqual(rew_man_from_dict.active_terms, rew_man_from_annotated_cfg.active_terms) self.assertEqual(rew_man_from_cfg.active_terms, rew_man_from_annotated_cfg.active_terms) self.assertEqual(rew_man_from_dict.active_terms, rew_man_from_cfg.active_terms) # parsed term configs self.assertEqual(rew_man_from_dict._term_cfgs, rew_man_from_annotated_cfg._term_cfgs) self.assertEqual(rew_man_from_cfg._term_cfgs, rew_man_from_annotated_cfg._term_cfgs) self.assertEqual(rew_man_from_dict._term_cfgs, rew_man_from_cfg._term_cfgs) def test_compute(self): """Test the computation of reward.""" cfg = { "term_1": 
RewardTermCfg(func=grilled_chicken, weight=10), "term_2": RewardTermCfg(func=grilled_chicken_with_curry, weight=0.0, params={"hot": False}), } self.rew_man = RewardManager(cfg, self.env) # compute expected reward expected_reward = cfg["term_1"].weight * self.env.dt # compute reward using manager rewards = self.rew_man.compute(dt=self.env.dt) # check the reward for environment index 0 self.assertEqual(float(rewards[0]), expected_reward) self.assertEqual(tuple(rewards.shape), (self.env.num_envs,)) def test_active_terms(self): """Test the correct reading of active terms.""" cfg = { "term_1": RewardTermCfg(func=grilled_chicken, weight=10), "term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}), "term_3": RewardTermCfg(func=grilled_chicken_with_curry, weight=0.0, params={"hot": False}), } self.rew_man = RewardManager(cfg, self.env) self.assertEqual(len(self.rew_man.active_terms), 3) def test_missing_weight(self): """Test the missing of weight in the config.""" # TODO: The error should be raised during the config parsing, not during the reward manager creation. cfg = { "term_1": RewardTermCfg(func=grilled_chicken, weight=10), "term_2": RewardTermCfg(func=grilled_chicken_with_bbq, params={"bbq": True}), } with self.assertRaises(TypeError): self.rew_man = RewardManager(cfg, self.env) def test_invalid_reward_func_module(self): """Test the handling of invalid reward function's module in string representation.""" cfg = { "term_1": RewardTermCfg(func=grilled_chicken, weight=10), "term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}), "term_3": RewardTermCfg(func="a:grilled_chicken_with_no_bbq", weight=0.1, params={"hot": False}), } with self.assertRaises(ValueError): self.rew_man = RewardManager(cfg, self.env) def test_invalid_reward_config(self): """Test the handling of invalid reward function's config parameters.""" cfg = { "term_1": RewardTermCfg(func=grilled_chicken_with_bbq, weight=0.1, params={"hot": False}), "term_2": RewardTermCfg(func=grilled_chicken_with_yoghurt, weight=2.0, params={"hot": False}), } with self.assertRaises(ValueError): self.rew_man = RewardManager(cfg, self.env) if __name__ == "__main__": run_tests()
6,841
Python
39.011696
120
0.629732
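A back-of-the-envelope sketch (plain Python, mirroring `test_compute` above) of how the reward manager combines terms: each term's value is multiplied by its weight and by the step size `dt`, then summed over terms. With `term_1` returning 1 at weight 10 and dt = 0.1, the expected per-environment reward is 10 * 1 * 0.1 = 1.0, which is exactly what the test asserts.

dt = 0.1
terms = [
    (1, 10.0),  # grilled_chicken
    (0, 0.0),   # grilled_chicken_with_curry
]
reward = sum(value * weight * dt for value, weight in terms)
print(reward)  # 1.0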
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/test_torch.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch import torch.utils.benchmark as benchmark import unittest from omni.isaac.orbit.app import run_tests class TestTorchOperations(unittest.TestCase): """Tests for assuring torch related operations used in Orbit.""" def test_array_slicing(self): """Check that using ellipsis and slices work for torch tensors.""" size = (400, 300, 5) my_tensor = torch.rand(size, device="cuda:0") self.assertEqual(my_tensor[..., 0].shape, (400, 300)) self.assertEqual(my_tensor[:, :, 0].shape, (400, 300)) self.assertEqual(my_tensor[slice(None), slice(None), 0].shape, (400, 300)) with self.assertRaises(IndexError): my_tensor[..., ..., 0] self.assertEqual(my_tensor[0, ...].shape, (300, 5)) self.assertEqual(my_tensor[0, :, :].shape, (300, 5)) self.assertEqual(my_tensor[0, slice(None), slice(None)].shape, (300, 5)) self.assertEqual(my_tensor[0, ..., ...].shape, (300, 5)) self.assertEqual(my_tensor[..., 0, 0].shape, (400,)) self.assertEqual(my_tensor[slice(None), 0, 0].shape, (400,)) self.assertEqual(my_tensor[:, 0, 0].shape, (400,)) def test_array_circular(self): """Check circular buffer implementation in torch.""" size = (10, 30, 5) my_tensor = torch.rand(size, device="cuda:0") # roll up the tensor without cloning my_tensor_1 = my_tensor.clone() my_tensor_1[:, 1:, :] = my_tensor_1[:, :-1, :] my_tensor_1[:, 0, :] = my_tensor[:, -1, :] # check that circular buffer works as expected error = torch.max(torch.abs(my_tensor_1 - my_tensor.roll(1, dims=1))) self.assertNotEqual(error.item(), 0.0) self.assertFalse(torch.allclose(my_tensor_1, my_tensor.roll(1, dims=1))) # roll up the tensor with cloning my_tensor_2 = my_tensor.clone() my_tensor_2[:, 1:, :] = my_tensor_2[:, :-1, :].clone() my_tensor_2[:, 0, :] = my_tensor[:, -1, :] # check that circular buffer works as expected error = torch.max(torch.abs(my_tensor_2 - my_tensor.roll(1, dims=1))) self.assertEqual(error.item(), 0.0) self.assertTrue(torch.allclose(my_tensor_2, my_tensor.roll(1, dims=1))) # roll up the tensor with detach operation my_tensor_3 = my_tensor.clone() my_tensor_3[:, 1:, :] = my_tensor_3[:, :-1, :].detach() my_tensor_3[:, 0, :] = my_tensor[:, -1, :] # check that circular buffer works as expected error = torch.max(torch.abs(my_tensor_3 - my_tensor.roll(1, dims=1))) self.assertNotEqual(error.item(), 0.0) self.assertFalse(torch.allclose(my_tensor_3, my_tensor.roll(1, dims=1))) # roll up the tensor with roll operation my_tensor_4 = my_tensor.clone() my_tensor_4 = my_tensor_4.roll(1, dims=1) my_tensor_4[:, 0, :] = my_tensor[:, -1, :] # check that circular buffer works as expected error = torch.max(torch.abs(my_tensor_4 - my_tensor.roll(1, dims=1))) self.assertEqual(error.item(), 0.0) self.assertTrue(torch.allclose(my_tensor_4, my_tensor.roll(1, dims=1))) def test_array_circular_copy(self): """Check that circular buffer implementation in torch is copying data.""" size = (10, 30, 5) my_tensor = torch.rand(size, device="cuda:0") my_tensor_clone = my_tensor.clone() # roll up the tensor my_tensor_1 = my_tensor.clone() my_tensor_1[:, 1:, :] = my_tensor_1[:, :-1, :].clone() my_tensor_1[:, 0, :] = my_tensor[:, -1, :] # change the source tensor my_tensor[:, 0, :] = 1000 # check that circular buffer works as expected self.assertFalse(torch.allclose(my_tensor_1, my_tensor.roll(1, dims=1))) self.assertTrue(torch.allclose(my_tensor_1, my_tensor_clone.roll(1, dims=1))) def test_array_multi_indexing(self): 
"""Check multi-indexing works for torch tensors.""" size = (400, 300, 5) my_tensor = torch.rand(size, device="cuda:0") # this fails since array indexing cannot be broadcasted!! with self.assertRaises(IndexError): my_tensor[[0, 1, 2, 3], [0, 1, 2, 3, 4]] def test_array_single_indexing(self): """Check how indexing effects the returned tensor.""" size = (400, 300, 5) my_tensor = torch.rand(size, device="cuda:0") # obtain a slice of the tensor my_slice = my_tensor[0, ...] self.assertEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr()) # obtain a slice over ranges my_slice = my_tensor[0:2, ...] self.assertEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr()) # obtain a slice over list my_slice = my_tensor[[0, 1], ...] self.assertNotEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr()) # obtain a slice over tensor my_slice = my_tensor[torch.tensor([0, 1]), ...] self.assertNotEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr()) def test_logical_or(self): """Test bitwise or operation.""" size = (400, 300, 5) my_tensor_1 = torch.rand(size, device="cuda:0") > 0.5 my_tensor_2 = torch.rand(size, device="cuda:0") < 0.5 # check the speed of logical or timer_logical_or = benchmark.Timer( stmt="torch.logical_or(my_tensor_1, my_tensor_2)", globals={"my_tensor_1": my_tensor_1, "my_tensor_2": my_tensor_2}, ) timer_bitwise_or = benchmark.Timer( stmt="my_tensor_1 | my_tensor_2", globals={"my_tensor_1": my_tensor_1, "my_tensor_2": my_tensor_2} ) print("Time for logical or:", timer_logical_or.timeit(number=1000)) print("Time for bitwise or:", timer_bitwise_or.timeit(number=1000)) # check that logical or works as expected output_logical_or = torch.logical_or(my_tensor_1, my_tensor_2) output_bitwise_or = my_tensor_1 | my_tensor_2 self.assertTrue(torch.allclose(output_logical_or, output_bitwise_or)) if __name__ == "__main__": run_tests()
6,424
Python
39.923567
110
0.596357
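A focused sketch of the aliasing pitfall exercised in `test_array_circular` above: writing a shifted slice of a tensor onto itself without cloning may read values that the same assignment has already overwritten, so the result is not a true roll. Cloning the source slice first gives the expected behavior.

import torch

x = torch.rand(10, 30, 5)

bad = x.clone()
bad[:, 1:, :] = bad[:, :-1, :]  # overlapping copy: values may be read after being overwritten
bad[:, 0, :] = x[:, -1, :]

good = x.clone()
good[:, 1:, :] = good[:, :-1, :].clone()  # snapshot the source slice first
good[:, 0, :] = x[:, -1, :]

print(torch.allclose(good, x.roll(1, dims=1)))  # True
print(torch.allclose(bad, x.roll(1, dims=1)))   # False in practice (the test above relies on this)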
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/test_scipy.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

# isort: off
import warnings

warnings.filterwarnings("ignore", category=DeprecationWarning)
# isort: on

import numpy as np
import scipy.interpolate as interpolate
import unittest

from omni.isaac.orbit.app import run_tests


class TestScipyOperations(unittest.TestCase):
    """Tests for assuring scipy related operations used in Orbit."""

    def test_interpolation(self):
        """Test scipy interpolation 2D method."""
        # parameters
        size = (10.0, 12.0)
        horizontal_scale = 0.1
        vertical_scale = 0.005
        downsampled_scale = 0.2
        noise_range = (-0.02, 0.1)
        noise_step = 0.02
        # switch parameters to discrete units
        # -- horizontal scale
        width_pixels = int(size[0] / horizontal_scale)
        length_pixels = int(size[1] / horizontal_scale)
        # -- downsampled scale
        width_downsampled = int(size[0] / downsampled_scale)
        length_downsampled = int(size[1] / downsampled_scale)
        # -- height
        height_min = int(noise_range[0] / vertical_scale)
        height_max = int(noise_range[1] / vertical_scale)
        height_step = int(noise_step / vertical_scale)

        # create range of heights possible
        height_range = np.arange(height_min, height_max + height_step, height_step)
        # sample heights randomly from the range along a grid
        height_field_downsampled = np.random.choice(height_range, size=(width_downsampled, length_downsampled))
        # create interpolation function for the sampled heights
        x = np.linspace(0, size[0] * horizontal_scale, width_downsampled)
        y = np.linspace(0, size[1] * horizontal_scale, length_downsampled)

        # interpolate the sampled heights to obtain the height field
        x_upsampled = np.linspace(0, size[0] * horizontal_scale, width_pixels)
        y_upsampled = np.linspace(0, size[1] * horizontal_scale, length_pixels)
        # -- method 1: interp2d (deprecated since SciPy 1.10 and removed in SciPy 1.14)
        func_interp2d = interpolate.interp2d(y, x, height_field_downsampled, kind="cubic")
        z_upsampled_interp2d = func_interp2d(y_upsampled, x_upsampled)
        # -- method 2: RectBivariateSpline (alternative to interp2d)
        func_RectBiVariate = interpolate.RectBivariateSpline(x, y, height_field_downsampled)
        z_upsampled_RectBivariant = func_RectBiVariate(x_upsampled, y_upsampled)
        # -- method 3: RegularGridInterpolator (recommended by scipy but slow!)
        # Ref: https://github.com/scipy/scipy/issues/18010
        func_RegularGridInterpolator = interpolate.RegularGridInterpolator(
            (x, y), height_field_downsampled, method="cubic"
        )
        xx_upsampled, yy_upsampled = np.meshgrid(x_upsampled, y_upsampled, indexing="ij", sparse=True)
        z_upsampled_RegularGridInterpolator = func_RegularGridInterpolator((xx_upsampled, yy_upsampled))

        # check that all three interpolation methods produce the same height field
        np.testing.assert_allclose(z_upsampled_interp2d, z_upsampled_RectBivariant, atol=1e-14)
        np.testing.assert_allclose(z_upsampled_RectBivariant, z_upsampled_RegularGridInterpolator, atol=1e-14)
        np.testing.assert_allclose(z_upsampled_RegularGridInterpolator, z_upsampled_interp2d, atol=1e-14)


if __name__ == "__main__":
    run_tests()
3,498
Python
43.858974
111
0.684105
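A short sketch (plain numpy/scipy, mirroring the test above) of the argument-order trap when migrating off `interp2d`: `interp2d(y, x, z)` pairs its first argument with axis 1 of `z`, whereas `RectBivariateSpline(x, y, z)` and `RegularGridInterpolator((x, y), z)` pair `x` with axis 0. That is why the test swaps the call arguments for method 1 so that all methods agree.

import numpy as np
from scipy.interpolate import RectBivariateSpline, RegularGridInterpolator

x = np.linspace(0.0, 1.0, 7)  # axis 0 of z
y = np.linspace(0.0, 2.0, 9)  # axis 1 of z
z = np.random.rand(7, 9)

spline = RectBivariateSpline(x, y, z)
grid = RegularGridInterpolator((x, y), z, method="linear")

# evaluating at the grid nodes recovers z for both conventions
xq, yq = np.meshgrid(x, y, indexing="ij", sparse=True)
np.testing.assert_allclose(spline(x, y), grid((xq, yq)), atol=1e-12)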
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script shows the issue with renderer in Isaac Sim that affects episodic resets. The first few images of every new episode are not updated. They take multiple steps to update and have the same image as the previous episode for the first few steps. ``` # run with cube _isaac_sim/python.sh source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_camera.py --scenario cube # run with anymal _isaac_sim/python.sh source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_camera.py --scenario anymal ``` """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse # omni-isaac-orbit from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser( description="This script shows the issue with renderer in Isaac Sim that affects episodic resets." ) parser.add_argument("--gpu", action="store_true", default=False, help="Use GPU device for camera rendering output.") parser.add_argument("--scenario", type=str, default="anymal", help="Scenario to load.", choices=["anymal", "cube"]) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import numpy as np import os import random import omni.isaac.core.utils.nucleus as nucleus_utils import omni.isaac.core.utils.prims as prim_utils import omni.replicator.core as rep from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView from omni.isaac.core.utils.carb import set_carb_setting from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.core.world import World from PIL import Image, ImageChops from pxr import Gf, UsdGeom # check nucleus connection if nucleus_utils.get_assets_root_path() is None: msg = ( "Unable to perform Nucleus login on Omniverse. 
Assets root path is not set.\n" "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus" ) raise RuntimeError(msg) ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac" """Path to the `Isaac` directory on the NVIDIA Nucleus Server.""" def main(): """Runs a camera sensor from orbit.""" # Load kit helper world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu") # Set main camera set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) # Enable flatcache which avoids passing data over to USD structure # this speeds up the read-write operation of GPU buffers if world.get_physics_context().use_gpu_pipeline: world.get_physics_context().enable_flatcache(True) # Enable hydra scene-graph instancing # this is needed to visualize the scene when flatcache is enabled set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True) # Populate scene # Ground world.scene.add_default_ground_plane() # Lights-1 prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0)) # Lights-2 prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0)) # Xform to hold objects if args_cli.scenario == "cube": prim_utils.create_prim("/World/Objects", "Xform") # Random objects for i in range(8): # sample random position position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0]) position *= np.asarray([1.5, 1.5, 0.5]) # create prim prim_type = random.choice(["Cube", "Sphere", "Cylinder"]) _ = prim_utils.create_prim( f"/World/Objects/Obj_{i:02d}", prim_type, translation=position, scale=(0.25, 0.25, 0.25), semantic_label=prim_type, ) # add rigid properties GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) rigid_obj = RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) # cast to geom prim geom_prim = getattr(UsdGeom, prim_type)(rigid_obj.prim) # set random color color = Gf.Vec3f(random.random(), random.random(), random.random()) geom_prim.CreateDisplayColorAttr() geom_prim.GetDisplayColorAttr().Set([color]) # Setup camera sensor on the world cam_prim_path = "/World/CameraSensor" else: # Robot prim_utils.create_prim( "/World/Robot", usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd", translation=(0.0, 0.0, 0.6), ) # Setup camera sensor on the robot cam_prim_path = "/World/CameraSensor" # Create camera cam_prim = prim_utils.create_prim( cam_prim_path, prim_type="Camera", translation=(5.0, 5.0, 5.0), orientation=(0.33985113, 0.17591988, 0.42470818, 0.82047324), ) _ = UsdGeom.Camera(cam_prim) # Get render product render_prod_path = rep.create.render_product(cam_prim_path, resolution=(640, 480)) # create annotator node rep_registry = {} for name in ["rgb", "distance_to_image_plane"]: # create annotator rep_annotator = rep.AnnotatorRegistry.get_annotator(name, device="cpu") rep_annotator.attach(render_prod_path) # add to registry rep_registry[name] = rep_annotator # Create replicator writer output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera", args_cli.scenario) os.makedirs(output_dir, exist_ok=True) # Create a view of the stuff we want to see if args_cli.scenario == "cube": view: RigidPrimView = world.scene.add(RigidPrimView("/World/Objects/.*", name="my_object")) else: view: ArticulationView = world.scene.add(ArticulationView("/World/Robot", name="my_object")) # Play simulator world.reset() # Get initial state if args_cli.scenario == "cube": initial_pos, initial_quat = 
view.get_world_poses()
        initial_joint_pos = None
        initial_joint_vel = None
    else:
        initial_pos, initial_quat = view.get_world_poses()
        initial_joint_pos = view.get_joint_positions()
        initial_joint_vel = view.get_joint_velocities()

    # Simulate for a few steps
    # note: This is a workaround to ensure that the textures are loaded.
    #   Check "Known Issues" section in the documentation for more details.
    for _ in range(5):
        world.step(render=True)
    # Counter
    count = 0
    prev_im = None
    # make episode directory
    episode_count = 0
    episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}")
    os.makedirs(episode_dir, exist_ok=True)

    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if world.is_stopped():
            break
        # If simulation is paused, then skip.
        if not world.is_playing():
            world.step(render=False)
            continue
        # Reset on intervals
        if count % 25 == 0:
            # reset all the state
            view.set_world_poses(initial_pos, initial_quat)
            if initial_joint_pos is not None:
                view.set_joint_positions(initial_joint_pos)
            if initial_joint_vel is not None:
                view.set_joint_velocities(initial_joint_vel)
            # make a new episode directory
            episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}")
            os.makedirs(episode_dir, exist_ok=True)
            # reset counters
            count = 0
            episode_count += 1
        # Step simulation
        for _ in range(15):
            world.step(render=False)
        world.render()
        # Update camera data
        rgb_data = rep_registry["rgb"].get_data()
        depth_data = rep_registry["distance_to_image_plane"].get_data()
        # Show current image number
        print(f"[Epi {episode_count:03d}] Current image number: {count:06d}")
        # Save data
        curr_im = Image.fromarray(rgb_data)
        curr_im.save(os.path.join(episode_dir, f"{count:06d}_rgb.png"))
        # Save diff
        if prev_im is not None:
            diff_im = ImageChops.difference(curr_im, prev_im)
            # convert to grayscale and threshold
            diff_im = diff_im.convert("L")
            threshold = 30
            diff_im = diff_im.point(lambda p: 255 if p > threshold else 0)
            # Save all of them together
            dst_im = Image.new("RGB", (curr_im.width + prev_im.width + diff_im.width, diff_im.height))
            dst_im.paste(prev_im, (0, 0))
            dst_im.paste(curr_im, (prev_im.width, 0))
            dst_im.paste(diff_im, (2 * prev_im.width, 0))
            dst_im.save(os.path.join(episode_dir, f"{count:06d}_diff.png"))
        # Save to previous
        prev_im = curr_im.copy()
        # Update counter
        count += 1
        # Print camera info
        print("Received shape of rgb image: ", rgb_data.shape)
        print("Received shape of depth image: ", depth_data.shape)
        print("-------------------------------")


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
9,609
Python
36.98419
117
0.634093
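A minimal sketch (plain PIL/numpy, no Isaac Sim) of the frame-difference check the script above uses to expose stale renders: subtract consecutive frames, convert to grayscale, and binarize with a threshold, so an unchanged (stale) frame shows up as an all-black difference image. The two synthetic frames here are hypothetical stand-ins for consecutive camera captures.

import numpy as np
from PIL import Image, ImageChops

prev_im = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
curr_im = Image.fromarray(np.full((480, 640, 3), 40, dtype=np.uint8))

# difference, grayscale, then binarize: pixels that changed by more than the threshold
diff_im = ImageChops.difference(curr_im, prev_im).convert("L")
threshold = 30
diff_im = diff_im.point(lambda p: 255 if p > threshold else 0)
print(diff_im.getextrema())  # (255, 255): every pixel changed beyond the threshold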
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_legged_robot_clone.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script demonstrates how to use the cloner API from Isaac Sim. Reference: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gym_cloner.html """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser( description="This script shows the issue in Isaac Sim with GPU simulation of floating robots." ) parser.add_argument("--num_robots", type=int, default=128, help="Number of robots to spawn.") parser.add_argument( "--asset", type=str, default="orbit", help="The asset source location for the robot. Can be: orbit, oige, custom asset path.", ) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import os import torch import carb import omni.isaac.core.utils.nucleus as nucleus_utils import omni.isaac.core.utils.prims as prim_utils from omni.isaac.cloner import GridCloner from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.carb import set_carb_setting from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.core.world import World # check nucleus connection if nucleus_utils.get_assets_root_path() is None: msg = ( "Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n" "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus" ) carb.log_error(msg) raise RuntimeError(msg) ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac" """Path to the `Isaac` directory on the NVIDIA Nucleus Server.""" ISAAC_ORBIT_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac/Samples/Orbit" """Path to the `Isaac/Samples/Orbit` directory on the NVIDIA Nucleus Server.""" """ Main """ def main(): """Spawns the ANYmal robot and clones it using Isaac Sim Cloner API.""" # Load kit helper world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0") # Set main camera set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) # Enable hydra scene-graph instancing # this is needed to visualize the scene when flatcache is enabled set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True) # Create interface to clone the scene cloner = GridCloner(spacing=2.0) cloner.define_base_env("/World/envs") # Everything under the namespace "/World/envs/env_0" will be cloned prim_utils.define_prim("/World/envs/env_0") # Spawn things into stage # Ground-plane world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0) # Lights-1 prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0)) # Lights-2 prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0)) # -- Robot # resolve asset if args_cli.asset == "orbit": usd_path = f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd" root_prim_path = "/World/envs/env_.*/Robot/base" elif args_cli.asset == "oige": usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd" root_prim_path = "/World/envs/env_.*/Robot" elif os.path.exists(args_cli.asset): usd_path = args_cli.asset else: raise ValueError(f"Invalid 
asset: {args_cli.asset}. Must be one of: orbit, oige, or a valid USD asset path.")
    # add asset
    print("Loading robot from: ", usd_path)
    prim_utils.create_prim(
        "/World/envs/env_0/Robot",
        usd_path=usd_path,
        translation=(0.0, 0.0, 0.6),
    )

    # Clone the scene
    num_envs = args_cli.num_robots
    cloner.define_base_env("/World/envs")
    envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
    envs_positions = cloner.clone(
        source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True
    )
    # convert environment positions to torch tensor
    envs_positions = torch.tensor(envs_positions, dtype=torch.float, device=world.device)
    # filter collisions within each environment instance
    physics_scene_path = world.get_physics_context().prim_path
    cloner.filter_collisions(
        physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"]
    )

    # Resolve robot prim paths
    if args_cli.asset == "orbit":
        root_prim_path = "/World/envs/env_.*/Robot/base"
    elif args_cli.asset == "oige":
        root_prim_path = "/World/envs/env_.*/Robot"
    elif os.path.exists(args_cli.asset):
        root_prim_path = "/World/envs/env_.*/Robot"
    else:
        raise ValueError(f"Invalid asset: {args_cli.asset}. Must be one of: orbit, oige, or a valid USD asset path.")
    # Setup robot
    robot_view = ArticulationView(root_prim_path, name="ANYMAL")
    world.scene.add(robot_view)

    # Play the simulator
    world.reset()

    # Now we are ready!
    print("[INFO]: Setup complete...")

    # dummy actions
    # actions = torch.zeros(robot.count, robot.num_actions, device=robot.device)

    # Define simulation stepping
    sim_dt = world.get_physics_dt()
    # simulation time counter
    sim_time = 0.0
    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if world.is_stopped():
            break
        # If simulation is paused, then skip.
        if not world.is_playing():
            world.step(render=False)
            continue
        # perform step
        world.step()
        # update sim-time
        sim_time += sim_dt


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
6,142
Python
32.568306
117
0.677629
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_rep_texture_randomizer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script shows how to use replicator to randomly change the textures of a USD scene. Note: Currently this script fails since cloner does not support changing textures of cloned USD prims. This is because the prims are cloned using `Sdf.ChangeBlock` which does not allow individual texture changes. Usage: .. code-block:: bash ./orbit.sh -p source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_rep_texture_randomizer.py """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse # omni-isaac-orbit from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser( description="This script shows how to use replicator to randomly change the textures of a USD scene." ) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import numpy as np import torch import omni.isaac.core.utils.prims as prim_utils import omni.replicator.core as rep from omni.isaac.cloner import GridCloner from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.viewports import set_camera_view def main(): """Spawn a bunch of balls and randomly change their textures.""" # Load kit helper sim_params = { "use_gpu": True, "use_gpu_pipeline": True, "use_flatcache": True, # deprecated from Isaac Sim 2023.1 onwards "use_fabric": True, # used from Isaac Sim 2023.1 onwards "enable_scene_query_support": True, } sim = SimulationContext( physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0" ) # Set main camera set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5]) # Parameters num_balls = 128 # Create interface to clone the scene cloner = GridCloner(spacing=2.0) cloner.define_base_env("/World/envs") # Everything under the namespace "/World/envs/env_0" will be cloned prim_utils.define_prim("/World/envs/env_0") # Define the scene # -- Ball DynamicSphere(prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25) # Clone the scene cloner.define_base_env("/World/envs") envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls) env_positions = cloner.clone( source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True, copy_from_source=True ) physics_scene_path = sim.get_physics_context().prim_path cloner.filter_collisions( physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] ) # Use replicator to randomize color on the spheres with rep.new_layer(): # Define a function to get all the shapes def get_shapes(): shapes = rep.get.prims(path_pattern="/World/envs/env_.*/ball") with shapes: rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1))) return shapes.node # Register the function rep.randomizer.register(get_shapes) # Specify the frequency of randomization with rep.trigger.on_frame(): rep.randomizer.get_shapes() # Set ball positions over terrain origins # Create a view over all the balls ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False) # cache initial state of the balls 
ball_initial_positions = torch.tensor(env_positions, dtype=torch.float, device=sim.device) ball_initial_positions[:, 2] += 5.0 # set initial poses # note: setting here writes to USD :) ball_view.set_world_poses(positions=ball_initial_positions) # Play simulator sim.reset() # Step replicator to randomize colors rep.orchestrator.step(pause_timeline=False) # Stop replicator to prevent further randomization rep.orchestrator.stop() # Pause simulator at the beginning for inspection sim.pause() # Initialize the ball views for physics simulation ball_view.initialize() ball_initial_velocities = ball_view.get_velocities() # Create a counter for resetting the scene step_count = 0 # Simulate physics while simulation_app.is_running(): # If simulation is stopped, then exit. if sim.is_stopped(): break # If simulation is paused, then skip. if not sim.is_playing(): sim.step() continue # Reset the scene if step_count % 500 == 0: # reset the balls ball_view.set_world_poses(positions=ball_initial_positions) ball_view.set_velocities(ball_initial_velocities) # reset the counter step_count = 0 # Step simulation sim.step() # Update counter step_count += 1 if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
5,412
Python
31.413173
119
0.671286
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_app.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This script shows the issue with launching Isaac Sim application in headless mode. On launching the application in headless mode, the application does not exit gracefully. There are multiple warnings and errors that are printed on the console. ``` _isaac_sim/python.sh source/extensions/omni.isaac.orbit/test/deps/isaacsim/check_app.py ``` Output: ``` [10.948s] Simulation App Startup Complete [11.471s] Simulation App Shutting Down ...... [Warning] [carb] [Plugin: omni.spectree.delegate.plugin] Module /media/vulcan/packman-repo/chk/kit-sdk/105.1+release.129498.98d86eae.tc.linux-x86_64.release/exts/omni.usd_resolver/bin/libomni.spectree.delegate.plugin.so remained loaded after unload request ...... [Warning] [omni.core.ITypeFactory] Module /media/vulcan/packman-repo/chk/kit-sdk/105.1+release.129498.98d86eae.tc.linux-x86_64.release/exts/omni.graph.action/bin/libomni.graph.action.plugin.so remained loaded after unload request. ...... [Warning] [omni.core.ITypeFactory] Module /media/vulcan/packman-repo/chk/kit-sdk/105.1+release.129498.98d86eae.tc.linux-x86_64.release/exts/omni.activity.core/bin/libomni.activity.core.plugin.so remained loaded after unload request. ``` """ from __future__ import annotations from omni.isaac.kit import SimulationApp if __name__ == "__main__": app = SimulationApp({"headless": True}) app.close()
1,481
Python
41.342856
263
0.763673
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/controllers/test_differential_ik.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import torch import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.cloner import GridCloner import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.assets import Articulation from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg from omni.isaac.orbit.utils.math import compute_pose_error, subtract_frame_transforms ## # Pre-defined configs ## from omni.isaac.orbit_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG # isort:skip class TestDifferentialIKController(unittest.TestCase): """Test fixture for checking that differential IK controller tracks commands properly.""" def setUp(self): """Create a blank new stage for each test.""" # Wait for spawning stage_utils.create_new_stage() # Constants self.num_envs = 128 # Load kit helper sim_cfg = sim_utils.SimulationCfg(dt=0.01) self.sim = sim_utils.SimulationContext(sim_cfg) # TODO: Remove this once we have a better way to handle this. self.sim._app_control_on_stop_handle = None # Create a ground plane cfg = sim_utils.GroundPlaneCfg() cfg.func("/World/GroundPlane", cfg) # Create interface to clone the scene cloner = GridCloner(spacing=2.0) cloner.define_base_env("/World/envs") self.env_prim_paths = cloner.generate_paths("/World/envs/env", self.num_envs) # create source prim prim_utils.define_prim(self.env_prim_paths[0], "Xform") # clone the env xform self.env_origins = cloner.clone( source_prim_path=self.env_prim_paths[0], prim_paths=self.env_prim_paths, replicate_physics=True, ) # Define goals for the arm ee_goals_set = [ [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0], [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0], [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0], ] self.ee_pose_b_des_set = torch.tensor(ee_goals_set, device=self.sim.device) def tearDown(self): """Stops simulator after each test.""" # stop simulation self.sim.stop() self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() """ Test fixtures. 
""" def test_franka_ik_pose_abs(self): """Test IK controller for Franka arm with Franka hand.""" # Create robot instance robot_cfg = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="/World/envs/env_.*/Robot") robot = Articulation(cfg=robot_cfg) # Create IK controller diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls") diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=self.num_envs, device=self.sim.device) # Run the controller and check that it converges to the goal self._run_ik_controller(robot, diff_ik_controller, "panda_hand", ["panda_joint.*"]) def test_ur10_ik_pose_abs(self): """Test IK controller for UR10 arm.""" # Create robot instance robot_cfg = UR10_CFG.replace(prim_path="/World/envs/env_.*/Robot") robot_cfg.spawn.rigid_props.disable_gravity = True robot = Articulation(cfg=robot_cfg) # Create IK controller diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls") diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=self.num_envs, device=self.sim.device) # Run the controller and check that it converges to the goal self._run_ik_controller(robot, diff_ik_controller, "ee_link", [".*"]) """ Helper functions. """ def _run_ik_controller( self, robot: Articulation, diff_ik_controller: DifferentialIKController, ee_frame_name: str, arm_joint_names: list[str], ): # Define simulation stepping sim_dt = self.sim.get_physics_dt() # Play the simulator self.sim.reset() # Obtain the frame index of the end-effector ee_frame_idx = robot.find_bodies(ee_frame_name)[0][0] ee_jacobi_idx = ee_frame_idx - 1 # Obtain joint indices arm_joint_ids = robot.find_joints(arm_joint_names)[0] # Update existing buffers # Note: We need to update buffers before the first step for the controller. robot.update(dt=sim_dt) # Track the given command current_goal_idx = 0 # Current goal for the arm ee_pose_b_des = torch.zeros(self.num_envs, diff_ik_controller.action_dim, device=self.sim.device) ee_pose_b_des[:] = self.ee_pose_b_des_set[current_goal_idx] # Compute current pose of the end-effector ee_pose_w = robot.data.body_state_w[:, ee_frame_idx, 0:7] root_pose_w = robot.data.root_state_w[:, 0:7] ee_pos_b, ee_quat_b = subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] ) # Now we are ready! 
for count in range(1500): # reset every 250 steps if count % 250 == 0: # check that we converged to the goal if count > 0: pos_error, rot_error = compute_pose_error( ee_pos_b, ee_quat_b, ee_pose_b_des[:, 0:3], ee_pose_b_des[:, 3:7] ) pos_error_norm = torch.norm(pos_error, dim=-1) rot_error_norm = torch.norm(rot_error, dim=-1) # desired error (zero) des_error = torch.zeros_like(pos_error_norm) # check convergence torch.testing.assert_close(pos_error_norm, des_error, rtol=0.0, atol=1e-3) torch.testing.assert_close(rot_error_norm, des_error, rtol=0.0, atol=1e-3) # reset joint state joint_pos = robot.data.default_joint_pos.clone() joint_vel = robot.data.default_joint_vel.clone() # joint_pos *= sample_uniform(0.9, 1.1, joint_pos.shape, joint_pos.device) robot.write_joint_state_to_sim(joint_pos, joint_vel) robot.set_joint_position_target(joint_pos) robot.write_data_to_sim() robot.reset() # reset actions ee_pose_b_des[:] = self.ee_pose_b_des_set[current_goal_idx] joint_pos_des = joint_pos[:, arm_joint_ids].clone() # update goal for next iteration current_goal_idx = (current_goal_idx + 1) % len(self.ee_pose_b_des_set) # set the controller commands diff_ik_controller.reset() diff_ik_controller.set_command(ee_pose_b_des) else: # at reset, the jacobians are not updated to the latest state # so we MUST skip the first step # obtain quantities from simulation jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] ee_pose_w = robot.data.body_state_w[:, ee_frame_idx, 0:7] root_pose_w = robot.data.root_state_w[:, 0:7] joint_pos = robot.data.joint_pos[:, arm_joint_ids] # compute frame in root frame ee_pos_b, ee_quat_b = subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] ) # compute the joint commands joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos) # apply actions robot.set_joint_position_target(joint_pos_des, arm_joint_ids) robot.write_data_to_sim() # perform step self.sim.step(render=False) # update buffers robot.update(sim_dt) if __name__ == "__main__": run_tests()
8,463
Python
39.497607
114
0.59317
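The loop above compresses the controller's full command flow. A minimal sketch, assuming a running simulation; the placeholder sizes (7-DoF arm, 6-row jacobian) and the zero tensors are assumptions standing in for the articulation state used in the test:
```
import torch

from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg

num_envs, num_joints, device = 4, 7, "cuda:0"  # placeholder sizes (assumption)

# absolute pose commands solved with damped least-squares IK
diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=num_envs, device=device)

# desired end-effector pose in the base frame: (x, y, z, qw, qx, qy, qz)
ee_pose_b_des = torch.tensor([[0.5, 0.0, 0.5, 0.0, 1.0, 0.0, 0.0]], device=device).repeat(num_envs, 1)
diff_ik_controller.reset()
diff_ik_controller.set_command(ee_pose_b_des)

# placeholder state; in the test these come from the articulation and the PhysX jacobians
ee_pos_b = torch.zeros(num_envs, 3, device=device)
ee_quat_b = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device).repeat(num_envs, 1)
jacobian = torch.zeros(num_envs, 6, num_joints, device=device)
joint_pos = torch.zeros(num_envs, num_joints, device=device)

# one control step: desired joint positions that drive the pose error to zero
joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)
```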
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_spawn_from_files.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from omni.isaac.orbit.app import AppLauncher, run_tests """Launch Isaac Sim Simulator first.""" # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR class TestSpawningFromFiles(unittest.TestCase): """Test fixture for checking spawning of USD references from files with different settings.""" def setUp(self) -> None: """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.1 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") # Wait for spawning stage_utils.update_stage() def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() """ Basic spawning. """ def test_spawn_usd(self): """Test loading prim from Usd file.""" # Spawn cone cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd") prim = cfg.func("/World/Franka", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/Franka")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform") def test_spawn_urdf(self): """Test loading prim from URDF file.""" # retrieve path to urdf importer extension enable_extension("omni.importer.urdf") extension_path = get_extension_path_from_name("omni.importer.urdf") # Spawn franka from URDF cfg = sim_utils.UrdfFileCfg( asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf", fix_base=True ) prim = cfg.func("/World/Franka", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/Franka")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform") def test_spawn_ground_plane(self): """Test loading prim for the ground plane from grid world USD.""" # Spawn ground plane cfg = sim_utils.GroundPlaneCfg(color=(0.1, 0.1, 0.1), size=(10.0, 10.0)) prim = cfg.func("/World/ground_plane", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/ground_plane")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform") if __name__ == "__main__": run_tests()
3,226
Python
34.855555
120
0.66057
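For reference, a minimal sketch of the spawning pattern the test exercises, assuming a stage already exists: every spawner config carries its own spawn function under `cfg.func`.
```
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

# spawn a USD reference and get back the created prim
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd")
prim = cfg.func("/World/Franka", cfg)
print(prim.GetPrimTypeInfo().GetTypeName())  # "Xform"
```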
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_urdf_converter.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app config = {"headless": True} simulation_app = AppLauncher(config).app """Rest everything follows.""" import math import numpy as np import os import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.simulation_context import SimulationContext from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name from omni.isaac.orbit.sim.converters import UrdfConverter, UrdfConverterCfg class TestUrdfConverter(unittest.TestCase): """Test fixture for the UrdfConverter class.""" def setUp(self): """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # retrieve path to urdf importer extension enable_extension("omni.importer.urdf") extension_path = get_extension_path_from_name("omni.importer.urdf") # default configuration self.config = UrdfConverterCfg( asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf", fix_base=True ) # Simulation time-step self.dt = 0.01 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() # cleanup stage and context self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() def test_no_change(self): """Call conversion twice. This should not generate a new USD file.""" urdf_converter = UrdfConverter(self.config) time_usd_file_created = os.stat(urdf_converter.usd_path).st_mtime_ns # no change to config only define the usd directory new_config = self.config new_config.usd_dir = urdf_converter.usd_dir # convert to usd but this time in the same directory as previous step new_urdf_converter = UrdfConverter(new_config) new_time_usd_file_created = os.stat(new_urdf_converter.usd_path).st_mtime_ns self.assertEqual(time_usd_file_created, new_time_usd_file_created) def test_config_change(self): """Call conversion twice but change the config in the second call. 
This should generate a new USD file.""" urdf_converter = UrdfConverter(self.config) time_usd_file_created = os.stat(urdf_converter.usd_path).st_mtime_ns # change the config new_config = self.config new_config.fix_base = not self.config.fix_base # define the usd directory new_config.usd_dir = urdf_converter.usd_dir # convert to usd but this time in the same directory as previous step new_urdf_converter = UrdfConverter(new_config) new_time_usd_file_created = os.stat(new_urdf_converter.usd_path).st_mtime_ns self.assertNotEqual(time_usd_file_created, new_time_usd_file_created) def test_create_prim_from_usd(self): """Call conversion and create a prim from it.""" urdf_converter = UrdfConverter(self.config) prim_path = "/World/Robot" prim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) self.assertTrue(prim_utils.is_prim_path_valid(prim_path)) def test_config_drive_type(self): """Change the drive mechanism of the robot to be position.""" # Create directory to dump results test_dir = os.path.dirname(os.path.abspath(__file__)) output_dir = os.path.join(test_dir, "output", "urdf_converter") if not os.path.exists(output_dir): os.makedirs(output_dir, exist_ok=True) # change the config self.config.default_drive_type = "position" self.config.default_drive_stiffness = 400.0 self.config.default_drive_damping = 40.0 self.config.usd_dir = output_dir urdf_converter = UrdfConverter(self.config) # check the drive type of the robot prim_path = "/World/Robot" prim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) # access the robot robot = ArticulationView(prim_path, reset_xform_properties=False) # play the simulator and initialize the robot self.sim.reset() robot.initialize() # check drive values for the robot (read from physx) drive_stiffness, drive_damping = robot.get_gains() # -- for the arm (revolute joints) # user provides the values in radians but simulator sets them as in degrees expected_drive_stiffness = math.degrees(self.config.default_drive_stiffness) expected_drive_damping = math.degrees(self.config.default_drive_damping) np.testing.assert_array_equal(drive_stiffness[:, :7], expected_drive_stiffness) np.testing.assert_array_equal(drive_damping[:, :7], expected_drive_damping) # -- for the hand (prismatic joints) # note: from isaac sim 2023.1, the test asset has mimic joints for the hand # so the mimic joint doesn't have drive values expected_drive_stiffness = self.config.default_drive_stiffness expected_drive_damping = self.config.default_drive_damping np.testing.assert_array_equal(drive_stiffness[:, 7], expected_drive_stiffness) np.testing.assert_array_equal(drive_damping[:, 7], expected_drive_damping) # check drive values for the robot (read from usd) self.sim.stop() drive_stiffness, drive_damping = robot.get_gains() # -- for the arm (revolute joints) # user provides the values in radians but simulator sets them as in degrees expected_drive_stiffness = math.degrees(self.config.default_drive_stiffness) expected_drive_damping = math.degrees(self.config.default_drive_damping) np.testing.assert_array_equal(drive_stiffness[:, :7], expected_drive_stiffness) np.testing.assert_array_equal(drive_damping[:, :7], expected_drive_damping) # -- for the hand (prismatic joints) # note: from isaac sim 2023.1, the test asset has mimic joints for the hand # so the mimic joint doesn't have drive values expected_drive_stiffness = self.config.default_drive_stiffness expected_drive_damping = self.config.default_drive_damping np.testing.assert_array_equal(drive_stiffness[:, 7], 
expected_drive_stiffness) np.testing.assert_array_equal(drive_damping[:, 7], expected_drive_damping) if __name__ == "__main__": run_tests()
6,926
Python
41.496932
120
0.677736
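A minimal sketch of the caching behaviour the first two tests check: re-running the converter with an unchanged config and a pinned `usd_dir` reuses the generated USD file. The sample URDF path assumes the urdf importer extension is available, as in the test.
```
from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name

from omni.isaac.orbit.sim.converters import UrdfConverter, UrdfConverterCfg

# the sample URDF ships with the urdf importer extension
enable_extension("omni.importer.urdf")
extension_path = get_extension_path_from_name("omni.importer.urdf")

cfg = UrdfConverterCfg(
    asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf",
    fix_base=True,
)
converter = UrdfConverter(cfg)

# pin the output directory: a second run with the same config reuses the USD file
cfg.usd_dir = converter.usd_dir
assert UrdfConverter(cfg).usd_path == converter.usd_path
```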
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_schemas.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.simulation_context import SimulationContext from pxr import UsdPhysics import omni.isaac.orbit.sim.schemas as schemas from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR from omni.isaac.orbit.utils.string import to_camel_case class TestPhysicsSchema(unittest.TestCase): """Test fixture for checking schemas modifications through Orbit.""" def setUp(self) -> None: """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.1 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") # Set some default values for test self.arti_cfg = schemas.ArticulationRootPropertiesCfg( enabled_self_collisions=False, articulation_enabled=True, solver_position_iteration_count=4, solver_velocity_iteration_count=1, sleep_threshold=1.0, stabilization_threshold=5.0, ) self.rigid_cfg = schemas.RigidBodyPropertiesCfg( rigid_body_enabled=True, kinematic_enabled=False, disable_gravity=False, linear_damping=0.1, angular_damping=0.5, max_linear_velocity=1000.0, max_angular_velocity=1000.0, max_depenetration_velocity=10.0, max_contact_impulse=10.0, enable_gyroscopic_forces=True, retain_accelerations=True, solver_position_iteration_count=8, solver_velocity_iteration_count=1, sleep_threshold=1.0, stabilization_threshold=6.0, ) self.collision_cfg = schemas.CollisionPropertiesCfg( collision_enabled=True, contact_offset=0.05, rest_offset=0.001, min_torsional_patch_radius=0.1, torsional_patch_radius=1.0, ) self.mass_cfg = schemas.MassPropertiesCfg(mass=1.0, density=100.0) self.joint_cfg = schemas.JointDrivePropertiesCfg(drive_type="acceleration") def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() def test_valid_properties_cfg(self): """Test that all the config instances have non-None values. This is to ensure that we check that all the properties of the schema are set. """ for cfg in [self.arti_cfg, self.rigid_cfg, self.collision_cfg, self.mass_cfg, self.joint_cfg]: # check nothing is none for k, v in cfg.__dict__.items(): self.assertIsNotNone(v, f"{cfg.__class__.__name__}:{k} is None. Please make sure schemas are valid.") def test_modify_properties_on_invalid_prim(self): """Test modifying properties on a prim that does not exist.""" # set properties with self.assertRaises(ValueError): schemas.modify_rigid_body_properties("/World/asset_xyz", self.rigid_cfg) def test_modify_properties_on_articulation_instanced_usd(self): """Test modifying properties on articulation instanced usd. In this case, modifying collision properties on the articulation instanced usd will fail. 
""" # spawn asset to the stage asset_usd_file = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd" prim_utils.create_prim("/World/asset_instanced", usd_path=asset_usd_file, translation=(0.0, 0.0, 0.62)) # set properties on the asset and check all properties are set schemas.modify_articulation_root_properties("/World/asset_instanced", self.arti_cfg) schemas.modify_rigid_body_properties("/World/asset_instanced", self.rigid_cfg) schemas.modify_mass_properties("/World/asset_instanced", self.mass_cfg) schemas.modify_joint_drive_properties("/World/asset_instanced", self.joint_cfg) # validate the properties self._validate_articulation_properties_on_prim("/World/asset_instanced") self._validate_rigid_body_properties_on_prim("/World/asset_instanced") self._validate_mass_properties_on_prim("/World/asset_instanced") self._validate_joint_drive_properties_on_prim("/World/asset_instanced") def test_modify_properties_on_articulation_usd(self): """Test setting properties on articulation usd.""" # spawn asset to the stage asset_usd_file = f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka.usd" prim_utils.create_prim("/World/asset", usd_path=asset_usd_file, translation=(0.0, 0.0, 0.62)) # set properties on the asset and check all properties are set schemas.modify_articulation_root_properties("/World/asset", self.arti_cfg) schemas.modify_rigid_body_properties("/World/asset", self.rigid_cfg) schemas.modify_collision_properties("/World/asset", self.collision_cfg) schemas.modify_mass_properties("/World/asset", self.mass_cfg) schemas.modify_joint_drive_properties("/World/asset", self.joint_cfg) # validate the properties self._validate_articulation_properties_on_prim("/World/asset") self._validate_rigid_body_properties_on_prim("/World/asset") self._validate_collision_properties_on_prim("/World/asset") self._validate_mass_properties_on_prim("/World/asset") self._validate_joint_drive_properties_on_prim("/World/asset") def test_defining_rigid_body_properties_on_prim(self): """Test defining rigid body properties on a prim.""" # create a prim prim_utils.create_prim("/World/parent", prim_type="XForm") # spawn a prim prim_utils.create_prim("/World/cube1", prim_type="Cube", translation=(0.0, 0.0, 0.62)) # set properties on the asset and check all properties are set schemas.define_rigid_body_properties("/World/cube1", self.rigid_cfg) schemas.define_collision_properties("/World/cube1", self.collision_cfg) schemas.define_mass_properties("/World/cube1", self.mass_cfg) # validate the properties self._validate_rigid_body_properties_on_prim("/World/cube1") self._validate_collision_properties_on_prim("/World/cube1") self._validate_mass_properties_on_prim("/World/cube1") # spawn another prim prim_utils.create_prim("/World/cube2", prim_type="Cube", translation=(1.0, 1.0, 0.62)) # set properties on the asset and check all properties are set schemas.define_rigid_body_properties("/World/cube2", self.rigid_cfg) schemas.define_collision_properties("/World/cube2", self.collision_cfg) # validate the properties self._validate_rigid_body_properties_on_prim("/World/cube2") self._validate_collision_properties_on_prim("/World/cube2") # check if we can play self.sim.reset() for _ in range(100): self.sim.step() def test_defining_articulation_properties_on_prim(self): """Test defining articulation properties on a prim.""" # create a parent articulation prim_utils.create_prim("/World/parent", prim_type="Xform") schemas.define_articulation_root_properties("/World/parent", self.arti_cfg) # validate the properties 
self._validate_articulation_properties_on_prim("/World/parent") # create a child articulation prim_utils.create_prim("/World/parent/child", prim_type="Cube", translation=(0.0, 0.0, 0.62)) schemas.define_rigid_body_properties("/World/parent/child", self.rigid_cfg) schemas.define_mass_properties("/World/parent/child", self.mass_cfg) # check if we can play self.sim.reset() for _ in range(100): self.sim.step() """ Helper functions. """ def _validate_articulation_properties_on_prim(self, prim_path: str, verbose: bool = False): """Validate the articulation properties on the prim.""" # the root prim root_prim = prim_utils.get_prim_at_path(prim_path) # check articulation properties are set correctly for attr_name, attr_value in self.arti_cfg.__dict__.items(): # skip names we know are not present if attr_name == "func": continue # convert attribute name in prim to cfg name prim_prop_name = f"physxArticulation:{to_camel_case(attr_name, to='cC')}" # validate the values self.assertAlmostEqual( root_prim.GetAttribute(prim_prop_name).Get(), attr_value, places=5, msg=f"Failed setting for {prim_prop_name}", ) def _validate_rigid_body_properties_on_prim(self, prim_path: str, verbose: bool = False): """Validate the rigid body properties on the prim. Note: Right now this function exploits the hierarchy in the asset to check the properties. This is not a fool-proof way of checking the properties. """ # the root prim root_prim = prim_utils.get_prim_at_path(prim_path) # check rigid body properties are set correctly for link_prim in root_prim.GetChildren(): if UsdPhysics.RigidBodyAPI(link_prim): for attr_name, attr_value in self.rigid_cfg.__dict__.items(): # skip names we know are not present if attr_name in ["func", "rigid_body_enabled", "kinematic_enabled"]: continue # convert attribute name in prim to cfg name prim_prop_name = f"physxRigidBody:{to_camel_case(attr_name, to='cC')}" # validate the values self.assertAlmostEqual( link_prim.GetAttribute(prim_prop_name).Get(), attr_value, places=5, msg=f"Failed setting for {prim_prop_name}", ) elif verbose: print(f"Skipping prim {link_prim.GetPrimPath()} as it is not a rigid body.") def _validate_collision_properties_on_prim(self, prim_path: str, verbose: bool = False): """Validate the collision properties on the prim. Note: Right now this function exploits the hierarchy in the asset to check the properties. This is not a fool-proof way of checking the properties. """ # the root prim root_prim = prim_utils.get_prim_at_path(prim_path) # check collision properties are set correctly for link_prim in root_prim.GetChildren(): for mesh_prim in link_prim.GetChildren(): if UsdPhysics.CollisionAPI(mesh_prim): for attr_name, attr_value in self.collision_cfg.__dict__.items(): # skip names we know are not present if attr_name in ["func", "collision_enabled"]: continue # convert attribute name in prim to cfg name prim_prop_name = f"physxCollision:{to_camel_case(attr_name, to='cC')}" # validate the values self.assertAlmostEqual( mesh_prim.GetAttribute(prim_prop_name).Get(), attr_value, places=5, msg=f"Failed setting for {prim_prop_name}", ) elif verbose: print(f"Skipping prim {mesh_prim.GetPrimPath()} as it is not a collision mesh.") def _validate_mass_properties_on_prim(self, prim_path: str, verbose: bool = False): """Validate the mass properties on the prim. Note: Right now this function exploits the hierarchy in the asset to check the properties. This is not a fool-proof way of checking the properties. 
""" # the root prim root_prim = prim_utils.get_prim_at_path(prim_path) # check rigid body mass properties are set correctly for link_prim in root_prim.GetChildren(): if UsdPhysics.MassAPI(link_prim): for attr_name, attr_value in self.mass_cfg.__dict__.items(): # skip names we know are not present if attr_name in ["func"]: continue # print(link_prim.GetProperties()) prim_prop_name = f"physics:{to_camel_case(attr_name, to='cC')}" # validate the values self.assertAlmostEqual( link_prim.GetAttribute(prim_prop_name).Get(), attr_value, places=5, msg=f"Failed setting for {prim_prop_name}", ) elif verbose: print(f"Skipping prim {link_prim.GetPrimPath()} as it is not a mass api.") def _validate_joint_drive_properties_on_prim(self, prim_path: str, verbose: bool = False): """Validate the mass properties on the prim. Note: Right now this function exploits the hierarchy in the asset to check the properties. This is not a fool-proof way of checking the properties. """ # the root prim root_prim = prim_utils.get_prim_at_path(prim_path) # check joint drive properties are set correctly for link_prim in root_prim.GetAllChildren(): for joint_prim in link_prim.GetChildren(): if joint_prim.IsA(UsdPhysics.PrismaticJoint) or joint_prim.IsA(UsdPhysics.RevoluteJoint): # check it has drive API self.assertTrue(joint_prim.HasAPI(UsdPhysics.DriveAPI)) # iterate over the joint properties for attr_name, attr_value in self.joint_cfg.__dict__.items(): # skip names we know are not present if attr_name == "func": continue # manually check joint type if attr_name == "drive_type": if joint_prim.IsA(UsdPhysics.PrismaticJoint): prim_attr_name = "drive:linear:physics:type" elif joint_prim.IsA(UsdPhysics.RevoluteJoint): prim_attr_name = "drive:angular:physics:type" else: raise ValueError(f"Unknown joint type for prim {joint_prim.GetPrimPath()}") # check the value self.assertEqual(attr_value, joint_prim.GetAttribute(prim_attr_name).Get()) continue elif verbose: print(f"Skipping prim {joint_prim.GetPrimPath()} as it is not a joint drive api.") if __name__ == "__main__": run_tests()
15,658
Python
46.308157
117
0.601162
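A minimal sketch of the `define_*` entry points validated above: they apply the corresponding USD/PhysX API to a fresh prim and then write the configured attributes. The cube path and config values are illustrative.
```
import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim.schemas as schemas

# a bare cube prim to attach physics schemas to
prim_utils.create_prim("/World/cube", prim_type="Cube", translation=(0.0, 0.0, 0.62))
schemas.define_rigid_body_properties("/World/cube", schemas.RigidBodyPropertiesCfg(disable_gravity=False))
schemas.define_collision_properties("/World/cube", schemas.CollisionPropertiesCfg(collision_enabled=True))
schemas.define_mass_properties("/World/cube", schemas.MassPropertiesCfg(mass=1.0))
```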
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_spawn_materials.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.simulation_context import SimulationContext from pxr import UsdPhysics, UsdShade import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.utils.assets import NVIDIA_NUCLEUS_DIR class TestSpawningMaterials(unittest.TestCase): """Test fixture for checking spawning of materials.""" def setUp(self) -> None: """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.1 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") # Wait for spawning stage_utils.update_stage() def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() def test_spawn_preview_surface(self): """Test spawning preview surface.""" # Spawn preview surface cfg = sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/PreviewSurface", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/Looks/PreviewSurface")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader") # Check properties self.assertEqual(prim.GetAttribute("inputs:diffuseColor").Get(), cfg.diffuse_color) def test_spawn_mdl_material(self): """Test spawning mdl material.""" # Spawn mdl material cfg = sim_utils.materials.MdlFileCfg( mdl_path=f"{NVIDIA_NUCLEUS_DIR}/Materials/Base/Metals/Aluminum_Anodized.mdl", project_uvw=True, albedo_brightness=0.5, ) prim = cfg.func("/Looks/MdlMaterial", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/Looks/MdlMaterial")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader") # Check properties self.assertEqual(prim.GetAttribute("inputs:project_uvw").Get(), cfg.project_uvw) self.assertEqual(prim.GetAttribute("inputs:albedo_brightness").Get(), cfg.albedo_brightness) def test_spawn_glass_mdl_material(self): """Test spawning a glass mdl material.""" # Spawn mdl material cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/GlassMaterial", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/Looks/GlassMaterial")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader") # Check properties self.assertEqual(prim.GetAttribute("inputs:thin_walled").Get(), cfg.thin_walled) self.assertEqual(prim.GetAttribute("inputs:glass_ior").Get(), cfg.glass_ior) self.assertEqual(prim.GetAttribute("inputs:glass_color").Get(), cfg.glass_color) def test_spawn_rigid_body_material(self): """Test spawning a rigid body material.""" # spawn physics material cfg = sim_utils.materials.RigidBodyMaterialCfg( dynamic_friction=1.5, restitution=1.5, static_friction=0.5, restitution_combine_mode="max", friction_combine_mode="max", improve_patch_friction=True, ) prim = cfg.func("/Looks/RigidBodyMaterial", cfg) # Check validity self.assertTrue(prim.IsValid()) 
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/RigidBodyMaterial")) # Check properties self.assertEqual(prim.GetAttribute("physics:staticFriction").Get(), cfg.static_friction) self.assertEqual(prim.GetAttribute("physics:dynamicFriction").Get(), cfg.dynamic_friction) self.assertEqual(prim.GetAttribute("physics:restitution").Get(), cfg.restitution) self.assertEqual(prim.GetAttribute("physxMaterial:improvePatchFriction").Get(), cfg.improve_patch_friction) self.assertEqual(prim.GetAttribute("physxMaterial:restitutionCombineMode").Get(), cfg.restitution_combine_mode) self.assertEqual(prim.GetAttribute("physxMaterial:frictionCombineMode").Get(), cfg.friction_combine_mode) def test_apply_rigid_body_material_on_visual_material(self): """Test applying a rigid body material on a visual material.""" # Spawn mdl material cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/Material", cfg) # spawn physics material cfg = sim_utils.materials.RigidBodyMaterialCfg( dynamic_friction=1.5, restitution=1.5, static_friction=0.5, restitution_combine_mode="max", friction_combine_mode="max", improve_patch_friction=True, ) prim = cfg.func("/Looks/Material", cfg) # Check validity self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/Looks/Material")) # Check properties self.assertEqual(prim.GetAttribute("physics:staticFriction").Get(), cfg.static_friction) self.assertEqual(prim.GetAttribute("physics:dynamicFriction").Get(), cfg.dynamic_friction) self.assertEqual(prim.GetAttribute("physics:restitution").Get(), cfg.restitution) self.assertEqual(prim.GetAttribute("physxMaterial:improvePatchFriction").Get(), cfg.improve_patch_friction) self.assertEqual(prim.GetAttribute("physxMaterial:restitutionCombineMode").Get(), cfg.restitution_combine_mode) self.assertEqual(prim.GetAttribute("physxMaterial:frictionCombineMode").Get(), cfg.friction_combine_mode) def test_bind_prim_to_material(self): """Test binding a rigid body material on a mesh prim.""" # create a mesh prim object_prim = prim_utils.create_prim("/World/Geometry/box", "Cube") UsdPhysics.CollisionAPI.Apply(object_prim) # create a visual material visual_material_cfg = sim_utils.GlassMdlCfg(glass_ior=1.0, thin_walled=True) visual_material_cfg.func("/World/Looks/glassMaterial", visual_material_cfg) # create a physics material physics_material_cfg = sim_utils.RigidBodyMaterialCfg( static_friction=0.5, dynamic_friction=1.5, restitution=1.5 ) physics_material_cfg.func("/World/Physics/rubberMaterial", physics_material_cfg) # bind the visual material to the mesh prim sim_utils.bind_visual_material("/World/Geometry/box", "/World/Looks/glassMaterial") sim_utils.bind_physics_material("/World/Geometry/box", "/World/Physics/rubberMaterial") # check the main material binding material_binding_api = UsdShade.MaterialBindingAPI(object_prim) # -- visual material_direct_binding = material_binding_api.GetDirectBinding() self.assertEqual(material_direct_binding.GetMaterialPath(), "/World/Looks/glassMaterial") self.assertEqual(material_direct_binding.GetMaterialPurpose(), "") # -- physics material_direct_binding = material_binding_api.GetDirectBinding("physics") self.assertEqual(material_direct_binding.GetMaterialPath(), "/World/Physics/rubberMaterial") self.assertEqual(material_direct_binding.GetMaterialPurpose(), "physics") if __name__ == "__main__": run_tests()
8,077
Python
45.16
119
0.673517
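A minimal sketch of the binding flow from the last test: visual materials bind under the default purpose, physics materials under the "physics" purpose.
```
import omni.isaac.core.utils.prims as prim_utils
from pxr import UsdPhysics

import omni.isaac.orbit.sim as sim_utils

# a mesh prim with a collider to bind the materials to
object_prim = prim_utils.create_prim("/World/box", "Cube")
UsdPhysics.CollisionAPI.Apply(object_prim)

# spawn one visual and one physics material
visual_cfg = sim_utils.GlassMdlCfg(glass_ior=1.0, thin_walled=True)
visual_cfg.func("/World/Looks/glassMaterial", visual_cfg)
physics_cfg = sim_utils.RigidBodyMaterialCfg(static_friction=0.5, dynamic_friction=1.5, restitution=1.5)
physics_cfg.func("/World/Physics/rubberMaterial", physics_cfg)

# bind both to the mesh prim
sim_utils.bind_visual_material("/World/box", "/World/Looks/glassMaterial")
sim_utils.bind_physics_material("/World/box", "/World/Physics/rubberMaterial")
```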
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_spawn_lights.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.simulation_context import SimulationContext from pxr import UsdLux import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.utils.string import to_camel_case class TestSpawningLights(unittest.TestCase): """Test fixture for checking spawning of USD lights with different settings.""" def setUp(self) -> None: """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.1 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") # Wait for spawning stage_utils.update_stage() def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() """ Basic spawning. """ def test_spawn_disk_light(self): """Test spawning a disk light source.""" cfg = sim_utils.DiskLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=100, radius=20.0 ) prim = cfg.func("/World/disk_light", cfg) # check if the light is spawned self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/disk_light")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "DiskLight") # validate properties on the prim self._validate_properties_on_prim("/World/disk_light", cfg) def test_spawn_distant_light(self): """Test spawning a distant light.""" cfg = sim_utils.DistantLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=100, angle=20 ) prim = cfg.func("/World/distant_light", cfg) # check if the light is spawned self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/distant_light")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "DistantLight") # validate properties on the prim self._validate_properties_on_prim("/World/distant_light", cfg) def test_spawn_dome_light(self): """Test spawning a dome light source.""" cfg = sim_utils.DomeLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=100 ) prim = cfg.func("/World/dome_light", cfg) # check if the light is spawned self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/dome_light")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "DomeLight") # validate properties on the prim self._validate_properties_on_prim("/World/dome_light", cfg) def test_spawn_cylinder_light(self): """Test spawning a cylinder light source.""" cfg = sim_utils.CylinderLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=100, radius=20.0 ) prim = cfg.func("/World/cylinder_light", cfg) # check if the light is spawned self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/cylinder_light")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "CylinderLight") # validate properties on the prim self._validate_properties_on_prim("/World/cylinder_light", cfg) def test_spawn_sphere_light(self): """Test 
spawning a sphere light source.""" cfg = sim_utils.SphereLightCfg( color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=100, radius=20.0 ) prim = cfg.func("/World/sphere_light", cfg) # check if the light is spawned self.assertTrue(prim.IsValid()) self.assertTrue(prim_utils.is_prim_path_valid("/World/sphere_light")) self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "SphereLight") # validate properties on the prim self._validate_properties_on_prim("/World/sphere_light", cfg) """ Helper functions. """ def _validate_properties_on_prim(self, prim_path: str, cfg: sim_utils.LightCfg): """Validate the properties on the prim. Args: prim_path: The prim name. cfg: The configuration for the light source. """ # default list of params to skip non_usd_params = ["func", "prim_type", "visible", "semantic_tags", "copy_from_source"] # obtain prim prim = prim_utils.get_prim_at_path(prim_path) for attr_name, attr_value in cfg.__dict__.items(): # skip names we know are not present if attr_name in non_usd_params or attr_value is None: continue # deal with texture input names if "texture" in attr_name: light_prim = UsdLux.DomeLight(prim) if attr_name == "texture_file": configured_value = light_prim.GetTextureFileAttr().Get() elif attr_name == "texture_format": configured_value = light_prim.GetTextureFormatAttr().Get() else: raise ValueError(f"Unknown texture attribute: '{attr_name}'") else: # convert attribute name in prim to cfg name prim_prop_name = f"inputs:{to_camel_case(attr_name, to='cC')}" # configured value configured_value = prim.GetAttribute(prim_prop_name).Get() # validate the values self.assertEqual(configured_value, attr_value, msg=f"Failed for attribute: '{attr_name}'") if __name__ == "__main__": run_tests()
6,365
Python
38.055214
116
0.626866
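The validation helper above relies on a fixed naming convention: snake_case config fields map to camelCase `inputs:` attributes on the light prim. A minimal sketch of that mapping (expected output shown as comments):
```
from omni.isaac.orbit.utils.string import to_camel_case

for attr_name in ("color_temperature", "enable_color_temperature", "intensity"):
    print(f"{attr_name} -> inputs:{to_camel_case(attr_name, to='cC')}")
# color_temperature -> inputs:colorTemperature
# enable_color_temperature -> inputs:enableColorTemperature
# intensity -> inputs:intensity
```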
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app config = {"headless": True} simulation_app = AppLauncher(config).app """Rest everything follows.""" import numpy as np import unittest import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils import omni.isaac.orbit.sim as sim_utils class TestUtilities(unittest.TestCase): """Test fixture for the sim utility functions.""" def setUp(self): """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() stage_utils.update_stage() def tearDown(self) -> None: """Clear stage after each test.""" stage_utils.clear_stage() def test_get_all_matching_child_prims(self): """Test get_all_matching_child_prims() function.""" # create scene prim_utils.create_prim("/World/Floor") prim_utils.create_prim( "/World/Floor/thefloor", "Cube", position=np.array([75, 75, -150.1]), attributes={"size": 300} ) prim_utils.create_prim("/World/Room", "Sphere", attributes={"radius": 1e3}) # test isaac_sim_result = prim_utils.get_all_matching_child_prims("/World") orbit_result = sim_utils.get_all_matching_child_prims("/World") self.assertListEqual(isaac_sim_result, orbit_result) # test invalid path with self.assertRaises(ValueError): sim_utils.get_all_matching_child_prims("World/Room") def test_find_matching_prim_paths(self): """Test find_matching_prim_paths() function.""" # create scene for index in range(2048): random_pos = np.random.uniform(-100, 100, size=3) prim_utils.create_prim(f"/World/Floor_{index}", "Cube", position=random_pos, attributes={"size": 2.0}) prim_utils.create_prim(f"/World/Floor_{index}/Sphere", "Sphere", attributes={"radius": 10}) prim_utils.create_prim(f"/World/Floor_{index}/Sphere/childSphere", "Sphere", attributes={"radius": 1}) prim_utils.create_prim(f"/World/Floor_{index}/Sphere/childSphere2", "Sphere", attributes={"radius": 1}) # test leaf paths isaac_sim_result = prim_utils.find_matching_prim_paths("/World/Floor_.*/Sphere") orbit_result = sim_utils.find_matching_prim_paths("/World/Floor_.*/Sphere") self.assertListEqual(isaac_sim_result, orbit_result) # test non-leaf paths isaac_sim_result = prim_utils.find_matching_prim_paths("/World/Floor_.*") orbit_result = sim_utils.find_matching_prim_paths("/World/Floor_.*") self.assertListEqual(isaac_sim_result, orbit_result) # test child-leaf paths isaac_sim_result = prim_utils.find_matching_prim_paths("/World/Floor_.*/Sphere/childSphere.*") orbit_result = sim_utils.find_matching_prim_paths("/World/Floor_.*/Sphere/childSphere.*") self.assertListEqual(isaac_sim_result, orbit_result) # test invalid path with self.assertRaises(ValueError): sim_utils.get_all_matching_child_prims("World/Floor_.*") if __name__ == "__main__": run_tests()
3,382
Python
36.588888
115
0.649024
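A minimal sketch of the path validation both tests end with: the Orbit helper rejects prim paths that are not absolute.
```
import omni.isaac.orbit.sim as sim_utils

try:
    # missing the leading "/" -- the helper raises instead of silently matching nothing
    sim_utils.get_all_matching_child_prims("World/Room")
except ValueError as err:
    print(f"rejected relative prim path: {err}")
```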
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_build_simulation_context_headless.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ This test has a lot of duplication with ``test_build_simulation_context_nonheadless.py``. This is intentional to ensure that the tests are run in both headless and non-headless modes, and we currently can't re-build the simulation app in a script. If you need to make a change to this test, please make sure to also make the same change to ``test_build_simulation_context_nonheadless.py``. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import unittest from omni.isaac.core.utils.prims import is_prim_path_valid from omni.isaac.orbit.sim.simulation_cfg import SimulationCfg from omni.isaac.orbit.sim.simulation_context import build_simulation_context class TestBuildSimulationContextHeadless(unittest.TestCase): """Tests for simulation context builder with headless usecase.""" """ Tests """ def test_build_simulation_context_no_cfg(self): """Test that the simulation context is built when no simulation cfg is passed in.""" for gravity_enabled in (True, False): for device in ("cuda:0", "cpu"): for dt in (0.01, 0.1): with self.subTest(gravity_enabled=gravity_enabled, device=device, dt=dt): with build_simulation_context(gravity_enabled=gravity_enabled, device=device, dt=dt) as sim: if gravity_enabled: self.assertEqual(sim.cfg.gravity, (0.0, 0.0, -9.81)) else: self.assertEqual(sim.cfg.gravity, (0.0, 0.0, 0.0)) if device == "cuda:0": self.assertEqual(sim.cfg.device, "cuda:0") else: self.assertEqual(sim.cfg.device, "cpu") self.assertEqual(sim.cfg.dt, dt) # Ensure that dome light didn't get added automatically as we are headless self.assertFalse(is_prim_path_valid("/World/defaultDomeLight")) def test_build_simulation_context_ground_plane(self): """Test that the simulation context is built with the correct ground plane.""" for add_ground_plane in (True, False): with self.subTest(add_ground_plane=add_ground_plane): with build_simulation_context(add_ground_plane=add_ground_plane) as _: # Ensure that ground plane got added self.assertEqual(is_prim_path_valid("/World/defaultGroundPlane"), add_ground_plane) def test_build_simulation_context_auto_add_lighting(self): """Test that the simulation context is built with the correct lighting.""" for add_lighting in (True, False): for auto_add_lighting in (True, False): with self.subTest(add_lighting=add_lighting, auto_add_lighting=auto_add_lighting): with build_simulation_context(add_lighting=add_lighting, auto_add_lighting=auto_add_lighting) as _: if add_lighting: # Ensure that dome light got added self.assertTrue(is_prim_path_valid("/World/defaultDomeLight")) else: # Ensure that dome light didn't get added as there's no GUI self.assertFalse(is_prim_path_valid("/World/defaultDomeLight")) def test_build_simulation_context_cfg(self): """Test that the simulation context is built with the correct cfg and values don't get overridden.""" dt = 0.001 # Non-standard gravity gravity = (0.0, 0.0, -1.81) device = "cuda:0" cfg = SimulationCfg( gravity=gravity, device=device, dt=dt, ) with build_simulation_context(sim_cfg=cfg, gravity_enabled=False, dt=0.01, device="cpu") as sim: self.assertEqual(sim.cfg.gravity, gravity) self.assertEqual(sim.cfg.device, device) self.assertEqual(sim.cfg.dt, dt) if __name__ == "__main__": run_tests()
4,457
Python
41.865384
141
0.607135
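A minimal sketch of the precedence rule the last test pins down: values in an explicit `sim_cfg` win over the keyword overrides passed alongside it.
```
from omni.isaac.orbit.sim.simulation_cfg import SimulationCfg
from omni.isaac.orbit.sim.simulation_context import build_simulation_context

cfg = SimulationCfg(gravity=(0.0, 0.0, -1.81), device="cuda:0", dt=0.001)
# the keyword arguments are ignored when a sim_cfg is provided
with build_simulation_context(sim_cfg=cfg, gravity_enabled=False, dt=0.01, device="cpu") as sim:
    assert sim.cfg.gravity == (0.0, 0.0, -1.81)
    assert sim.cfg.device == "cuda:0"
    assert sim.cfg.dt == 0.001
```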
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_simulation_context.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True, experience="omni.isaac.sim.python.gym.headless.kit").app """Rest everything follows.""" import ctypes import numpy as np import unittest import omni.isaac.core.utils.prims as prim_utils from omni.isaac.core.simulation_context import SimulationContext as IsaacSimulationContext from omni.isaac.orbit.sim import SimulationCfg, SimulationContext class TestSimulationContext(unittest.TestCase): """Test fixture for wrapper around simulation context.""" def setUp(self) -> None: """Create a blank new stage for each test.""" # Load kit helper SimulationContext.clear_instance() def test_singleton(self): """Tests that the singleton is working.""" sim1 = SimulationContext() sim2 = SimulationContext() sim3 = IsaacSimulationContext() self.assertIs(sim1, sim2) self.assertIs(sim1, sim3) # try to delete the singleton sim2.clear_instance() self.assertTrue(sim1.instance() is None) # create new instance sim4 = SimulationContext() self.assertIsNot(sim1, sim4) self.assertIsNot(sim3, sim4) self.assertIs(sim1.instance(), sim4.instance()) self.assertIs(sim3.instance(), sim4.instance()) # clear instance sim3.clear_instance() def test_initialization(self): """Test the simulation config.""" cfg = SimulationCfg(physics_prim_path="/Physics/PhysX", substeps=5, gravity=(0.0, -0.5, -0.5)) sim = SimulationContext(cfg) # TODO: Figure out why keyword argument doesn't work. # note: added a fix in Isaac Sim 2023.1 for this. # sim = SimulationContext(cfg=cfg) # check valid settings self.assertEqual(sim.get_physics_dt(), cfg.dt) self.assertEqual(sim.get_rendering_dt(), cfg.dt * cfg.substeps) # check valid paths self.assertTrue(prim_utils.is_prim_path_valid("/Physics/PhysX")) self.assertTrue(prim_utils.is_prim_path_valid("/Physics/PhysX/defaultMaterial")) # check valid gravity gravity_dir, gravity_mag = sim.get_physics_context().get_gravity() gravity = np.array(gravity_dir) * gravity_mag np.testing.assert_almost_equal(gravity, cfg.gravity) def test_sim_version(self): """Test obtaining the version.""" sim = SimulationContext() version = sim.get_version() self.assertTrue(len(version) > 0) self.assertTrue(version[0] >= 2023) def test_carb_setting(self): """Test setting carb settings.""" sim = SimulationContext() # known carb setting sim.set_setting("/physics/physxDispatcher", False) self.assertEqual(sim.get_setting("/physics/physxDispatcher"), False) # unknown carb setting sim.set_setting("/myExt/using_omniverse_version", sim.get_version()) self.assertSequenceEqual(sim.get_setting("/myExt/using_omniverse_version"), sim.get_version()) def test_headless_mode(self): """Test that render mode is headless since we are running in headless mode.""" sim = SimulationContext() # check default render mode self.assertEqual(sim.render_mode, sim.RenderMode.NO_GUI_OR_RENDERING) def test_boundedness(self): """Test that the boundedness of the simulation context remains constant. Note: This test fails right now because Isaac Sim does not handle boundedness correctly. On creation, it is registering itself to various callbacks and hence the boundedness is more than 1. This may not be critical for the simulation context since we usually call various clear functions before deleting the simulation context. 
""" sim = SimulationContext() # manually set the boundedness to 1? -- this is not possible because of Isaac Sim. sim.clear_all_callbacks() sim._stage_open_callback = None sim._physics_timer_callback = None sim._event_timer_callback = None # check that boundedness of simulation context is correct sim_ref_count = ctypes.c_long.from_address(id(sim)).value # reset the simulation sim.reset() self.assertEqual(ctypes.c_long.from_address(id(sim)).value, sim_ref_count) # step the simulation for _ in range(10): sim.step() self.assertEqual(ctypes.c_long.from_address(id(sim)).value, sim_ref_count) # clear the simulation sim.clear_instance() self.assertEqual(ctypes.c_long.from_address(id(sim)).value, sim_ref_count - 1) def test_zero_gravity(self): """Test that gravity can be properly disabled.""" cfg = SimulationCfg(gravity=(0.0, 0.0, 0.0)) sim = SimulationContext(cfg) gravity_dir, gravity_mag = sim.get_physics_context().get_gravity() gravity = np.array(gravity_dir) * gravity_mag np.testing.assert_almost_equal(gravity, cfg.gravity) if __name__ == "__main__": run_tests()
5,329
Python
37.345323
111
0.660537
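A minimal sketch of the singleton and carb-settings behaviour verified above, assuming a launched simulation app:
```
from omni.isaac.orbit.sim import SimulationContext

sim1 = SimulationContext()
sim2 = SimulationContext()
assert sim1 is sim2  # the context is a singleton

# carb settings round-trip through the simulation context
sim1.set_setting("/physics/physxDispatcher", False)
assert not sim1.get_setting("/physics/physxDispatcher")  # stored as False

# clearing the instance invalidates the singleton
sim1.clear_instance()
assert SimulationContext.instance() is None
```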
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_mesh_converter.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher, run_tests # launch omniverse app simulation_app = AppLauncher(headless=True).app """Rest everything follows.""" import os import tempfile import unittest import omni import omni.isaac.core.utils.prims as prim_utils import omni.isaac.core.utils.stage as stage_utils from omni.isaac.core.simulation_context import SimulationContext from pxr import UsdGeom, UsdPhysics from omni.isaac.orbit.sim.converters import MeshConverter, MeshConverterCfg from omni.isaac.orbit.sim.schemas import schemas_cfg from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR, retrieve_file_path class TestMeshConverter(unittest.TestCase): """Test fixture for the MeshConverter class.""" @classmethod def setUpClass(cls): """Load assets for tests.""" assets_dir = f"{ISAAC_ORBIT_NUCLEUS_DIR}/Tests/MeshConverter/duck" # Create mapping of file endings to file paths that can be used by tests cls.assets = { "obj": f"{assets_dir}/duck.obj", "stl": f"{assets_dir}/duck.stl", "fbx": f"{assets_dir}/duck.fbx", "mtl": f"{assets_dir}/duck.mtl", "png": f"{assets_dir}/duckCM.png", } # Download all these locally download_dir = tempfile.mkdtemp(suffix="_mesh_converter_test_assets") for key, value in cls.assets.items(): cls.assets[key] = retrieve_file_path(value, download_dir=download_dir) def setUp(self): """Create a blank new stage for each test.""" # Create a new stage stage_utils.create_new_stage() # Simulation time-step self.dt = 0.01 # Load kit helper self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") def tearDown(self) -> None: """Stops simulator after each test.""" # stop simulation self.sim.stop() # cleanup stage and context self.sim.clear() self.sim.clear_all_callbacks() self.sim.clear_instance() """ Test fixtures. """ def test_no_change(self): """Call conversion twice on the same input asset. This should not generate a new USD file if the hash is the same.""" # create an initial USD file from asset mesh_config = MeshConverterCfg(asset_path=self.assets["obj"]) mesh_converter = MeshConverter(mesh_config) time_usd_file_created = os.stat(mesh_converter.usd_path).st_mtime_ns # no change to config only define the usd directory new_config = mesh_config new_config.usd_dir = mesh_converter.usd_dir # convert to usd but this time in the same directory as previous step new_mesh_converter = MeshConverter(new_config) new_time_usd_file_created = os.stat(new_mesh_converter.usd_path).st_mtime_ns self.assertEqual(time_usd_file_created, new_time_usd_file_created) def test_config_change(self): """Call conversion twice but change the config in the second call. 
This should generate a new USD file.""" # create an initial USD file from asset mesh_config = MeshConverterCfg(asset_path=self.assets["obj"]) mesh_converter = MeshConverter(mesh_config) time_usd_file_created = os.stat(mesh_converter.usd_path).st_mtime_ns # change the config new_config = mesh_config new_config.make_instanceable = not mesh_config.make_instanceable # define the usd directory new_config.usd_dir = mesh_converter.usd_dir # convert to usd but this time in the same directory as previous step new_mesh_converter = MeshConverter(new_config) new_time_usd_file_created = os.stat(new_mesh_converter.usd_path).st_mtime_ns self.assertNotEqual(time_usd_file_created, new_time_usd_file_created) def test_convert_obj(self): """Convert an OBJ file""" mesh_config = MeshConverterCfg(asset_path=self.assets["obj"]) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_conversion(mesh_converter) def test_convert_stl(self): """Convert an STL file""" mesh_config = MeshConverterCfg(asset_path=self.assets["stl"]) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_conversion(mesh_converter) def test_convert_fbx(self): """Convert an FBX file""" mesh_config = MeshConverterCfg(asset_path=self.assets["fbx"]) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_conversion(mesh_converter) def test_collider_no_approximation(self): """Convert an OBJ file using no approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="none", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) def test_collider_convex_hull(self): """Convert an OBJ file using convex hull approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="convexHull", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) def test_collider_mesh_simplification(self): """Convert an OBJ file using mesh simplification approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="meshSimplification", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) def test_collider_mesh_bounding_cube(self): """Convert an OBJ file using bounding cube approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="boundingCube", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) def test_collider_mesh_bounding_sphere(self): """Convert an OBJ file using bounding sphere""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="boundingSphere", 
collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) def test_collider_mesh_no_collision(self): """Convert an OBJ file using bounding sphere with collision disabled""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=False) mesh_config = MeshConverterCfg( asset_path=self.assets["obj"], collision_approximation="boundingSphere", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) # check that mesh conversion is successful self._check_mesh_collider_settings(mesh_converter) """ Helper functions. """ def _check_mesh_conversion(self, mesh_converter: MeshConverter): """Check that mesh is loadable and stage is valid.""" # Load the mesh prim_path = "/World/Object" prim_utils.create_prim(prim_path, usd_path=mesh_converter.usd_path) # Check prim can be properly spawned self.assertTrue(prim_utils.is_prim_path_valid(prim_path)) # Load a second time prim_path = "/World/Object2" prim_utils.create_prim(prim_path, usd_path=mesh_converter.usd_path) # Check prim can be properly spawned self.assertTrue(prim_utils.is_prim_path_valid(prim_path)) stage = omni.usd.get_context().get_stage() # Check axis is z-up axis = UsdGeom.GetStageUpAxis(stage) self.assertEqual(axis, "Z") # Check units is meters units = UsdGeom.GetStageMetersPerUnit(stage) self.assertEqual(units, 1.0) def _check_mesh_collider_settings(self, mesh_converter: MeshConverter): # Check prim can be properly spawned prim_path = "/World/Object" prim_utils.create_prim(prim_path, usd_path=mesh_converter.usd_path) self.assertTrue(prim_utils.is_prim_path_valid(prim_path)) # Make uninstanceable to check collision settings geom_prim = prim_utils.get_prim_at_path(prim_path + "/geometry") # Check that instancing worked! self.assertEqual(geom_prim.IsInstanceable(), mesh_converter.cfg.make_instanceable) # Obtain mesh settings geom_prim.SetInstanceable(False) mesh_prim = prim_utils.get_prim_at_path(prim_path + "/geometry/mesh") # Check collision settings # -- if collision is enabled, check that API is present exp_collision_enabled = ( mesh_converter.cfg.collision_props is not None and mesh_converter.cfg.collision_props.collision_enabled ) collision_api = UsdPhysics.CollisionAPI(mesh_prim) collision_enabled = collision_api.GetCollisionEnabledAttr().Get() self.assertEqual(collision_enabled, exp_collision_enabled, "Collision enabled is not the same!") # -- if collision is enabled, check that collision approximation is correct if exp_collision_enabled: exp_collision_approximation = mesh_converter.cfg.collision_approximation mesh_collision_api = UsdPhysics.MeshCollisionAPI(mesh_prim) collision_approximation = mesh_collision_api.GetApproximationAttr().Get() self.assertEqual( collision_approximation, exp_collision_approximation, "Collision approximation is not the same!" ) if __name__ == "__main__": run_tests()
10,939
Python
39.973783
125
0.664503
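A condensed usage sketch of the converter exercised above. The asset path is a placeholder and the import locations are assumptions, not taken from this file:

# minimal sketch: convert a mesh asset to USD with a convex-hull collider
from omni.isaac.orbit.sim.converters import MeshConverter, MeshConverterCfg  # assumed import path
from omni.isaac.orbit.sim.schemas import schemas_cfg  # assumed import path

cfg = MeshConverterCfg(
    asset_path="/path/to/asset.obj",  # placeholder path
    collision_approximation="convexHull",
    collision_props=schemas_cfg.CollisionPropertiesCfg(collision_enabled=True),
)
converter = MeshConverter(cfg)
print(converter.usd_path)  # location of the generated USD file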
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_spawn_shapes.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
simulation_app = AppLauncher(headless=True).app

"""Rest everything follows."""

import unittest

import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext

import omni.isaac.orbit.sim as sim_utils


class TestSpawningUsdGeometries(unittest.TestCase):
    """Test fixture for checking spawning of USDGeom prim with different settings."""

    def setUp(self) -> None:
        """Create a blank new stage for each test."""
        # Create a new stage
        stage_utils.create_new_stage()
        # Simulation time-step
        self.dt = 0.1
        # Load kit helper
        self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
        # Wait for spawning
        stage_utils.update_stage()

    def tearDown(self) -> None:
        """Stops simulator after each test."""
        # stop simulation
        self.sim.stop()
        self.sim.clear()
        self.sim.clear_all_callbacks()
        self.sim.clear_instance()

    """
    Basic spawning.
    """

    def test_spawn_cone(self):
        """Test spawning of UsdGeom.Cone prim."""
        # Spawn cone
        cfg = sim_utils.ConeCfg(radius=1.0, height=2.0, axis="Y")
        prim = cfg.func("/World/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cone/geometry/mesh")
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Cone")
        self.assertEqual(prim.GetAttribute("radius").Get(), cfg.radius)
        self.assertEqual(prim.GetAttribute("height").Get(), cfg.height)
        self.assertEqual(prim.GetAttribute("axis").Get(), cfg.axis)

    def test_spawn_capsule(self):
        """Test spawning of UsdGeom.Capsule prim."""
        # Spawn capsule
        cfg = sim_utils.CapsuleCfg(radius=1.0, height=2.0, axis="Y")
        prim = cfg.func("/World/Capsule", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Capsule"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Capsule/geometry/mesh")
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Capsule")
        self.assertEqual(prim.GetAttribute("radius").Get(), cfg.radius)
        self.assertEqual(prim.GetAttribute("height").Get(), cfg.height)
        self.assertEqual(prim.GetAttribute("axis").Get(), cfg.axis)

    def test_spawn_cylinder(self):
        """Test spawning of UsdGeom.Cylinder prim."""
        # Spawn cylinder
        cfg = sim_utils.CylinderCfg(radius=1.0, height=2.0, axis="Y")
        prim = cfg.func("/World/Cylinder", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cylinder"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cylinder/geometry/mesh")
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Cylinder")
        self.assertEqual(prim.GetAttribute("radius").Get(), cfg.radius)
        self.assertEqual(prim.GetAttribute("height").Get(), cfg.height)
        self.assertEqual(prim.GetAttribute("axis").Get(), cfg.axis)

    def test_spawn_cuboid(self):
        """Test spawning of UsdGeom.Cube prim."""
        # Spawn cuboid
        cfg = sim_utils.CuboidCfg(size=(1.0, 2.0, 3.0))
        prim = cfg.func("/World/Cube", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cube"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cube/geometry/mesh")
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Cube")
        self.assertEqual(prim.GetAttribute("size").Get(), min(cfg.size))

    def test_spawn_sphere(self):
        """Test spawning of UsdGeom.Sphere prim."""
        # Spawn sphere
        cfg = sim_utils.SphereCfg(radius=1.0)
        prim = cfg.func("/World/Sphere", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Sphere"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Sphere/geometry/mesh")
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Sphere")
        self.assertEqual(prim.GetAttribute("radius").Get(), cfg.radius)

    """
    Physics properties.
    """

    def test_spawn_cone_with_rigid_props(self):
        """Test spawning of UsdGeom.Cone prim with rigid body API.

        Note:
            Playing the simulation in this case will give a warning that no mass is specified!
            Need to also set up mass and colliders.
        """
        # Spawn cone
        cfg = sim_utils.ConeCfg(
            radius=1.0,
            height=2.0,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                rigid_body_enabled=True, solver_position_iteration_count=8, sleep_threshold=0.1
            ),
        )
        prim = cfg.func("/World/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone"))
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cone")
        self.assertEqual(prim.GetAttribute("physics:rigidBodyEnabled").Get(), cfg.rigid_props.rigid_body_enabled)
        self.assertEqual(
            prim.GetAttribute("physxRigidBody:solverPositionIterationCount").Get(),
            cfg.rigid_props.solver_position_iteration_count,
        )
        self.assertAlmostEqual(
            prim.GetAttribute("physxRigidBody:sleepThreshold").Get(), cfg.rigid_props.sleep_threshold
        )

    def test_spawn_cone_with_rigid_and_mass_props(self):
        """Test spawning of UsdGeom.Cone prim with rigid body and mass API."""
        # Spawn cone
        cfg = sim_utils.ConeCfg(
            radius=1.0,
            height=2.0,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                rigid_body_enabled=True, solver_position_iteration_count=8, sleep_threshold=0.1
            ),
            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
        )
        prim = cfg.func("/World/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone"))
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cone")
        self.assertEqual(prim.GetAttribute("physics:mass").Get(), cfg.mass_props.mass)
        # check sim playing
        self.sim.play()
        for _ in range(10):
            self.sim.step()

    def test_spawn_cone_with_rigid_and_density_props(self):
        """Test spawning of UsdGeom.Cone prim with rigid body and mass API.

        Note:
            In this case, we specify the density instead of the mass. In that case, the physics engine
            needs to know the collision shape to compute the mass. Thus, we have to set the collider
            properties. In order to not have a collision shape, we disable the collision.
        """
        # Spawn cone
        cfg = sim_utils.ConeCfg(
            radius=1.0,
            height=2.0,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                rigid_body_enabled=True, solver_position_iteration_count=8, sleep_threshold=0.1
            ),
            mass_props=sim_utils.MassPropertiesCfg(density=10.0),
            collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=False),
        )
        prim = cfg.func("/World/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone"))
        # Check properties
        prim = prim_utils.get_prim_at_path("/World/Cone")
        self.assertEqual(prim.GetAttribute("physics:density").Get(), cfg.mass_props.density)
        # check sim playing
        self.sim.play()
        for _ in range(10):
            self.sim.step()

    def test_spawn_cone_with_all_props(self):
        """Test spawning of UsdGeom.Cone prim with all properties."""
        # Spawn cone
        cfg = sim_utils.ConeCfg(
            radius=1.0,
            height=2.0,
            mass_props=sim_utils.MassPropertiesCfg(mass=5.0),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)),
            physics_material=sim_utils.materials.RigidBodyMaterialCfg(),
        )
        prim = cfg.func("/World/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone"))
        self.assertTrue(prim_utils.is_prim_path_valid("/World/Cone/geometry/material"))
        # Check properties
        # -- rigid body
        prim = prim_utils.get_prim_at_path("/World/Cone")
        self.assertEqual(prim.GetAttribute("physics:rigidBodyEnabled").Get(), True)
        # -- collision shape
        prim = prim_utils.get_prim_at_path("/World/Cone/geometry/mesh")
        self.assertEqual(prim.GetAttribute("physics:collisionEnabled").Get(), True)
        # check sim playing
        self.sim.play()
        for _ in range(10):
            self.sim.step()

    """
    Cloning.
    """

    def test_spawn_cone_clones_invalid_paths(self):
        """Test spawning of cone clones on invalid cloning paths."""
        num_clones = 10
        for i in range(num_clones):
            prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0))
        # Spawn cone
        cfg = sim_utils.ConeCfg(radius=1.0, height=2.0, copy_from_source=True)
        # Should raise error for invalid path
        with self.assertRaises(RuntimeError):
            cfg.func("/World/env/env_.*/Cone", cfg)

    def test_spawn_cone_clones(self):
        """Test spawning of cone clones."""
        num_clones = 10
        for i in range(num_clones):
            prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0))
        # Spawn cone
        cfg = sim_utils.ConeCfg(radius=1.0, height=2.0, copy_from_source=True)
        prim = cfg.func("/World/env_.*/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertEqual(prim_utils.get_prim_path(prim), "/World/env_0/Cone")
        # Find matching prims
        prims = prim_utils.find_matching_prim_paths("/World/env_*/Cone")
        self.assertEqual(len(prims), num_clones)

    def test_spawn_cone_clone_with_all_props_global_material(self):
        """Test spawning of cone clones with global material reference."""
        num_clones = 10
        for i in range(num_clones):
            prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0))
        # Spawn cone
        cfg = sim_utils.ConeCfg(
            radius=1.0,
            height=2.0,
            mass_props=sim_utils.MassPropertiesCfg(mass=5.0),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)),
            physics_material=sim_utils.materials.RigidBodyMaterialCfg(),
            visual_material_path="/Looks/visualMaterial",
            physics_material_path="/Looks/physicsMaterial",
        )
        prim = cfg.func("/World/env_.*/Cone", cfg)
        # Check validity
        self.assertTrue(prim.IsValid())
        self.assertEqual(prim_utils.get_prim_path(prim), "/World/env_0/Cone")
        # Find matching prims
        prims = prim_utils.find_matching_prim_paths("/World/env_*/Cone")
        self.assertEqual(len(prims), num_clones)
        # Find global materials
        prims = prim_utils.find_matching_prim_paths("/Looks/visualMaterial.*")
        self.assertEqual(len(prims), 1)


if __name__ == "__main__":
    run_tests()
12,731
Python
40.472313
113
0.623203
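All spawner configurations in the file above share one entry point: calling `cfg.func(prim_path, cfg)` returns the created USD prim. A compact sketch with illustrative values:

import omni.isaac.orbit.sim as sim_utils

cfg = sim_utils.ConeCfg(radius=0.5, height=1.0)
prim = cfg.func("/World/Cone", cfg)  # spawns an Xform with the mesh under .../geometry/mesh
assert prim.IsValid()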
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/sim/test_spawn_sensors.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
simulation_app = AppLauncher(headless=True).app

"""Rest everything follows."""

import unittest

import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sim.spawners.sensors.sensors import (
    CUSTOM_FISHEYE_CAMERA_ATTRIBUTES,
    CUSTOM_PINHOLE_CAMERA_ATTRIBUTES,
)
from omni.isaac.orbit.utils.string import to_camel_case


class TestSpawningSensors(unittest.TestCase):
    """Test fixture for checking spawning of USD sensors with different settings."""

    def setUp(self) -> None:
        """Create a blank new stage for each test."""
        # Create a new stage
        stage_utils.create_new_stage()
        # Simulation time-step
        self.dt = 0.1
        # Load kit helper
        self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
        # Wait for spawning
        stage_utils.update_stage()

    def tearDown(self) -> None:
        """Stops simulator after each test."""
        # stop simulation
        self.sim.stop()
        self.sim.clear()
        self.sim.clear_all_callbacks()
        self.sim.clear_instance()

    """
    Basic spawning.
    """

    def test_spawn_pinhole_camera(self):
        """Test spawning a pinhole camera."""
        cfg = sim_utils.PinholeCameraCfg(
            focal_length=5.0, f_stop=10.0, clipping_range=(0.1, 1000.0), horizontal_aperture=10.0
        )
        prim = cfg.func("/World/pinhole_camera", cfg)
        # check if the camera is spawned
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/pinhole_camera"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Camera")
        # validate properties on the prim
        self._validate_properties_on_prim("/World/pinhole_camera", cfg, CUSTOM_PINHOLE_CAMERA_ATTRIBUTES)

    def test_spawn_fisheye_camera(self):
        """Test spawning a fisheye camera."""
        cfg = sim_utils.FisheyeCameraCfg(
            projection_type="fisheye_equidistant",
            focal_length=5.0,
            f_stop=10.0,
            clipping_range=(0.1, 1000.0),
            horizontal_aperture=10.0,
        )
        # FIXME: This throws a warning. Check with Replicator team if this is expected/known.
        #   [omni.hydra] Camera '/World/fisheye_camera': Unknown projection type, defaulting to pinhole
        prim = cfg.func("/World/fisheye_camera", cfg)
        # check if the camera is spawned
        self.assertTrue(prim.IsValid())
        self.assertTrue(prim_utils.is_prim_path_valid("/World/fisheye_camera"))
        self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Camera")
        # validate properties on the prim
        self._validate_properties_on_prim("/World/fisheye_camera", cfg, CUSTOM_FISHEYE_CAMERA_ATTRIBUTES)

    """
    Helper functions.
    """

    def _validate_properties_on_prim(self, prim_path: str, cfg: object, custom_attr: dict):
        """Validate the properties on the prim.

        Args:
            prim_path: The prim path.
            cfg: The configuration object.
            custom_attr: The custom attributes for the sensor.
        """
        # custom attributes in the config that are not USD parameters
        non_usd_cfg_param_names = ["func", "copy_from_source", "lock_camera", "visible", "semantic_tags"]
        # get prim
        prim = prim_utils.get_prim_at_path(prim_path)
        for attr_name, attr_value in cfg.__dict__.items():
            # skip names we know are not present
            if attr_name in non_usd_cfg_param_names or attr_value is None:
                continue
            # obtain prim property name
            if attr_name in custom_attr:
                # check custom attributes
                prim_prop_name = custom_attr[attr_name][0]
            else:
                # convert attribute name in cfg to the prim property name
                prim_prop_name = to_camel_case(attr_name, to="cC")
            # validate the values
            self.assertAlmostEqual(prim.GetAttribute(prim_prop_name).Get(), attr_value, places=5)


if __name__ == "__main__":
    run_tests()
4,516
Python
35.427419
105
0.636847
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/app/test_env_var_launch.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import os
import unittest

from omni.isaac.orbit.app import AppLauncher, run_tests


class TestAppLauncher(unittest.TestCase):
    """Test launching of the simulation app using AppLauncher."""

    def test_livestream_launch_with_env_var(self):
        """Test launching with no-keyword args but environment variables."""
        # manually set the settings as well to make sure they are set correctly
        os.environ["LIVESTREAM"] = "1"
        # everything defaults to None
        app = AppLauncher().app

        # import settings
        import carb

        # acquire settings interface
        carb_settings_iface = carb.settings.get_settings()
        # check settings
        # -- no-gui mode
        self.assertEqual(carb_settings_iface.get("/app/window/enabled"), False)
        # -- livestream
        self.assertEqual(carb_settings_iface.get("/app/livestream/enabled"), True)

        # close the app on exit
        app.close()


if __name__ == "__main__":
    run_tests()
1,149
Python
27.04878
82
0.657093
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/app/test_argparser_launch.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import argparse
import unittest
from unittest import mock

from omni.isaac.orbit.app import AppLauncher, run_tests


class TestAppLauncher(unittest.TestCase):
    """Test launching of the simulation app using AppLauncher."""

    @mock.patch("argparse.ArgumentParser.parse_args", return_value=argparse.Namespace(livestream=1))
    def test_livestream_launch_with_argparser(self, mock_args):
        """Test launching with argparser arguments."""
        # create argparser
        parser = argparse.ArgumentParser()
        # add app launcher arguments
        AppLauncher.add_app_launcher_args(parser)
        # check that argparser has the mandatory arguments
        for name in AppLauncher._APPLAUNCHER_CFG_INFO:
            self.assertTrue(parser._option_string_actions[f"--{name}"])
        # parse args
        mock_args = parser.parse_args()
        # everything defaults to None
        app = AppLauncher(mock_args).app

        # import settings
        import carb

        # acquire settings interface
        carb_settings_iface = carb.settings.get_settings()
        # check settings
        # -- no-gui mode
        self.assertEqual(carb_settings_iface.get("/app/window/enabled"), False)
        # -- livestream
        self.assertEqual(carb_settings_iface.get("/app/livestream/enabled"), True)

        # close the app on exit
        app.close()


if __name__ == "__main__":
    run_tests()
1,567
Python
30.359999
100
0.665603
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/app/test_kwarg_launch.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import unittest

from omni.isaac.orbit.app import AppLauncher, run_tests


class TestAppLauncher(unittest.TestCase):
    """Test launching of the simulation app using AppLauncher."""

    def test_livestream_launch_with_kwarg(self):
        """Test launching with headless and livestreaming arguments."""
        # everything defaults to None
        app = AppLauncher(headless=True, livestream=1).app

        # import settings
        import carb

        # acquire settings interface
        carb_settings_iface = carb.settings.get_settings()
        # check settings
        # -- no-gui mode
        self.assertEqual(carb_settings_iface.get("/app/window/enabled"), False)
        # -- livestream
        self.assertEqual(carb_settings_iface.get("/app/livestream/enabled"), True)

        # close the app on exit
        app.close()


if __name__ == "__main__":
    run_tests()
1,040
Python
26.394736
82
0.6625
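The three launcher tests above exercise equivalent configuration routes; a side-by-side sketch (only one route would be used per process, so two are shown commented out):

import os

from omni.isaac.orbit.app import AppLauncher

# 1. environment variable, picked up when no explicit argument is given
os.environ["LIVESTREAM"] = "1"
app = AppLauncher().app

# 2. keyword arguments:
#     app = AppLauncher(headless=True, livestream=1).app

# 3. argparse namespace, after AppLauncher.add_app_launcher_args(parser):
#     app = AppLauncher(parser.parse_args()).app

app.close()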
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/scene/check_interactive_scene.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the scene interface to quickly set up a scene
with multiple articulated robots and sensors.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the scene interface.")
parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors.ray_caster import RayCasterCfg, patterns
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.timer import Timer

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort: skip


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Example scene configuration."""

    # terrain - flat terrain plane
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="plane",
    )

    # articulation - robot 1
    robot_1 = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot_1")
    # articulation - robot 2
    robot_2 = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot_2")
    robot_2.init_state.pos = (0.0, 1.0, 0.6)

    # sensor - ray caster attached to the base of robot 1 that scans the ground
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot_1/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/ground"],
    )

    # extras - light
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, 500.0)),
    )


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
    # Set main camera
    sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0])

    # Spawn things into stage
    with Timer("Setup scene"):
        scene = InteractiveScene(MySceneCfg(num_envs=args_cli.num_envs, env_spacing=5.0, lazy_sensor_update=False))

    # Check that parsing happened as expected
    assert len(scene.env_prim_paths) == args_cli.num_envs, "Number of environments does not match."
    assert scene.terrain is not None, "Terrain not found."
    assert len(scene.articulations) == 2, "Number of robots does not match."
    assert len(scene.sensors) == 1, "Number of sensors does not match."
    assert len(scene.extras) == 1, "Number of extras does not match."

    # Play the simulator
    with Timer("Time taken to play the simulator"):
        sim.reset()

    # Now we are ready!
    print("[INFO]: Setup complete...")

    # default joint targets
    robot_1_actions = scene.articulations["robot_1"].data.default_joint_pos.clone()
    robot_2_actions = scene.articulations["robot_2"].data.default_joint_pos.clone()
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if sim.is_stopped():
            break
        # If simulation is paused, then skip.
        if not sim.is_playing():
            sim.step()
            continue
        # reset
        if count % 50 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset root state
            root_state = scene.articulations["robot_1"].data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            joint_pos = scene.articulations["robot_1"].data.default_joint_pos
            joint_vel = scene.articulations["robot_1"].data.default_joint_vel
            # -- set root state
            # -- robot 1
            scene.articulations["robot_1"].write_root_state_to_sim(root_state)
            scene.articulations["robot_1"].write_joint_state_to_sim(joint_pos, joint_vel)
            # -- robot 2
            root_state[:, 1] += 1.0
            scene.articulations["robot_2"].write_root_state_to_sim(root_state)
            scene.articulations["robot_2"].write_joint_state_to_sim(joint_pos, joint_vel)
            # reset buffers
            scene.reset()
            print(">>>>>>>> Reset!")
        # perform this loop at policy control freq (50 Hz)
        for _ in range(4):
            # set joint targets
            scene.articulations["robot_1"].set_joint_position_target(robot_1_actions)
            scene.articulations["robot_2"].set_joint_position_target(robot_2_actions)
            # write data to sim
            scene.write_data_to_sim()
            # perform step
            sim.step()
            # read data from sim
            scene.update(sim_dt)
        # update sim-time
        sim_time += sim_dt * 4
        count += 1


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,794
Python
34.121212
115
0.643942
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/scene/test_interactive_scene.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
simulation_app = AppLauncher(headless=True).app

"""Rest everything follows."""

import unittest

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg
from omni.isaac.orbit.sim import build_simulation_context
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Example scene configuration."""

    # terrain - flat terrain plane
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="plane",
    )

    # articulation
    robot = ArticulationCfg(
        prim_path="/World/Robot",
        spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Simple/revolute_articulation.usd"),
        actuators={
            "joint": ImplicitActuatorCfg(),
        },
    )

    # rigid object
    rigid_obj = RigidObjectCfg(
        prim_path="/World/RigidObj",
        spawn=sim_utils.CuboidCfg(
            size=(0.5, 0.5, 0.5),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                disable_gravity=False,
            ),
            collision_props=sim_utils.CollisionPropertiesCfg(
                collision_enabled=True,
            ),
        ),
    )

    # sensor
    sensor = ContactSensorCfg(
        prim_path="/World/Robot",
    )

    # extras - light
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(),
    )


class TestInteractiveScene(unittest.TestCase):
    """Test cases for InteractiveScene."""

    def setUp(self) -> None:
        self.devices = ["cuda:0", "cpu"]
        self.sim_dt = 0.001
        self.scene_cfg = MySceneCfg(num_envs=1, env_spacing=1)

    def test_scene_entity_isolation(self):
        """Tests that multiple instances of InteractiveScene do not share any data.

        In this test, two InteractiveScene instances are created in a loop and added to a list.
        The scene at index 0 of the list will have all of its entities cleared manually, and
        the test compares that the data held in the scene at index 1 remained intact.
        """
        for device in self.devices:
            scene_list = []
            # create two InteractiveScene instances
            for _ in range(2):
                with build_simulation_context(device=device, dt=self.sim_dt) as _:
                    scene = InteractiveScene(MySceneCfg(num_envs=1, env_spacing=1))
                    scene_list.append(scene)
            scene_0 = scene_list[0]
            scene_1 = scene_list[1]
            # clear entities for scene_0 - this should not affect any data in scene_1
            scene_0.articulations.clear()
            scene_0.rigid_objects.clear()
            scene_0.sensors.clear()
            scene_0.extras.clear()
            # check that scene_0 and scene_1 do not share entity data via dictionary comparison
            self.assertEqual(scene_0.articulations, dict())
            self.assertNotEqual(scene_0.articulations, scene_1.articulations)
            self.assertEqual(scene_0.rigid_objects, dict())
            self.assertNotEqual(scene_0.rigid_objects, scene_1.rigid_objects)
            self.assertEqual(scene_0.sensors, dict())
            self.assertNotEqual(scene_0.sensors, scene_1.sensors)
            self.assertEqual(scene_0.extras, dict())
            self.assertNotEqual(scene_0.extras, scene_1.extras)


if __name__ == "__main__":
    run_tests()
4,032
Python
34.069565
108
0.650546
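Distilled from the two scene scripts above, the typical InteractiveScene access pattern; entity keys mirror the attribute names of the scene config, and the values below are illustrative:

# assumes a config class like MySceneCfg defined above
scene = InteractiveScene(MySceneCfg(num_envs=4, env_spacing=2.0))
robot = scene.articulations["robot"]  # looked up by config attribute name
scene.write_data_to_sim()  # push buffered commands to the simulator
scene.update(0.005)  # refresh entity buffers after sim.step()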
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_terrain_generator.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
# note: we only need to do this because of `TerrainImporter` which uses Omniverse functions
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import shutil

from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.orbit.terrains.terrain_generator import TerrainGenerator


def main():
    # Create directory to dump results
    test_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(test_dir, "output", "generator")
    # remove directory
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    # create directory
    os.makedirs(output_dir, exist_ok=True)
    # modify the config to cache
    ROUGH_TERRAINS_CFG.use_cache = True
    ROUGH_TERRAINS_CFG.cache_dir = output_dir
    ROUGH_TERRAINS_CFG.curriculum = False
    # generate terrains
    terrain_generator = TerrainGenerator(cfg=ROUGH_TERRAINS_CFG)  # noqa: F841


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
1,349
Python
27.124999
91
0.716827
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_mesh_subterrains.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

parser = argparse.ArgumentParser(description="Generate terrains using trimesh")
parser.add_argument(
    "--headless", action="store_true", default=False, help="Don't create a window to display each output."
)
args_cli = parser.parse_args()

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
# note: we only need to do this because of `TerrainImporter` which uses Omniverse functions
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import trimesh

import omni.isaac.orbit.terrains.trimesh as mesh_gen
from omni.isaac.orbit.terrains.utils import color_meshes_by_height


def test_flat_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshPlaneTerrainCfg(size=(8.0, 8.0))
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "flat_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Flat Terrain")


def test_pyramid_stairs_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshPyramidStairsTerrainCfg(
        size=(8.0, 8.0),
        border_width=0.2,
        step_width=0.3,
        step_height_range=(0.05, 0.23),
        platform_width=1.5,
        holes=holes,
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if holes:
        caption = "Pyramid Stairs Terrain with Holes"
        filename = "pyramid_stairs_terrain_with_holes.jpg"
    else:
        caption = "Pyramid Stairs Terrain"
        filename = "pyramid_stairs_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_inverted_pyramid_stairs_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshInvertedPyramidStairsTerrainCfg(
        size=(8.0, 8.0),
        border_width=0.2,
        step_width=0.3,
        step_height_range=(0.05, 0.23),
        platform_width=1.5,
        holes=holes,
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if holes:
        caption = "Inverted Pyramid Stairs Terrain with Holes"
        filename = "inverted_pyramid_stairs_terrain_with_holes.jpg"
    else:
        caption = "Inverted Pyramid Stairs Terrain"
        filename = "inverted_pyramid_stairs_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_random_grid_terrain(difficulty: float, holes: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshRandomGridTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        grid_width=0.75,
        grid_height_range=(0.025, 0.2),
        holes=holes,
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if holes:
        caption = "Random Grid Terrain with Holes"
        filename = "random_grid_terrain_with_holes.jpg"
    else:
        caption = "Random Grid Terrain"
        filename = "random_grid_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_rails_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshRailsTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        rail_thickness_range=(0.05, 0.1),
        rail_height_range=(0.05, 0.3),
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "rails_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Rail Terrain")


def test_pit_terrain(difficulty: float, double_pit: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshPitTerrainCfg(
        size=(8.0, 8.0), platform_width=1.5, pit_depth_range=(0.05, 1.1), double_pit=double_pit
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if double_pit:
        caption = "Pit Terrain with Two Levels"
        filename = "pit_terrain_with_two_levels.jpg"
    else:
        caption = "Pit Terrain"
        filename = "pit_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_box_terrain(difficulty: float, double_box: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshBoxTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        box_height_range=(0.05, 0.2),
        double_box=double_box,
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if double_box:
        caption = "Box Terrain with Two Levels"
        filename = "box_terrain_with_two_boxes.jpg"
    else:
        caption = "Box Terrain"
        filename = "box_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_gap_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshGapTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        gap_width_range=(0.05, 1.1),
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "gap_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Gap Terrain")


def test_floating_ring_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshFloatingRingTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        ring_height_range=(0.4, 1.0),
        ring_width_range=(0.5, 1.0),
        ring_thickness=0.05,
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "floating_ring_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Floating Ring Terrain")


def test_star_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = mesh_gen.MeshStarTerrainCfg(
        size=(8.0, 8.0),
        platform_width=1.5,
        num_bars=5,
        bar_width_range=(0.5, 1.0),
        bar_height_range=(0.05, 0.2),
    )
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "star_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Star Terrain")


def test_repeated_objects_terrain(
    difficulty: float, object_type: str, output_dir: str, headless: bool, provide_as_string: bool = False
):
    # parameters for the terrain
    if object_type == "pyramid":
        cfg = mesh_gen.MeshRepeatedPyramidsTerrainCfg(
            size=(8.0, 8.0),
            platform_width=1.5,
            max_height_noise=0.5,
            object_params_start=mesh_gen.MeshRepeatedPyramidsTerrainCfg.ObjectCfg(
                num_objects=40, height=0.05, radius=0.6, max_yx_angle=0.0, degrees=True
            ),
            object_params_end=mesh_gen.MeshRepeatedPyramidsTerrainCfg.ObjectCfg(
                num_objects=80, height=0.15, radius=0.6, max_yx_angle=60.0, degrees=True
            ),
        )
    elif object_type == "box":
        cfg = mesh_gen.MeshRepeatedBoxesTerrainCfg(
            size=(8.0, 8.0),
            platform_width=1.5,
            max_height_noise=0.5,
            object_params_start=mesh_gen.MeshRepeatedBoxesTerrainCfg.ObjectCfg(
                num_objects=40, height=0.05, size=(0.6, 0.6), max_yx_angle=0.0, degrees=True
            ),
            object_params_end=mesh_gen.MeshRepeatedBoxesTerrainCfg.ObjectCfg(
                num_objects=80, height=0.15, size=(0.6, 0.6), max_yx_angle=60.0, degrees=True
            ),
        )
    elif object_type == "cylinder":
        cfg = mesh_gen.MeshRepeatedCylindersTerrainCfg(
            size=(8.0, 8.0),
            platform_width=1.5,
            max_height_noise=0.5,
            object_params_start=mesh_gen.MeshRepeatedCylindersTerrainCfg.ObjectCfg(
                num_objects=40, height=0.05, radius=0.6, max_yx_angle=0.0, degrees=True
            ),
            object_params_end=mesh_gen.MeshRepeatedCylindersTerrainCfg.ObjectCfg(
                num_objects=80, height=0.15, radius=0.6, max_yx_angle=60.0, degrees=True
            ),
        )
    else:
        raise ValueError(f"Invalid object type for repeated objects terrain: {object_type}")
    # provide object_type as string (check that the import works)
    if provide_as_string:
        cfg.object_type = object_type
    # generate the terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # save the scene to a png file
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, f"repeated_objects_{object_type}_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=f"Repeated Objects Terrain: {object_type}")


def main():
    # Create directory to dump results
    test_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(test_dir, "output", "terrains", "trimesh")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)
    # Read headless mode
    headless = args_cli.headless
    # generate terrains
    test_flat_terrain(difficulty=0.0, output_dir=output_dir, headless=headless)
    test_pyramid_stairs_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
    test_pyramid_stairs_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
    test_inverted_pyramid_stairs_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
    test_inverted_pyramid_stairs_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
    test_random_grid_terrain(difficulty=0.75, holes=False, output_dir=output_dir, headless=headless)
    test_random_grid_terrain(difficulty=0.75, holes=True, output_dir=output_dir, headless=headless)
    test_star_terrain(difficulty=0.75, output_dir=output_dir, headless=headless)
    test_repeated_objects_terrain(difficulty=0.75, object_type="pyramid", output_dir=output_dir, headless=headless)
    test_repeated_objects_terrain(difficulty=0.75, object_type="cylinder", output_dir=output_dir, headless=headless)
    test_repeated_objects_terrain(difficulty=0.75, object_type="box", output_dir=output_dir, headless=headless)
    test_repeated_objects_terrain(
        difficulty=0.75, object_type="cylinder", provide_as_string=True, output_dir=output_dir, headless=headless
    )


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
17,826
Python
40.076037
116
0.674296
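Every test function in the script above ends with the same render-and-save tail; the shared pattern as a standalone helper (the function name is ours, not from the repo):

import os

import trimesh

from omni.isaac.orbit.terrains.utils import color_meshes_by_height


def save_terrain_preview(meshes, origin, image_path: str, caption: str, headless: bool):
    """Color sub-meshes by height, mark the origin, and save a 640x480 preview image."""
    colored_mesh = color_meshes_by_height(meshes)
    origin_marker = trimesh.creation.axis(
        origin_size=0.1, transform=trimesh.transformations.translation_matrix(origin)
    )
    scene = trimesh.Scene([colored_mesh, origin_marker])
    with open(image_path, "wb") as f:
        f.write(scene.save_image(resolution=(640, 480)))
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)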
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the terrain generator from the Orbit framework.

The terrains are generated using the :class:`TerrainGenerator` class and imported using
the :class:`TerrainImporter` class. The terrains can be imported from a file or generated
procedurally.

Example usage:

.. code-block:: bash

    # generate terrain
    # -- use physics sphere mesh
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type generator
    # -- use usd sphere geom
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type generator --geom_sphere
    # usd terrain
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type usd
    # plane terrain
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/terrains/check_terrain_importer.py --terrain_type plane
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

# omni-isaac-orbit
from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script shows how to use the terrain importer.")
parser.add_argument("--geom_sphere", action="store_true", default=False, help="Whether to use sphere mesh or shape.")
parser.add_argument(
    "--terrain_type",
    type=str,
    choices=["generator", "usd", "plane"],
    default="generator",
    help="Type of terrain to import. Can be 'generator' or 'usd' or 'plane'.",
)
parser.add_argument(
    "--color_scheme",
    type=str,
    default="height",
    choices=["height", "random", "none"],
    help="The color scheme to use for the generated terrain.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np

import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from omni.isaac.cloner import GridCloner
from omni.isaac.core.materials import PhysicsMaterial, PreviewSurface
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.terrains as terrain_gen
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.orbit.terrains.terrain_importer import TerrainImporter
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR


def main():
    """Generates a terrain from orbit."""
    # Load kit helper
    sim_params = {
        "use_gpu": True,
        "use_gpu_pipeline": True,
        "use_flatcache": True,
        "use_fabric": True,
        "enable_scene_query_support": True,
    }
    sim = SimulationContext(
        physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0"
    )
    # Set main camera
    set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5])

    # Parameters
    num_balls = 2048

    # Create interface to clone the scene
    cloner = GridCloner(spacing=2.0)
    cloner.define_base_env("/World/envs")
    # Everything under the namespace "/World/envs/env_0" will be cloned
    prim_utils.define_prim("/World/envs/env_0")

    # Handler for terrains importing
    terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
        num_envs=2048,
        env_spacing=3.0,
        prim_path="/World/ground",
        max_init_terrain_level=None,
        terrain_type=args_cli.terrain_type,
        terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True, color_scheme=args_cli.color_scheme),
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
    )
    terrain_importer = TerrainImporter(terrain_importer_cfg)

    # Define the scene
    # -- Light
    cfg = sim_utils.DistantLightCfg(intensity=1000.0)
    cfg.func("/World/Light", cfg)
    # -- Ball
    if args_cli.geom_sphere:
        # -- Ball physics
        _ = DynamicSphere(
            prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25
        )
    else:
        # -- Ball geometry
        sphere_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1]
        prim_utils.move_prim(sphere_prim_path, "/World/envs/env_0/ball")
        # -- Ball physics
        RigidPrim(prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5))
        GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
    # -- Ball material
    sphere_geom = GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
    visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0]))
    physics_material = PhysicsMaterial(
        prim_path="/World/Looks/ballPhysicsMaterial",
        dynamic_friction=1.0,
        static_friction=0.2,
        restitution=0.0,
    )
    sphere_geom.set_collision_approximation("convexHull")
    sphere_geom.apply_visual_material(visual_material)
    sphere_geom.apply_physics_material(physics_material)

    # Clone the scene
    cloner.define_base_env("/World/envs")
    envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls)
    cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
    physics_scene_path = sim.get_physics_context().prim_path
    cloner.filter_collisions(
        physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
    )

    # Set ball positions over terrain origins
    # Create a view over all the balls
    ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
    # cache initial state of the balls
    ball_initial_positions = terrain_importer.env_origins
    ball_initial_positions[:, 2] += 5.0
    # set initial poses
    # note: setting here writes to USD :)
    ball_view.set_world_poses(positions=ball_initial_positions)

    # Play simulator
    sim.reset()
    # Initialize the ball views for physics simulation
    ball_view.initialize()
    ball_initial_velocities = ball_view.get_velocities()

    # Create a counter for resetting the scene
    step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # If simulation is stopped, then exit.
        if sim.is_stopped():
            break
        # If simulation is paused, then skip.
        if not sim.is_playing():
            sim.step()
            continue
        # Reset the scene
        if step_count % 500 == 0:
            # reset the balls
            ball_view.set_world_poses(positions=ball_initial_positions)
            ball_view.set_velocities(ball_initial_velocities)
            # reset the counter
            step_count = 0
        # Step simulation
        sim.step()
        # Update counter
        step_count += 1


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,376
Python
34.637681
131
0.680857
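The smallest importer setup common to the script above and the unit test at the end of this section (the number of environments is illustrative):

from omni.isaac.orbit.terrains import TerrainImporter, TerrainImporterCfg

cfg = TerrainImporterCfg(
    num_envs=16,  # illustrative value
    env_spacing=3.0,
    prim_path="/World/ground",
    terrain_type="plane",  # "generator" and "usd" are exercised in the script above
)
importer = TerrainImporter(cfg)
origins = importer.env_origins  # per-environment spawn origins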
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/terrains/check_height_field_subterrains.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

parser = argparse.ArgumentParser(description="Generate terrains using trimesh")
parser.add_argument(
    "--headless", action="store_true", default=False, help="Don't create a window to display each output."
)
args_cli = parser.parse_args()

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
# note: we only need to do this because of `TerrainImporter` which uses Omniverse functions
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import trimesh

import omni.isaac.orbit.terrains.height_field as hf_gen
from omni.isaac.orbit.terrains.utils import color_meshes_by_height


def test_random_uniform_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfRandomUniformTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        border_width=0.0,
        noise_range=(-0.05, 0.05),
        noise_step=0.005,
        downsampled_scale=0.2,
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "random_uniform_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Random Uniform Terrain")


def test_pyramid_sloped_terrain(difficulty: float, inverted: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfPyramidSlopedTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        border_width=0.0,
        slope_range=(0.0, 0.4),
        platform_width=1.5,
        inverted=inverted,
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if inverted:
        caption = "Inverted Pyramid Sloped Terrain"
        filename = "inverted_pyramid_sloped_terrain.jpg"
    else:
        caption = "Pyramid Sloped Terrain"
        filename = "pyramid_sloped_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_pyramid_stairs_terrain(difficulty: float, inverted: bool, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfPyramidStairsTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        border_width=0.0,
        platform_width=1.5,
        step_width=0.301,
        step_height_range=(0.05, 0.23),
        inverted=inverted,
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if inverted:
        caption = "Inverted Pyramid Stairs Terrain"
        filename = "inverted_pyramid_stairs_terrain.jpg"
    else:
        caption = "Pyramid Stairs Terrain"
        filename = "pyramid_stairs_terrain.jpg"
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_discrete_obstacles_terrain(difficulty: float, obstacle_height_mode: str, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfDiscreteObstaclesTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        border_width=0.0,
        num_obstacles=50,
        obstacle_height_mode=obstacle_height_mode,
        obstacle_width_range=(0.25, 0.75),
        obstacle_height_range=(1.0, 2.0),
        platform_width=1.5,
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # resolve file name
    if obstacle_height_mode == "choice":
        caption = "Discrete Obstacles Terrain (Sampled Height)"
        filename = "discrete_obstacles_terrain_choice.jpg"
    elif obstacle_height_mode == "fixed":
        caption = "Discrete Obstacles Terrain (Fixed Height)"
        filename = "discrete_obstacles_terrain_fixed.jpg"
    else:
        raise ValueError(f"Unknown obstacle height mode: {obstacle_height_mode}")
    # write the image to a file
    with open(os.path.join(output_dir, filename), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption=caption)


def test_wave_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfWaveTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        border_width=0.0,
        num_waves=5,
        amplitude_range=(0.5, 1.0),
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "wave_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Wave Terrain")


def test_stepping_stones_terrain(difficulty: float, output_dir: str, headless: bool):
    # parameters for the terrain
    cfg = hf_gen.HfSteppingStonesTerrainCfg(
        size=(8.0, 8.0),
        horizontal_scale=0.1,
        vertical_scale=0.005,
        platform_width=1.5,
        border_width=0.0,
        stone_width_range=(0.25, 1.575),
        stone_height_max=0.2,
        stone_distance_range=(0.05, 0.1),
        holes_depth=-2.0,
    )
    # generate terrain
    meshes, origin = cfg.function(difficulty=difficulty, cfg=cfg)
    # add colors to the meshes based on the height
    colored_mesh = color_meshes_by_height(meshes)
    # add a marker for the origin
    origin_transform = trimesh.transformations.translation_matrix(origin)
    origin_marker = trimesh.creation.axis(origin_size=0.1, transform=origin_transform)
    # visualize the meshes
    scene = trimesh.Scene([colored_mesh, origin_marker])
    # render the scene to an image buffer
    data = scene.save_image(resolution=(640, 480))
    # write the image to a file
    with open(os.path.join(output_dir, "stepping_stones_terrain.jpg"), "wb") as f:
        f.write(data)
    # show the scene in a window
    if not headless:
        trimesh.viewer.SceneViewer(scene=scene, caption="Stepping Stones Terrain")


def main():
    # Create directory to dump results
    test_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(test_dir, "output", "terrains", "height_field")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)
    # Read headless mode
    headless = args_cli.headless
    # generate terrains
    test_random_uniform_terrain(difficulty=0.25, output_dir=output_dir, headless=headless)
    test_pyramid_sloped_terrain(difficulty=0.25, inverted=False, output_dir=output_dir, headless=headless)
    test_pyramid_sloped_terrain(difficulty=0.25, inverted=True, output_dir=output_dir, headless=headless)
    test_pyramid_stairs_terrain(difficulty=0.25, inverted=False, output_dir=output_dir, headless=headless)
    test_pyramid_stairs_terrain(difficulty=0.25, inverted=True, output_dir=output_dir, headless=headless)
    test_discrete_obstacles_terrain(
        difficulty=0.25, obstacle_height_mode="choice", output_dir=output_dir, headless=headless
    )
    test_discrete_obstacles_terrain(
        difficulty=0.25, obstacle_height_mode="fixed", output_dir=output_dir, headless=headless
    )
    test_wave_terrain(difficulty=0.25, output_dir=output_dir, headless=headless)
    test_stepping_stones_terrain(difficulty=1.0, output_dir=output_dir, headless=headless)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
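# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of how a random-uniform
# height field like the one above can be generated with plain numpy. The
# function name and the exact sampling scheme are assumptions for
# illustration only; the real generator lives in
# `omni.isaac.orbit.terrains.height_field`.
import numpy as np


def sketch_random_uniform_height_field(
    size=(8.0, 8.0), horizontal_scale=0.1, vertical_scale=0.005, noise_range=(-0.05, 0.05), noise_step=0.005
):
    # number of height-field pixels along each side
    width, length = (int(s / horizontal_scale) for s in size)
    # sample heights (in meters) on a discrete grid of `noise_step` increments
    height_choices = np.arange(noise_range[0], noise_range[1] + noise_step, noise_step)
    heights = np.random.choice(height_choices, size=(width, length))
    # convert meters to integer height-field units
    return np.rint(heights / vertical_scale).astype(np.int16)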
10,545
Python
38.059259
115
0.681081
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/terrains/test_terrain_importer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch
import unittest

import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.terrains import TerrainImporter, TerrainImporterCfg


class TestTerrainImporter(unittest.TestCase):
    """Test the terrain importer for different ground and procedural terrains."""

    def test_grid_clone_env_origins(self):
        """Tests that env origins are consistent when computed using the TerrainImporter and IsaacSim GridCloner."""
        # iterate over different number of environments and environment spacing
        for env_spacing in [1.0, 4.325, 8.0]:
            for num_envs in [1, 4, 125, 379, 1024]:
                with self.subTest(num_envs=num_envs, env_spacing=env_spacing):
                    with sim_utils.build_simulation_context(auto_add_lighting=True) as sim:
                        # create terrain importer
                        terrain_importer_cfg = TerrainImporterCfg(
                            num_envs=num_envs,
                            env_spacing=env_spacing,
                            prim_path="/World/ground",
                            terrain_type="plane",  # for flat ground, origins are in grid
                            terrain_generator=None,
                        )
                        terrain_importer = TerrainImporter(terrain_importer_cfg)
                        # obtain env origins using terrain importer
                        terrain_importer_origins = terrain_importer.env_origins
                        # obtain env origins using grid cloner
                        grid_cloner_origins = self.obtain_grid_cloner_env_origins(
                            num_envs, env_spacing, device=sim.device
                        )
                        # check if the env origins are the same
                        torch.testing.assert_close(terrain_importer_origins, grid_cloner_origins, rtol=1e-5, atol=1e-5)

    """
    Helper functions.
    """

    @staticmethod
    def obtain_grid_cloner_env_origins(num_envs: int, env_spacing: float, device: str) -> torch.Tensor:
        """Obtain the env origins generated by IsaacSim GridCloner (grid_cloner.py)."""
        # create grid cloner
        cloner = GridCloner(spacing=env_spacing)
        cloner.define_base_env("/World/envs")
        envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
        prim_utils.define_prim("/World/envs/env_0")
        # clone envs using grid cloner
        env_origins = cloner.clone(
            source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True
        )
        # return as tensor
        return torch.tensor(env_origins, dtype=torch.float32, device=device)


if __name__ == "__main__":
    # run main
    runner = unittest.main(verbosity=2, exit=False)
    # close sim app
    simulation_app.close()
    # report success
    exit(not runner.result.wasSuccessful())
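# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the layout the test
# expects: environment origins arranged row-major on a roughly square 2D grid
# with `env_spacing` between neighbors, centered at the world origin. This is
# a hypothetical stand-in for what `GridCloner` computes, for illustration
# only.
import math


def sketch_grid_env_origins(num_envs: int, env_spacing: float) -> torch.Tensor:
    num_rows = math.ceil(math.sqrt(num_envs))
    num_cols = math.ceil(num_envs / num_rows)
    idx = torch.arange(num_envs)
    rows, cols = idx // num_cols, idx % num_cols
    origins = torch.zeros(num_envs, 3)
    origins[:, 0] = env_spacing * (rows - (num_rows - 1) / 2)
    origins[:, 1] = env_spacing * (cols - (num_cols - 1) / 2)
    return origins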
3,361
Python
38.552941
119
0.618566
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/utils/test_string.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import random
import unittest

import omni.isaac.orbit.utils.string as string_utils


class TestStringUtilities(unittest.TestCase):
    """Test fixture for checking string utilities."""

    def test_case_conversion(self):
        """Test case conversion between camel case and snake case."""
        # test camel case to snake case
        self.assertEqual(string_utils.to_snake_case("CamelCase"), "camel_case")
        self.assertEqual(string_utils.to_snake_case("camelCase"), "camel_case")
        self.assertEqual(string_utils.to_snake_case("CamelCaseString"), "camel_case_string")
        # test snake case to camel case
        self.assertEqual(string_utils.to_camel_case("snake_case", to="CC"), "SnakeCase")
        self.assertEqual(string_utils.to_camel_case("snake_case_string", to="CC"), "SnakeCaseString")
        self.assertEqual(string_utils.to_camel_case("snake_case_string", to="cC"), "snakeCaseString")

    def test_resolve_matching_names_with_basic_strings(self):
        """Test resolving matching names with a basic expression."""
        # list of strings
        target_names = ["a", "b", "c", "d", "e"]
        # test matching names
        query_names = ["a|c", "b"]
        index_list, names_list = string_utils.resolve_matching_names(query_names, target_names)
        self.assertEqual(index_list, [0, 1, 2])
        self.assertEqual(names_list, ["a", "b", "c"])
        # test matching names with regex
        query_names = ["a.*", "b"]
        index_list, names_list = string_utils.resolve_matching_names(query_names, target_names)
        self.assertEqual(index_list, [0, 1])
        self.assertEqual(names_list, ["a", "b"])
        # test duplicate names
        query_names = ["a|c", "b", "a|c"]
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names(query_names, target_names)
        # test no regex match
        query_names = ["a|c", "b", "f"]
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names(query_names, target_names)

    def test_resolve_matching_names_with_joint_name_strings(self):
        """Test resolving matching names with joint names."""
        # list of strings
        robot_joint_names = []
        for i in ["hip", "thigh", "calf"]:
            for j in ["FL", "FR", "RL", "RR"]:
                robot_joint_names.append(f"{j}_{i}_joint")
        # test matching names
        index_list, names_list = string_utils.resolve_matching_names(".*", robot_joint_names)
        self.assertEqual(index_list, list(range(len(robot_joint_names))))
        self.assertEqual(names_list, robot_joint_names)
        # test matching names with regex
        index_list, names_list = string_utils.resolve_matching_names(".*_joint", robot_joint_names)
        self.assertEqual(index_list, list(range(len(robot_joint_names))))
        self.assertEqual(names_list, robot_joint_names)
        # test matching names with regex
        index_list, names_list = string_utils.resolve_matching_names(["FL.*", "FR.*"], robot_joint_names)
        ground_truth_index_list = [0, 1, 4, 5, 8, 9]
        self.assertEqual(index_list, ground_truth_index_list)
        self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
        # test matching names with explicit joint names
        query_list = [
            "FL_hip_joint",
            "FL_thigh_joint",
            "FR_hip_joint",
            "FR_thigh_joint",
            "FL_calf_joint",
            "FR_calf_joint",
        ]
        index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names)
        ground_truth_index_list = [0, 1, 4, 5, 8, 9]
        self.assertNotEqual(names_list, query_list)
        self.assertEqual(index_list, ground_truth_index_list)
        self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
        # test matching names with explicit joint names but shuffled
        # randomize order of previous query list
        random.shuffle(query_list)
        index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names)
        ground_truth_index_list = [0, 1, 4, 5, 8, 9]
        self.assertNotEqual(names_list, query_list)
        self.assertEqual(index_list, ground_truth_index_list)
        self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])

    def test_resolve_matching_names_with_preserved_order(self):
        """Test resolving matching names while preserving the query ordering."""
        # list of strings and query list
        robot_joint_names = []
        for i in ["hip", "thigh", "calf"]:
            for j in ["FL", "FR", "RL", "RR"]:
                robot_joint_names.append(f"{j}_{i}_joint")
        query_list = [
            "FL_hip_joint",
            "FL_thigh_joint",
            "FR_hip_joint",
            "FR_thigh_joint",
            "FL_calf_joint",
            "FR_calf_joint",
        ]
        # test return in query ordering with sublist
        query_list.reverse()
        index_list, names_list = string_utils.resolve_matching_names(query_list, robot_joint_names, preserve_order=True)
        ground_truth_index_list = [9, 8, 5, 1, 4, 0]
        self.assertEqual(names_list, query_list)
        self.assertEqual(index_list, ground_truth_index_list)
        # test return in query ordering with regex expression
        index_list, names_list = string_utils.resolve_matching_names(
            ["FR.*", "FL.*"], robot_joint_names, preserve_order=True
        )
        ground_truth_index_list = [1, 5, 9, 0, 4, 8]
        self.assertEqual(index_list, ground_truth_index_list)
        self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])
        # test return in query ordering with a mix of regex and non-regex expression
        index_list, names_list = string_utils.resolve_matching_names(
            ["FR.*", "FL_calf_joint", "FL_thigh_joint", "FL_hip_joint"], robot_joint_names, preserve_order=True
        )
        ground_truth_index_list = [1, 5, 9, 8, 4, 0]
        self.assertEqual(index_list, ground_truth_index_list)
        self.assertEqual(names_list, [robot_joint_names[i] for i in ground_truth_index_list])

    def test_resolve_matching_names_values_with_basic_strings(self):
        """Test resolving matching names and values with a basic expression."""
        # list of strings
        target_names = ["a", "b", "c", "d", "e"]
        # test matching names
        data = {"a|c": 1, "b": 2}
        index_list, names_list, values_list = string_utils.resolve_matching_names_values(data, target_names)
        self.assertEqual(index_list, [0, 1, 2])
        self.assertEqual(names_list, ["a", "b", "c"])
        self.assertEqual(values_list, [1, 2, 1])
        # test matching names with regex
        data = {"a|d|e": 1, "b|c": 2}
        index_list, names_list, values_list = string_utils.resolve_matching_names_values(data, target_names)
        self.assertEqual(index_list, [0, 1, 2, 3, 4])
        self.assertEqual(names_list, ["a", "b", "c", "d", "e"])
        self.assertEqual(values_list, [1, 2, 2, 1, 1])
        # test overlapping regex matches
        data = {"a|d|e|b": 1, "b|c": 2}
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names_values(data, target_names)
        # test no regex match
        query_names = {"a|c": 1, "b": 0, "f": 2}
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names_values(query_names, target_names)

    def test_resolve_matching_names_values_with_basic_strings_and_preserved_order(self):
        """Test resolving matching names and values while preserving the query ordering."""
        # list of strings
        target_names = ["a", "b", "c", "d", "e"]
        # test matching names
        data = {"a|c": 1, "b": 2}
        index_list, names_list, values_list = string_utils.resolve_matching_names_values(
            data, target_names, preserve_order=True
        )
        self.assertEqual(index_list, [0, 2, 1])
        self.assertEqual(names_list, ["a", "c", "b"])
        self.assertEqual(values_list, [1, 1, 2])
        # test matching names with regex
        data = {"a|d|e": 1, "b|c": 2}
        index_list, names_list, values_list = string_utils.resolve_matching_names_values(
            data, target_names, preserve_order=True
        )
        self.assertEqual(index_list, [0, 3, 4, 1, 2])
        self.assertEqual(names_list, ["a", "d", "e", "b", "c"])
        self.assertEqual(values_list, [1, 1, 1, 2, 2])
        # test overlapping regex matches
        data = {"a|d|e|b": 1, "b|c": 2}
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names_values(data, target_names, preserve_order=True)
        # test no regex match
        query_names = {"a|c": 1, "b": 0, "f": 2}
        with self.assertRaises(ValueError):
            _ = string_utils.resolve_matching_names_values(query_names, target_names, preserve_order=True)


if __name__ == "__main__":
    run_tests()
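# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the default matching
# semantics exercised above: returned indices follow the order of
# `target_names`, and a target matches if any query expression matches it
# fully. This is a simplified, hypothetical re-implementation for
# illustration; the real utility also handles `preserve_order`, duplicate
# detection, and unmatched-query errors.
import re


def sketch_resolve_matching_names(queries: list[str], target_names: list[str]) -> tuple[list[int], list[str]]:
    indices, names = [], []
    for index, name in enumerate(target_names):
        if any(re.fullmatch(query, name) for query in queries):
            indices.append(index)
            names.append(name)
    return indices, names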
9,518
Python
47.075757
120
0.61536
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/utils/test_dict.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import unittest

import omni.isaac.orbit.utils.dict as dict_utils


def test_function(x):
    """Test function for string <-> callable conversion."""
    return x**2


def test_lambda_function(x):
    """Test function for string <-> callable conversion."""
    return x**2


class TestDictUtilities(unittest.TestCase):
    """Test fixture for checking dictionary utilities in Orbit."""

    def test_print_dict(self):
        """Test printing of dictionary."""
        # create a complex nested dictionary
        test_dict = {
            "a": 1,
            "b": 2,
            "c": {"d": 3, "e": 4, "f": {"g": 5, "h": 6}},
            "i": 7,
            "j": lambda x: x**2,  # noqa: E731
            "k": dict_utils.class_to_dict,
        }
        # print the dictionary
        dict_utils.print_dict(test_dict)

    def test_string_callable_function_conversion(self):
        """Test string <-> callable conversion for function."""
        # convert function to string
        test_string = dict_utils.callable_to_string(test_function)
        # convert string to function
        test_function_2 = dict_utils.string_to_callable(test_string)
        # check that functions are the same
        self.assertEqual(test_function(2), test_function_2(2))

    def test_string_callable_function_with_lambda_in_name_conversion(self):
        """Test string <-> callable conversion for function which has lambda in its name."""
        # convert function to string
        test_string = dict_utils.callable_to_string(test_lambda_function)
        # convert string to function
        test_function_2 = dict_utils.string_to_callable(test_string)
        # check that functions are the same
        self.assertEqual(test_function(2), test_function_2(2))

    def test_string_callable_lambda_conversion(self):
        """Test string <-> callable conversion for lambda expression."""
        # create lambda function
        func = lambda x: x**2  # noqa: E731
        # convert function to string
        test_string = dict_utils.callable_to_string(func)
        # convert string to function
        func_2 = dict_utils.string_to_callable(test_string)
        # check that functions are the same
        self.assertEqual(func(2), func_2(2))


if __name__ == "__main__":
    run_tests()
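# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the "module:function"
# round-trip that `callable_to_string` / `string_to_callable` are tested
# against. The helpers below are hypothetical simplifications for
# illustration; the real utilities also handle lambda expressions.
import importlib


def sketch_callable_to_string(func) -> str:
    # encode a callable as "<module>:<qualified name>"
    return f"{func.__module__}:{func.__name__}"


def sketch_string_to_callable(name: str):
    # decode "<module>:<qualified name>" back into a callable
    module_name, function_name = name.split(":")
    return getattr(importlib.import_module(module_name), function_name)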
2,847
Python
31.735632
95
0.643836
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/utils/test_math.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
import unittest
from math import pi as PI

"""Launch Isaac Sim Simulator first.

This is only needed because of warp dependency.
"""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app in headless mode
simulation_app = AppLauncher(headless=True).app

import omni.isaac.orbit.utils.math as math_utils


class TestMathUtilities(unittest.TestCase):
    """Test fixture for checking math utilities in Orbit."""

    def test_is_identity_pose(self):
        """Test is_identity_pose method."""
        identity_pos_one_row = torch.zeros(3)
        identity_rot_one_row = torch.tensor((1.0, 0.0, 0.0, 0.0))

        self.assertTrue(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))

        identity_pos_one_row[0] = 1.0
        identity_rot_one_row[1] = 1.0

        self.assertFalse(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))

        identity_pos_multi_row = torch.zeros(3, 3)
        identity_rot_multi_row = torch.zeros(3, 4)
        identity_rot_multi_row[:, 0] = 1.0

        self.assertTrue(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))

        identity_pos_multi_row[0, 0] = 1.0
        identity_rot_multi_row[0, 1] = 1.0

        self.assertFalse(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))

    def test_axis_angle_from_quat(self):
        """Test axis_angle_from_quat method."""
        # Quaternions of the form (2,4) and (2,2,4)
        quats = [
            torch.Tensor([[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]]),
            torch.Tensor([
                [[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]],
                [[1.0, 0.0, 0.0, 0.0], [0.9850375, 0.0995007, 0.0995007, 0.0995007]],
            ]),
        ]
        # Angles of the form (2,3) and (2,2,3)
        angles = [
            torch.Tensor([[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]]),
            torch.Tensor([[[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]], [[0.0, 0.0, 0.0], [0.2, 0.2, 0.2]]]),
        ]

        for quat, angle in zip(quats, angles):
            with self.subTest(quat=quat, angle=angle):
                self.assertTrue(torch.allclose(math_utils.axis_angle_from_quat(quat), angle, atol=1e-7))

    def test_axis_angle_from_quat_approximation(self):
        """Test Taylor approximation from axis_angle_from_quat method for unstable conversions
        where theta is very small."""
        # Generate a small rotation quaternion
        # Small angle
        theta = torch.Tensor([0.0000001])
        # Arbitrary normalized axis of rotation in rads, (x,y,z)
        axis = [-0.302286, 0.205494, -0.930803]
        # Generate quaternion
        qw = torch.cos(theta / 2)
        quat_vect = [qw] + [d * torch.sin(theta / 2) for d in axis]
        quaternion = torch.tensor(quat_vect, dtype=torch.float32)

        # Convert quaternion to axis-angle
        axis_angle_computed = math_utils.axis_angle_from_quat(quaternion)

        # Expected axis-angle representation
        axis_angle_expected = torch.tensor([theta * d for d in axis], dtype=torch.float32)

        # Assert that the computed values are close to the expected values
        self.assertTrue(torch.allclose(axis_angle_computed, axis_angle_expected, atol=1e-7))

    def test_quat_error_magnitude(self):
        """Test quat_error_magnitude method."""
        # Define test cases
        test_cases = [
            # q1, q2, expected error
            # No rotation
            (torch.Tensor([1, 0, 0, 0]), torch.Tensor([1, 0, 0, 0]), torch.Tensor([0.0])),
            # PI/2 rotation
            (torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.7071068, 0.7071068, 0, 0]), torch.Tensor([PI / 2])),
            # PI rotation
            (torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.0, 0.0, 1.0, 0]), torch.Tensor([PI])),
        ]
        # Test higher dimension (batched) inputs by stacking the cases above
        test_cases += tuple([(torch.stack(tensors) for tensors in zip(*test_cases))])

        for q1, q2, expected_diff in test_cases:
            with self.subTest(q1=q1, q2=q2):
                q12_diff = math_utils.quat_error_magnitude(q1, q2)
                self.assertTrue(torch.allclose(q12_diff, torch.flatten(expected_diff), atol=1e-7))


if __name__ == "__main__":
    run_tests()
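# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the conversion being
# tested above: for a quaternion (w, x, y, z) with v = (x, y, z), the rotation
# angle is theta = 2 * atan2(||v||, w) and the axis-angle vector is
# theta * v / ||v||. For small ||v||, theta / ||v|| is replaced by its
# first-order approximation 2 / w to stay numerically stable. Hypothetical
# simplification for illustration only.
def sketch_axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tensor:
    w, v = quat[..., 0:1], quat[..., 1:]
    v_norm = torch.linalg.norm(v, dim=-1, keepdim=True)
    angle = 2.0 * torch.atan2(v_norm, w)
    # switch to the small-angle approximation when ||v|| is tiny
    scale = torch.where(v_norm > eps, angle / v_norm.clamp(min=eps), 2.0 / w)
    return scale * v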
4,412
Python
37.043103
113
0.601541
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/utils/test_timer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import time
import unittest

from omni.isaac.orbit.utils.timer import Timer


class TestTimer(unittest.TestCase):
    """Test fixture for the Timer class."""

    def setUp(self):
        # number of decimal places to check
        self.precision_places = 2

    def test_timer_as_object(self):
        """Test using a `Timer` as a regular object."""
        timer = Timer()
        timer.start()
        self.assertAlmostEqual(0, timer.time_elapsed, self.precision_places)
        time.sleep(1)
        self.assertAlmostEqual(1, timer.time_elapsed, self.precision_places)
        timer.stop()
        self.assertAlmostEqual(1, timer.total_run_time, self.precision_places)

    def test_timer_as_context_manager(self):
        """Test using a `Timer` as a context manager."""
        with Timer() as timer:
            self.assertAlmostEqual(0, timer.time_elapsed, self.precision_places)
            time.sleep(1)
            self.assertAlmostEqual(1, timer.time_elapsed, self.precision_places)


if __name__ == "__main__":
    run_tests()
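# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the interface exercised
# above, built on `time.perf_counter`. Hypothetical simplification for
# illustration; the real `Timer` also supports a message and error handling.
class SketchTimer:
    def start(self):
        self._start = time.perf_counter()

    def stop(self):
        self.total_run_time = time.perf_counter() - self._start

    @property
    def time_elapsed(self) -> float:
        return time.perf_counter() - self._start

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *exc):
        self.stop()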
1,603
Python
29.26415
95
0.680599
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/utils/test_configclass.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

# NOTE: While we don't actually use the simulation app in this test, we still need to launch it
# because warp is only available in the context of a running simulation
"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import copy
import os
import unittest
from collections.abc import Callable
from dataclasses import MISSING, asdict, field
from functools import wraps
from typing import ClassVar

from omni.isaac.orbit.utils.configclass import configclass
from omni.isaac.orbit.utils.dict import class_to_dict, update_class_from_dict
from omni.isaac.orbit.utils.io import dump_yaml, load_yaml

"""
Mock classes and functions.
"""


def dummy_function1() -> int:
    """Dummy function 1."""
    return 1


def dummy_function2() -> int:
    """Dummy function 2."""
    return 2


def dummy_wrapper(func):
    """Decorator for wrapping function."""

    @wraps(func)
    def wrapper():
        return func() + 1

    return wrapper


@dummy_wrapper
def wrapped_dummy_function3():
    """Dummy function 3."""
    return 3


@dummy_wrapper
def wrapped_dummy_function4():
    """Dummy function 4."""
    return 4


class DummyClass:
    """Dummy class."""

    def __init__(self):
        """Initialize dummy class."""
        self.a = 1
        self.b = 2


"""
Dummy configuration: Basic
"""


def double(x):
    """Dummy function."""
    return 2 * x


@configclass
class ViewerCfg:
    eye: list = [7.5, 7.5, 7.5]  # field missing on purpose
    lookat: list = field(default_factory=lambda: [0.0, 0.0, 0.0])


@configclass
class EnvCfg:
    num_envs: int = double(28)  # uses function for assignment
    episode_length: int = 2000
    viewer: ViewerCfg = ViewerCfg()


@configclass
class RobotDefaultStateCfg:
    pos = (0.0, 0.0, 0.0)  # type annotation missing on purpose (immutable)
    rot: tuple = (1.0, 0.0, 0.0, 0.0)
    dof_pos: tuple = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    dof_vel = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]  # type annotation missing on purpose (mutable)


@configclass
class BasicDemoCfg:
    """Dummy configuration class."""

    device_id: int = 0
    env: EnvCfg = EnvCfg()
    robot_default_state: RobotDefaultStateCfg = RobotDefaultStateCfg()


@configclass
class BasicDemoPostInitCfg:
    """Dummy configuration class."""

    device_id: int = 0
    env: EnvCfg = EnvCfg()
    robot_default_state: RobotDefaultStateCfg = RobotDefaultStateCfg()

    def __post_init__(self):
        self.device_id = 1
        self.add_variable = 3


"""
Dummy configuration to check type annotations ordering.
"""


@configclass
class TypeAnnotationOrderingDemoCfg:
    """Config class with type annotations."""

    anymal: RobotDefaultStateCfg = RobotDefaultStateCfg()
    unitree: RobotDefaultStateCfg = RobotDefaultStateCfg()
    franka: RobotDefaultStateCfg = RobotDefaultStateCfg()


@configclass
class NonTypeAnnotationOrderingDemoCfg:
    """Config class without type annotations."""

    anymal = RobotDefaultStateCfg()
    unitree = RobotDefaultStateCfg()
    franka = RobotDefaultStateCfg()


@configclass
class InheritedNonTypeAnnotationOrderingDemoCfg(NonTypeAnnotationOrderingDemoCfg):
    """Inherited config class without type annotations."""

    pass


"""
Dummy configuration: Inheritance
"""


@configclass
class ParentDemoCfg:
    """Dummy parent configuration with missing fields."""

    a: int = MISSING  # add new missing field
    b = 2  # type annotation missing on purpose
    c: RobotDefaultStateCfg = MISSING  # add new missing field
    j: list[str] = MISSING  # add new missing field
    i: list[str] = MISSING  # add new missing field
    func: Callable = MISSING  # add new missing field


@configclass
class ChildDemoCfg(ParentDemoCfg):
    """Dummy child configuration with missing fields."""

    func = dummy_function1  # set default value for missing field
    c = RobotDefaultStateCfg()  # set default value for missing field

    func_2: Callable = MISSING  # add new missing field
    d: int = MISSING  # add new missing field

    k: list[str] = ["c", "d"]
    e: ViewerCfg = MISSING  # add new missing field

    dummy_class = DummyClass

    def __post_init__(self):
        self.b = 3  # change value of existing field
        self.i = ["a", "b"]  # change value of existing field


@configclass
class ChildChildDemoCfg(ChildDemoCfg):
    """Dummy child configuration with missing fields."""

    func_2 = dummy_function2
    d = 2  # set default value for missing field

    def __post_init__(self):
        """Post initialization function."""
        super().__post_init__()
        self.b = 4  # change value of existing field
        self.f = "new"  # add new field


"""
Configuration with class inside.
"""


@configclass
class DummyClassCfg:
    """Dummy class configuration with class type."""

    class_name_1: type = DummyClass
    class_name_2: type[DummyClass] = DummyClass
    class_name_3 = DummyClass
    class_name_4: ClassVar[type[DummyClass]] = DummyClass

    b: str = "dummy"


"""
Configuration with nested classes.
"""


@configclass
class OutsideClassCfg:
    """Outermost dummy configuration."""

    @configclass
    class InsideClassCfg:
        """Inner dummy configuration."""

        @configclass
        class InsideInsideClassCfg:
            """Dummy configuration with class type."""

            u: list[int] = [1, 2, 3]

        class_type: type = DummyClass
        b: str = "dummy"

    inside: InsideClassCfg = InsideClassCfg()
    x: int = 20

    def __post_init__(self):
        self.inside.b = "dummy_changed"


"""
Dummy configuration: Functions
"""


@configclass
class FunctionsDemoCfg:
    """Dummy configuration class with functions as attributes."""

    func = dummy_function1
    wrapped_func = wrapped_dummy_function3
    func_in_dict = {"func": dummy_function1}


@configclass
class FunctionImplementedDemoCfg:
    """Dummy configuration class with functions as attributes."""

    func = dummy_function1
    a: int = 5
    k = 100.0

    def set_a(self, a: int):
        self.a = a


"""
Test solutions: Basic
"""

basic_demo_cfg_correct = {
    "env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}},
    "robot_default_state": {
        "pos": (0.0, 0.0, 0.0),
        "rot": (1.0, 0.0, 0.0, 0.0),
        "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
        "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    },
    "device_id": 0,
}

basic_demo_cfg_change_correct = {
    "env": {"num_envs": 22, "episode_length": 2000, "viewer": {"eye": (2.0, 2.0, 2.0), "lookat": [0.0, 0.0, 0.0]}},
    "robot_default_state": {
        "pos": (0.0, 0.0, 0.0),
        "rot": (1.0, 0.0, 0.0, 0.0),
        "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
        "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    },
    "device_id": 0,
}

basic_demo_post_init_cfg_correct = {
    "env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}},
    "robot_default_state": {
        "pos": (0.0, 0.0, 0.0),
        "rot": (1.0, 0.0, 0.0, 0.0),
        "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
        "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    },
    "device_id": 1,
    "add_variable": 3,
}

"""
Test solutions: Functions
"""

functions_demo_cfg_correct = {
    "func": "__main__:dummy_function1",
    "wrapped_func": "__main__:wrapped_dummy_function3",
    "func_in_dict": {"func": "__main__:dummy_function1"},
}

functions_demo_cfg_for_updating = {
    "func": "__main__:dummy_function2",
    "wrapped_func": "__main__:wrapped_dummy_function4",
    "func_in_dict": {"func": "__main__:dummy_function2"},
}

"""
Test fixtures.
"""


class TestConfigClass(unittest.TestCase):
    """Test cases for various situations with configclass decorator for configuration."""

    def test_str(self):
        """Test printing the configuration."""
        cfg = BasicDemoCfg()
        print()
        print(cfg)

    def test_str_dict(self):
        """Test printing the configuration using dataclass utility."""
        cfg = BasicDemoCfg()
        print()
        print("Using dataclass function: ", asdict(cfg))
        print("Using internal function: ", cfg.to_dict())
        self.assertDictEqual(asdict(cfg), cfg.to_dict())

    def test_dict_conversion(self):
        """Test dictionary conversion of configclass instance."""
        cfg = BasicDemoCfg()
        # dataclass function
        self.assertDictEqual(asdict(cfg), basic_demo_cfg_correct)
        self.assertDictEqual(asdict(cfg.env), basic_demo_cfg_correct["env"])
        # utility function
        self.assertDictEqual(class_to_dict(cfg), basic_demo_cfg_correct)
        self.assertDictEqual(class_to_dict(cfg.env), basic_demo_cfg_correct["env"])
        # internal function
        self.assertDictEqual(cfg.to_dict(), basic_demo_cfg_correct)
        self.assertDictEqual(cfg.env.to_dict(), basic_demo_cfg_correct["env"])

    def test_dict_conversion_order(self):
        """Tests that order is conserved when converting to dictionary."""
        true_outer_order = ["device_id", "env", "robot_default_state"]
        true_env_order = ["num_envs", "episode_length", "viewer"]
        # create config
        cfg = BasicDemoCfg()
        # check ordering
        for label, parsed_value in zip(true_outer_order, cfg.__dict__.keys()):
            self.assertEqual(label, parsed_value)
        for label, parsed_value in zip(true_env_order, cfg.env.__dict__.keys()):
            self.assertEqual(label, parsed_value)
        # convert config to dictionary
        cfg_dict = class_to_dict(cfg)
        # check ordering
        for label, parsed_value in zip(true_outer_order, cfg_dict.keys()):
            self.assertEqual(label, parsed_value)
        for label, parsed_value in zip(true_env_order, cfg_dict["env"].keys()):
            self.assertEqual(label, parsed_value)
        # check ordering when copied
        cfg_dict_copied = copy.deepcopy(cfg_dict)
        cfg_dict_copied.pop("robot_default_state")
        # check ordering
        for label, parsed_value in zip(true_outer_order, cfg_dict_copied.keys()):
            self.assertEqual(label, parsed_value)
        for label, parsed_value in zip(true_env_order, cfg_dict_copied["env"].keys()):
            self.assertEqual(label, parsed_value)

    def test_config_update_via_constructor(self):
        """Test updating configclass through initialization."""
        cfg = BasicDemoCfg(env=EnvCfg(num_envs=22, viewer=ViewerCfg(eye=(2.0, 2.0, 2.0))))
        self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)

    def test_config_update_after_init(self):
        """Test updating configclass using instance members."""
        cfg = BasicDemoCfg()
        cfg.env.num_envs = 22
        cfg.env.viewer.eye = (2.0, 2.0, 2.0)  # note: changes from list to tuple
        self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)

    def test_config_update_dict(self):
        """Test updating configclass using dictionary."""
        cfg = BasicDemoCfg()
        cfg_dict = {"env": {"num_envs": 22, "viewer": {"eye": (2.0, 2.0, 2.0)}}}
        update_class_from_dict(cfg, cfg_dict)
        self.assertDictEqual(asdict(cfg), basic_demo_cfg_change_correct)

    def test_config_update_dict_using_internal(self):
        """Test updating configclass from a dictionary using configclass method."""
        cfg = BasicDemoCfg()
        cfg_dict = {"env": {"num_envs": 22, "viewer": {"eye": (2.0, 2.0, 2.0)}}}
        cfg.from_dict(cfg_dict)
        self.assertDictEqual(cfg.to_dict(), basic_demo_cfg_change_correct)

    def test_config_update_dict_using_post_init(self):
        """Test that post-init changes are reflected in the dictionary conversion."""
        cfg = BasicDemoPostInitCfg()
        self.assertDictEqual(cfg.to_dict(), basic_demo_post_init_cfg_correct)

    def test_invalid_update_key(self):
        """Test invalid key update."""
        cfg = BasicDemoCfg()
        cfg_dict = {"env": {"num_envs": 22, "viewer": {"pos": (2.0, 2.0, 2.0)}}}
        with self.assertRaises(KeyError):
            update_class_from_dict(cfg, cfg_dict)

    def test_multiple_instances(self):
        """Test multiple instances with twice instantiation."""
        # create two config instances
        cfg1 = BasicDemoCfg()
        cfg2 = BasicDemoCfg()

        # check variables
        # mutable -- variables should be different
        self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
        self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
        self.assertNotEqual(id(cfg1.robot_default_state), id(cfg2.robot_default_state))
        # immutable -- variables are the same
        self.assertEqual(id(cfg1.robot_default_state.dof_pos), id(cfg2.robot_default_state.dof_pos))
        self.assertEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
        self.assertEqual(id(cfg1.device_id), id(cfg2.device_id))

        # check values
        self.assertDictEqual(cfg1.env.to_dict(), cfg2.env.to_dict())
        self.assertDictEqual(cfg1.robot_default_state.to_dict(), cfg2.robot_default_state.to_dict())

    def test_alter_values_multiple_instances(self):
        """Test alterations in multiple instances of the same configclass."""
        # create two config instances
        cfg1 = BasicDemoCfg()
        cfg2 = BasicDemoCfg()

        # alter configurations
        cfg1.env.num_envs = 22  # immutable data: int
        cfg1.env.viewer.eye[0] = 1.0  # mutable data: list
        cfg1.env.viewer.lookat[2] = 12.0  # mutable data: list

        # check variables
        # values should be different
        self.assertNotEqual(cfg1.env.num_envs, cfg2.env.num_envs)
        self.assertNotEqual(cfg1.env.viewer.eye, cfg2.env.viewer.eye)
        self.assertNotEqual(cfg1.env.viewer.lookat, cfg2.env.viewer.lookat)
        # mutable -- variables are different ids
        self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
        self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
        # immutable -- altered variables are different ids
        self.assertNotEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))

    def test_multiple_instances_with_replace(self):
        """Test multiple instances with creation through replace function."""
        # create two config instances
        cfg1 = BasicDemoCfg()
        cfg2 = cfg1.replace()

        # check variable IDs
        # mutable -- variables should be different
        self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
        self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
        self.assertNotEqual(id(cfg1.robot_default_state), id(cfg2.robot_default_state))
        # immutable -- variables are the same
        self.assertEqual(id(cfg1.robot_default_state.dof_pos), id(cfg2.robot_default_state.dof_pos))
        self.assertEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
        self.assertEqual(id(cfg1.device_id), id(cfg2.device_id))

        # check values
        self.assertDictEqual(cfg1.to_dict(), cfg2.to_dict())

    def test_alter_values_multiple_instances_with_replace(self):
        """Test alterations in multiple instances through replace function."""
        # create two config instances
        cfg1 = BasicDemoCfg()
        cfg2 = cfg1.replace(device_id=1)

        # alter configurations
        cfg1.env.num_envs = 22  # immutable data: int
        cfg1.env.viewer.eye[0] = 1.0  # mutable data: list
        cfg1.env.viewer.lookat[2] = 12.0  # mutable data: list

        # check variables
        # values should be different
        self.assertNotEqual(cfg1.env.num_envs, cfg2.env.num_envs)
        self.assertNotEqual(cfg1.env.viewer.eye, cfg2.env.viewer.eye)
        self.assertNotEqual(cfg1.env.viewer.lookat, cfg2.env.viewer.lookat)
        # mutable -- variables are different ids
        self.assertNotEqual(id(cfg1.env.viewer.eye), id(cfg2.env.viewer.eye))
        self.assertNotEqual(id(cfg1.env.viewer.lookat), id(cfg2.env.viewer.lookat))
        # immutable -- altered variables are different ids
        self.assertNotEqual(id(cfg1.env.num_envs), id(cfg2.env.num_envs))
        self.assertNotEqual(id(cfg1.device_id), id(cfg2.device_id))

    def test_configclass_type_ordering(self):
        """Checks ordering of config objects when no type annotation is provided."""
        cfg_1 = TypeAnnotationOrderingDemoCfg()
        cfg_2 = NonTypeAnnotationOrderingDemoCfg()
        cfg_3 = InheritedNonTypeAnnotationOrderingDemoCfg()

        # check ordering
        self.assertEqual(list(cfg_1.__dict__.keys()), list(cfg_2.__dict__.keys()))
        self.assertEqual(list(cfg_3.__dict__.keys()), list(cfg_2.__dict__.keys()))
        self.assertEqual(list(cfg_1.__dict__.keys()), list(cfg_3.__dict__.keys()))

    def test_functions_config(self):
        """Tests having functions as values in the configuration instance."""
        cfg = FunctionsDemoCfg()
        # check types
        self.assertEqual(cfg.__annotations__["func"], type(dummy_function1))
        self.assertEqual(cfg.__annotations__["wrapped_func"], type(wrapped_dummy_function3))
        self.assertEqual(cfg.__annotations__["func_in_dict"], dict)
        # check calling
        self.assertEqual(cfg.func(), 1)
        self.assertEqual(cfg.wrapped_func(), 4)
        self.assertEqual(cfg.func_in_dict["func"](), 1)

    def test_function_impl_config(self):
        """Tests having a member function in the configuration instance."""
        cfg = FunctionImplementedDemoCfg()
        # change value
        self.assertEqual(cfg.a, 5)
        cfg.set_a(10)
        self.assertEqual(cfg.a, 10)

    def test_dict_conversion_functions_config(self):
        """Tests conversion of config with functions into dictionary."""
        cfg = FunctionsDemoCfg()
        cfg_dict = class_to_dict(cfg)
        self.assertEqual(cfg_dict["func"], functions_demo_cfg_correct["func"])
        self.assertEqual(cfg_dict["wrapped_func"], functions_demo_cfg_correct["wrapped_func"])
        self.assertEqual(cfg_dict["func_in_dict"]["func"], functions_demo_cfg_correct["func_in_dict"]["func"])

    def test_update_functions_config_with_functions(self):
        """Tests updating config with functions."""
        cfg = FunctionsDemoCfg()
        # update config
        update_class_from_dict(cfg, functions_demo_cfg_for_updating)
        # check calling
        self.assertEqual(cfg.func(), 2)
        self.assertEqual(cfg.wrapped_func(), 5)
        self.assertEqual(cfg.func_in_dict["func"](), 2)

    def test_missing_type_in_config(self):
        """Tests missing type annotation in config.

        Should complain that 'c' is missing type annotation since it cannot be inferred
        from 'MISSING' value.
        """
        with self.assertRaises(TypeError):

            @configclass
            class MissingTypeDemoCfg:
                a: int = 1
                b = 2
                c = MISSING

    def test_missing_default_value_in_config(self):
        """Tests missing default value in config.

        Should complain that 'a' is missing default value since it cannot be inferred
        from type annotation.
        """
        with self.assertRaises(ValueError):

            @configclass
            class MissingTypeDemoCfg:
                a: int
                b = 2

    def test_required_argument_for_missing_type_in_config(self):
        """Tests required positional argument for missing type annotation in config creation."""

        @configclass
        class MissingTypeDemoCfg:
            a: int = 1
            b = 2
            c: int = MISSING

        # should complain that 'c' is missed in positional arguments
        # TODO: Uncomment this when we move to 3.10.
        # with self.assertRaises(TypeError):
        #     cfg = MissingTypeDemoCfg(a=1)
        # should not complain
        cfg = MissingTypeDemoCfg(a=1, c=3)

        self.assertEqual(cfg.a, 1)
        self.assertEqual(cfg.b, 2)

    def test_config_inheritance(self):
        """Tests that inheritance works properly."""
        # check variables
        cfg = ChildDemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])

        self.assertEqual(cfg.func, dummy_function1)
        self.assertEqual(cfg.a, 20)
        self.assertEqual(cfg.d, 3)
        self.assertEqual(cfg.j, ["c", "d"])

        # check post init
        self.assertEqual(cfg.b, 3)
        self.assertEqual(cfg.i, ["a", "b"])

    def test_config_double_inheritance(self):
        """Tests that inheritance works properly when inheriting twice."""
        # check variables
        cfg = ChildChildDemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])

        self.assertEqual(cfg.func, dummy_function1)
        self.assertEqual(cfg.func_2, dummy_function2)
        self.assertEqual(cfg.a, 20)
        self.assertEqual(cfg.d, 3)
        self.assertEqual(cfg.j, ["c", "d"])

        # check post init
        self.assertEqual(cfg.b, 4)
        self.assertEqual(cfg.f, "new")
        self.assertEqual(cfg.i, ["a", "b"])

    def test_config_with_class_type(self):
        """Tests that configclass works properly with class type."""
        cfg = DummyClassCfg()

        # annotations are stored as strings here because of `from __future__ import annotations`
        annotations = {k: eval(v) for k, v in cfg.__annotations__.items()}
        # check types
        self.assertEqual(annotations["class_name_1"], type)
        self.assertEqual(annotations["class_name_2"], type[DummyClass])
        self.assertEqual(annotations["class_name_3"], type[DummyClass])
        self.assertEqual(annotations["class_name_4"], ClassVar[type[DummyClass]])
        # check values
        self.assertEqual(cfg.class_name_1, DummyClass)
        self.assertEqual(cfg.class_name_2, DummyClass)
        self.assertEqual(cfg.class_name_3, DummyClass)
        self.assertEqual(cfg.class_name_4, DummyClass)
        self.assertEqual(cfg.b, "dummy")

    def test_nested_config_class_declarations(self):
        """Tests that configclass works properly with nested class declarations."""
        cfg = OutsideClassCfg()

        # check types
        self.assertNotIn("InsideClassCfg", cfg.__annotations__)
        self.assertNotIn("InsideClassCfg", OutsideClassCfg.__annotations__)
        self.assertNotIn("InsideInsideClassCfg", OutsideClassCfg.InsideClassCfg.__annotations__)
        self.assertNotIn("InsideInsideClassCfg", cfg.inside.__annotations__)
        # check values
        self.assertEqual(cfg.inside.class_type, DummyClass)
        self.assertEqual(cfg.inside.b, "dummy_changed")
        self.assertEqual(cfg.x, 20)

    def test_config_dumping(self):
        """Check that config dumping works properly."""
        # file for dumping
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "output", "configclass", "test_config.yaml")

        # create config
        cfg = ChildDemoCfg(a=20, d=3, e=ViewerCfg(), j=["c", "d"])

        # save config
        dump_yaml(filename, cfg)
        # load config
        cfg_loaded = load_yaml(filename)
        # check dictionaries are the same
        self.assertEqual(list(cfg.to_dict().keys()), list(cfg_loaded.keys()))
        self.assertDictEqual(cfg.to_dict(), cfg_loaded)

        # note: saving the config with sorted keys does not preserve the insertion order!
        # save config
        dump_yaml(filename, cfg, sort_keys=True)
        # load config
        cfg_loaded = load_yaml(filename)
        # check dictionaries are the same
        self.assertNotEqual(list(cfg.to_dict().keys()), list(cfg_loaded.keys()))
        self.assertDictEqual(cfg.to_dict(), cfg_loaded)


if __name__ == "__main__":
    run_tests()
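# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of the recursive update
# pattern that `update_class_from_dict` is tested against: nested dictionaries
# descend into nested config objects, and unknown keys raise a KeyError.
# Hypothetical simplification for illustration only.
def sketch_update_class_from_dict(obj, data: dict):
    for key, value in data.items():
        if not hasattr(obj, key):
            raise KeyError(f"Config has no attribute: {key}")
        if isinstance(value, dict):
            # recurse into nested config objects
            sketch_update_class_from_dict(getattr(obj, key), value)
        else:
            setattr(obj, key, value)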
23,835
Python
32.619182
115
0.632599
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/assets/check_ridgeback_franka.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to simulate a mobile manipulator.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_ridgeback_franka.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(
    description="This script demonstrates how to simulate a mobile manipulator with dummy joints."
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.ridgeback_franka import RIDGEBACK_FRANKA_PANDA_CFG  # isort:skip


def design_scene():
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)
    # add robots and return them
    return add_robots()


def add_robots() -> Articulation:
    """Adds robots to the scene."""
    robot_cfg = RIDGEBACK_FRANKA_PANDA_CFG
    # -- Spawn robot
    robot_cfg.spawn.func("/World/Robot_1", robot_cfg.spawn, translation=(0.0, -1.0, 0.0))
    robot_cfg.spawn.func("/World/Robot_2", robot_cfg.spawn, translation=(0.0, 1.0, 0.0))
    # -- Create interface
    robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot.*"))

    return robot


def run_simulator(sim: sim_utils.SimulationContext, robot: Articulation):
    """Runs the simulator by applying actions to the robot at every time-step."""
    # dummy action
    actions = robot.data.default_joint_pos.clone()

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    # episode counter
    sim_time = 0.0
    ep_step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if ep_step_count % 1000 == 0:
            # reset counters
            sim_time = 0.0
            ep_step_count = 0
            # reset dof state
            joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # reset internals
            robot.reset()
            # reset command
            actions = torch.rand_like(robot.data.default_joint_pos) + robot.data.default_joint_pos
            # -- base
            actions[:, 0:3] = 0.0
            # -- gripper
            actions[:, -2:] = 0.04
            print("[INFO]: Resetting robots state...")
        # change the gripper action
        if ep_step_count % 200 == 0:
            # flip command for the gripper
            actions[:, -2:] = 0.0 if actions[0, -2] > 0.0 else 0.04
        # change the base action
        # -- forward and backward (x-axis)
        if ep_step_count == 200:
            actions[:, :3] = 0.0
            actions[:, 0] = 1.0
        if ep_step_count == 300:
            actions[:, :3] = 0.0
            actions[:, 0] = -1.0
        # -- right and left (y-axis)
        if ep_step_count == 400:
            actions[:, :3] = 0.0
            actions[:, 1] = 1.0
        if ep_step_count == 500:
            actions[:, :3] = 0.0
            actions[:, 1] = -1.0
        # -- turn right and left (z-axis)
        if ep_step_count == 600:
            actions[:, :3] = 0.0
            actions[:, 2] = 1.0
        if ep_step_count == 700:
            actions[:, :3] = 0.0
            actions[:, 2] = -1.0
        if ep_step_count == 900:
            actions[:, :3] = 0.0
            actions[:, 2] = 1.0
        # change the arm action every 100 steps
        if ep_step_count % 100 == 0:
            actions[:, 3:10] = torch.rand(robot.num_instances, 7, device=robot.device)
            actions[:, 3:10] += robot.data.default_joint_pos[:, 3:10]
        # apply action
        robot.set_joint_velocity_target(actions[:, :3], joint_ids=[0, 1, 2])
        robot.set_joint_position_target(actions[:, 3:], joint_ids=[3, 4, 5, 6, 7, 8, 9, 10, 11])
        robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        ep_step_count += 1
        # update buffers
        robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg())
    # Set main camera
    sim.set_camera_view([1.5, 1.5, 1.5], [0.0, 0.0, 0.0])
    # design scene
    robot = design_scene()
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, robot)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
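# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of the action layout used
# above: the first three joints are the dummy base joints (x, y, yaw) driven
# by velocity targets, the next seven are the arm joints, and the last two are
# the gripper fingers, both driven by position targets. Hypothetical helper
# for illustration only.
def sketch_split_actions(actions: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    base_vel = actions[:, 0:3]  # dummy base joints: x, y, yaw velocity targets
    arm_pos = actions[:, 3:10]  # seven arm joint position targets
    gripper_pos = actions[:, 10:12]  # two gripper finger position targets
    return base_vel, arm_pos, gripper_pos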
5,194
Python
29.380117
109
0.588371
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/assets/test_rigid_object.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# ignore private usage of variables warning
# pyright: reportPrivateUsage=none

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# Can set this to False to see the GUI for debugging
# This will also add lights to the scene
HEADLESS = True

# launch omniverse app
app_launcher = AppLauncher(headless=HEADLESS)
simulation_app = app_launcher.app

"""Rest everything follows."""

import ctypes
import torch
import unittest

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sim import build_simulation_context
from omni.isaac.orbit.sim.spawners import materials
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import default_orientation, random_orientation


def generate_cubes_scene(num_cubes: int = 1, height=1.0) -> tuple[RigidObject, torch.Tensor]:
    """Generate a scene with the provided number of cubes.

    Args:
        num_cubes: Number of cubes to generate.
        height: Height of the cubes.

    Returns:
        RigidObject: The rigid object representing the cubes.
        origins: The origins of the cubes.
    """
    origins = torch.tensor([(i * 1.0, 0, height) for i in range(num_cubes)])
    # Create Top-level Xforms, one for each cube
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Table_{i}", "Xform", translation=origin)

    # Create rigid object
    cube_object_cfg = RigidObjectCfg(
        prim_path="/World/Table_.*/Object",
        spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd"),
        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, height)),
    )
    cube_object = RigidObject(cfg=cube_object_cfg)

    return cube_object, origins


class TestRigidObject(unittest.TestCase):
    """Test for rigid object class."""

    """
    Tests
    """

    def test_initialization(self):
        """Test initialization with rigid body API at the provided prim path."""
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes)

                        # Check that boundedness of rigid object is correct
                        self.assertEqual(ctypes.c_long.from_address(id(cube_object)).value, 1)

                        # Play sim
                        sim.reset()

                        # Check if object is initialized
                        self.assertTrue(cube_object._is_initialized)
                        self.assertEqual(len(cube_object.body_names), 1)

                        # Check buffers that exist and have correct shapes
                        self.assertEqual(cube_object.data.root_pos_w.shape, (num_cubes, 3))
                        self.assertEqual(cube_object.data.root_quat_w.shape, (num_cubes, 4))

                        # Simulate physics
                        for _ in range(2):
                            # perform rendering
                            sim.step()
                            # update object
                            cube_object.update(sim.cfg.dt)

    def test_external_force_on_single_body(self):
        """Test application of external force on the base of the object.

        In this test, we apply a force equal to the weight of an object on the base of
        one of the objects. We check that the object does not move. For the other object,
        we do not apply any force and check that it falls down.
        """
        for num_cubes in (2, 4):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
                        cube_object, origins = generate_cubes_scene(num_cubes=num_cubes)

                        # Play the simulator
                        sim.reset()

                        # Find bodies to apply the force
                        body_ids, body_names = cube_object.find_bodies(".*")

                        # Sample a force equal to the weight of the object
                        external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device)
                        # Every 2nd cube should have a force applied to it
                        external_wrench_b[0::2, :, 2] = 9.81 * cube_object.root_physx_view.get_masses()[0]

                        # Now we are ready!
                        for _ in range(5):
                            # reset root state
                            root_state = cube_object.data.default_root_state.clone()
                            # need to shift the position of the cubes otherwise they will be on top of each other
                            root_state[:, :3] = origins
                            cube_object.write_root_state_to_sim(root_state)
                            # reset object
                            cube_object.reset()
                            # apply force
                            cube_object.set_external_force_and_torque(
                                external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
                            )
                            # perform simulation
                            for _ in range(5):
                                # apply action to the object
                                cube_object.write_data_to_sim()
                                # perform step
                                sim.step()
                                # update buffers
                                cube_object.update(sim.cfg.dt)

                            # First object should still be at the same Z position (1.0)
                            torch.testing.assert_close(
                                cube_object.data.root_pos_w[0::2, 2], torch.ones(num_cubes // 2, device=sim.device)
                            )
                            # Second object should have fallen, so its Z height should be less than initial height of 1.0
                            self.assertTrue(torch.all(cube_object.data.root_pos_w[1::2, 2] < 1.0))

    def test_set_rigid_object_state(self):
        """Test setting the state of the rigid object.

        In this test, we set the state of the rigid object to a random state and check
        that the object is in that state after simulation. We set gravity to zero as
        we don't want any external forces acting on the object to ensure state remains static.
        """
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    # Turn off gravity for this test as we don't want any external forces acting on the object
                    # to ensure state remains static
                    with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes)

                        # Play the simulator
                        sim.reset()

                        state_types = ["root_pos_w", "root_quat_w", "root_lin_vel_w", "root_ang_vel_w"]

                        # Set each state type individually as they are dependent on each other
                        for state_type_to_randomize in state_types:
                            state_dict = {
                                "root_pos_w": torch.zeros_like(cube_object.data.root_pos_w, device=sim.device),
                                "root_quat_w": default_orientation(num=num_cubes, device=sim.device),
                                "root_lin_vel_w": torch.zeros_like(cube_object.data.root_lin_vel_w, device=sim.device),
                                "root_ang_vel_w": torch.zeros_like(cube_object.data.root_ang_vel_w, device=sim.device),
                            }

                            # Now we are ready!
                            for _ in range(5):
                                # reset object
                                cube_object.reset()

                                # Set random state
                                if state_type_to_randomize == "root_quat_w":
                                    state_dict[state_type_to_randomize] = random_orientation(
                                        num=num_cubes, device=sim.device
                                    )
                                else:
                                    state_dict[state_type_to_randomize] = torch.randn(num_cubes, 3, device=sim.device)

                                # perform simulation
                                for _ in range(5):
                                    root_state = torch.cat(
                                        [
                                            state_dict["root_pos_w"],
                                            state_dict["root_quat_w"],
                                            state_dict["root_lin_vel_w"],
                                            state_dict["root_ang_vel_w"],
                                        ],
                                        dim=-1,
                                    )
                                    # reset root state
                                    cube_object.write_root_state_to_sim(root_state=root_state)

                                    sim.step()

                                    # assert that set root quantities are equal to the ones set in the state_dict
                                    for key, expected_value in state_dict.items():
                                        value = getattr(cube_object.data, key)
                                        torch.testing.assert_close(value, expected_value, rtol=1e-5, atol=1e-5)

                                    cube_object.update(sim.cfg.dt)

    def test_reset_rigid_object(self):
        """Test resetting the state of the rigid object."""
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, gravity_enabled=True, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes)

                        # Play the simulator
                        sim.reset()

                        for i in range(5):
                            # perform rendering
                            sim.step()
                            # update object
                            cube_object.update(sim.cfg.dt)

                            # Move the object to a random position
                            root_state = cube_object.data.default_root_state.clone()
                            root_state[:, :3] = torch.randn(num_cubes, 3, device=sim.device)
                            # Random orientation
                            root_state[:, 3:7] = random_orientation(num=num_cubes, device=sim.device)
                            cube_object.write_root_state_to_sim(root_state)

                            if i % 2 == 0:
                                # reset object
                                cube_object.reset()

                                # Reset should zero external forces and torques and set last body velocity to zero
                                self.assertFalse(cube_object.has_external_wrench)
                                self.assertEqual(torch.count_nonzero(cube_object._external_force_b), 0)
                                self.assertEqual(torch.count_nonzero(cube_object._external_torque_b), 0)
                                self.assertEqual(torch.count_nonzero(cube_object._last_body_vel_w), 0)

    def test_rigid_body_set_material_properties(self):
        """Test getting and setting material properties of rigid object."""
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(
                        device=device, gravity_enabled=True, add_ground_plane=True, auto_add_lighting=True
                    ) as sim:
                        # Create rigid object(s)
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes)

                        # Play sim
                        sim.reset()

                        # Set material properties
                        static_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8)
                        dynamic_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8)
                        restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)

                        materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)

                        indices = torch.tensor(range(num_cubes), dtype=torch.int)
                        # Add friction to cube
                        cube_object.root_physx_view.set_material_properties(materials, indices)

                        # Simulate physics
                        # perform rendering
                        sim.step()
                        # update object
                        cube_object.update(sim.cfg.dt)

                        # Get material properties
                        materials_to_check = cube_object.root_physx_view.get_material_properties()

                        # Check if material properties are set correctly
                        torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials)

    def test_rigid_body_no_friction(self):
        """Test that a rigid object with no friction will maintain its velocity when sliding across a plane."""
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.0)

                        # Create ground plane with no friction
                        cfg = sim_utils.GroundPlaneCfg(
                            physics_material=materials.RigidBodyMaterialCfg(
                                static_friction=0.0,
                                dynamic_friction=0.0,
                                restitution=0.0,
                            )
                        )
                        cfg.func("/World/GroundPlane", cfg)

                        # Play sim
                        sim.reset()

                        # Set material friction properties to be all zero
                        static_friction = torch.zeros(num_cubes, 1)
                        dynamic_friction = torch.zeros(num_cubes, 1)
                        restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)

                        cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)
                        indices = torch.tensor(range(num_cubes), dtype=torch.int)
                        cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)

                        # Set initial velocity
                        # Initial velocity in X to get the block moving
                        initial_velocity = torch.zeros((num_cubes, 6), device=sim.cfg.device)
                        initial_velocity[:, 0] = 0.1

                        cube_object.write_root_velocity_to_sim(initial_velocity)

                        # Simulate physics
                        for _ in range(5):
                            # perform rendering
                            sim.step()
                            # update object
                            cube_object.update(sim.cfg.dt)

                            # Non-deterministic when on GPU, so we use different tolerances
                            if device == "cuda:0":
                                tolerance = 1e-2
                            else:
                                tolerance = 1e-5

                            torch.testing.assert_close(
                                cube_object.data.root_lin_vel_w, initial_velocity[:, :3], rtol=1e-5, atol=tolerance
                            )

    def test_rigid_body_with_static_friction(self):
        """Test that static friction applied to rigid object works as expected.

        This test works by applying a force to the object and checking if the object moves or not
        based on the mu (coefficient of static friction) value set for the object.

        We set the static friction to be non-zero and apply a force to the object. When the force applied
        is below mu, the object should not move. When the force applied is above mu, the object should move.
        """
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.03125)

                        # Create ground plane with no friction
                        cfg = sim_utils.GroundPlaneCfg(
                            physics_material=materials.RigidBodyMaterialCfg(
                                static_friction=0.0,
                                dynamic_friction=0.0,
                            )
                        )
                        cfg.func("/World/GroundPlane", cfg)

                        # Play sim
                        sim.reset()

                        # Set static friction to be non-zero
                        static_friction_coefficient = 0.5
                        static_friction = torch.Tensor([[static_friction_coefficient]] * num_cubes)
                        dynamic_friction = torch.zeros(num_cubes, 1)
                        restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2)

                        cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1)

                        indices = torch.tensor(range(num_cubes), dtype=torch.int)

                        # Add friction to cube
                        cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)

                        # 2 cases: force applied is below and above mu
                        # below mu: block should not move as the force applied is <= mu
                        # above mu: block should move as the force applied is > mu
                        for force in "below_mu", "above_mu":
                            with self.subTest(force=force):
                                external_wrench_b = torch.zeros((num_cubes, 1, 6), device=sim.device)

                                if force == "below_mu":
                                    external_wrench_b[:, 0, 0] = static_friction_coefficient * 0.999
                                else:
                                    external_wrench_b[:, 0, 0] = static_friction_coefficient * 1.001

                                cube_object.set_external_force_and_torque(
                                    external_wrench_b[..., :3],
                                    external_wrench_b[..., 3:],
                                )

                                # Get initial root state (cloned so the later comparison is not against a live buffer)
                                initial_root_state = cube_object.data.root_state_w.clone()

                                # Simulate physics
                                for _ in range(10):
                                    # perform rendering
                                    sim.step()
                                    # update object
                                    cube_object.update(sim.cfg.dt)

                                if force == "below_mu":
                                    # Assert that the block has not moved
                                    torch.testing.assert_close(
                                        cube_object.data.root_state_w, initial_root_state, rtol=1e-5, atol=1e-5
                                    )
                                else:
                                    # Assert that the block has moved since the applied force exceeds mu
                                    self.assertFalse(
                                        torch.allclose(
                                            cube_object.data.root_state_w, initial_root_state, rtol=1e-5, atol=1e-5
                                        )
                                    )

    def test_rigid_body_with_restitution(self):
        """Test that restitution when applied to rigid object works as expected.

        This test works by dropping a block from a height and checking if the block bounces or not
        based on the restitution value set for the object.

        We set the restitution to be non-zero and drop the block from a height. When the restitution is 0,
        the block should not bounce. When the restitution is 1, the block should bounce with the same energy.
        When the restitution is between 0 and 1, the block should bounce with less energy.
        """
        for num_cubes in (1, 2):
            for device in ("cuda:0", "cpu"):
                with self.subTest(num_cubes=num_cubes, device=device):
                    with build_simulation_context(device=device, add_ground_plane=True, auto_add_lighting=True) as sim:
                        cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0)

                        # Create ground plane such that it has a restitution of 1.0 (perfectly elastic collision)
                        cfg = sim_utils.GroundPlaneCfg(
                            physics_material=materials.RigidBodyMaterialCfg(
                                restitution=1.0,
                            )
                        )
                        cfg.func("/World/GroundPlane", cfg)

                        indices = torch.tensor(range(num_cubes), dtype=torch.int)

                        # Play sim
                        sim.reset()

                        # 3 cases: inelastic, partially elastic, elastic
                        # inelastic: restitution = 0, block should not bounce
                        # partially elastic: 0 <= restitution <= 1, block should bounce with less energy
                        # elastic: restitution = 1, block should bounce with same energy
                        for expected_collision_type in "inelastic", "partially_elastic", "elastic":
                            root_state = torch.zeros(1, 13, device=sim.device)
                            root_state[0, 3] = 1.0  # To make orientation a quaternion
                            root_state[0, 2] = 0.1  # Set an initial drop height
                            root_state[0, 9] = -1.0  # Set an initial downward velocity

                            cube_object.write_root_state_to_sim(root_state=root_state)

                            prev_z_velocity = 0.0
                            curr_z_velocity = 0.0

                            with self.subTest(expected_collision_type=expected_collision_type):
                                # cube_object.reset()
                                # Set restitution based on the expected collision type
                                if expected_collision_type == "inelastic":
                                    restitution_coefficient = 0.0
                                elif expected_collision_type == "partially_elastic":
                                    restitution_coefficient = 0.5
                                else:
                                    restitution_coefficient = 1.0

                                static_friction = torch.zeros(num_cubes, 1)
                                dynamic_friction = torch.zeros(num_cubes, 1)
                                restitution = torch.Tensor([[restitution_coefficient]] * num_cubes)

                                cube_object_materials = torch.cat(
                                    [static_friction, dynamic_friction, restitution], dim=-1
                                )

                                # Add friction to cube
                                cube_object.root_physx_view.set_material_properties(cube_object_materials, indices)

                                curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2]

                                while torch.all(curr_z_velocity <= 0.0):
                                    # Simulate physics
                                    curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2]

                                    # perform rendering
                                    sim.step()
                                    # update object
                                    cube_object.update(sim.cfg.dt)
                                    if torch.all(curr_z_velocity <= 0.0):
                                        # Still in the air
                                        prev_z_velocity = curr_z_velocity

                                # We have made contact with the ground and can verify expected collision type
                                # based on how velocity has changed after the collision
                                if expected_collision_type == "inelastic":
                                    # Assert that the block has lost most energy by checking that the z velocity is < 1/2 previous
                                    # velocity.
This is because the floor's resitution means it will bounce back an object that itself # has restitution set to 0.0 self.assertTrue(torch.all(torch.le(curr_z_velocity / 2, abs(prev_z_velocity)))) elif expected_collision_type == "partially_elastic": # Assert that the block has lost some energy by checking that the z velocity is less self.assertTrue(torch.all(torch.le(abs(curr_z_velocity), abs(prev_z_velocity)))) elif expected_collision_type == "elastic": # Assert that the block has not lost any energy by checking that the z velocity is the same torch.testing.assert_close(abs(curr_z_velocity), abs(prev_z_velocity)) def test_rigid_body_set_mass(self): """Test getting and setting mass of rigid object.""" for num_cubes in (1, 2): for device in ("cuda:0", "cpu"): with self.subTest(num_cubes=num_cubes, device=device): with build_simulation_context( device=device, gravity_enabled=False, add_ground_plane=True, auto_add_lighting=True ) as sim: cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0) # Play sim sim.reset() # Get masses before increasing original_masses = cube_object.root_physx_view.get_masses() self.assertEqual(original_masses.shape, (num_cubes, 1)) # Randomize mass of the object masses = original_masses + torch.FloatTensor(num_cubes, 1).uniform_(4, 8) indices = torch.tensor(range(num_cubes), dtype=torch.int) # Add friction to cube cube_object.root_physx_view.set_masses(masses, indices) torch.testing.assert_close(cube_object.root_physx_view.get_masses(), masses) # Simulate physics # perform rendering sim.step() # update object cube_object.update(sim.cfg.dt) masses_to_check = cube_object.root_physx_view.get_masses() # Check if mass is set correctly torch.testing.assert_close(masses, masses_to_check) if __name__ == "__main__": run_tests()
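# Illustrative sketch (not part of the test file above): the below_mu/above_mu cases
# rest on the Coulomb condition F <= mu * N, with normal force N = m * g. The test
# scales the applied force directly by mu, which implicitly assumes a unit normal
# force; `mass` and `gravity` below are hypothetical example values, not values
# taken from the test.
def coulomb_static_threshold(mu: float, mass: float = 1.0, gravity: float = 9.81) -> float:
    """Return the maximum tangential force that static friction can resist."""
    return mu * mass * gravity


# e.g. with mu = 0.5 and a 1 kg cube, forces below ~4.9 N should not move the block
assert coulomb_static_threshold(0.5) == 0.5 * 1.0 * 9.81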
28,821
Python
49.922261
134
0.487041
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/assets/test_articulation.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# ignore private usage of variables warning
# pyright: reportPrivateUsage=none

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app

"""Rest everything follows."""

import ctypes
import torch
import unittest

import omni.isaac.core.utils.stage as stage_utils

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import Articulation, ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import ANYMAL_C_CFG, FRANKA_PANDA_CFG, SHADOW_HAND_CFG  # isort:skip


class TestArticulation(unittest.TestCase):
    """Test for articulation class."""

    def setUp(self):
        """Create a blank new stage for each test."""
        # Create a new stage
        stage_utils.create_new_stage()
        # Simulation time-step
        self.dt = 0.005
        # Load kit helper
        sim_cfg = sim_utils.SimulationCfg(dt=self.dt, device="cuda:0")
        self.sim = sim_utils.SimulationContext(sim_cfg)

    def tearDown(self):
        """Stops simulator after each test."""
        # stop simulation
        self.sim.stop()
        # clear the stage
        self.sim.clear_instance()

    """
    Tests
    """

    def test_initialization_floating_base_non_root(self):
        """Test initialization for a floating-base with articulation root on a rigid body
        under the provided prim path."""
        # Create articulation
        robot_cfg = ArticulationCfg(
            prim_path="/World/Robot",
            spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
            init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
            actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=0.0, damping=0.0)},
        )
        robot = Articulation(cfg=robot_cfg)

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check if robot is initialized
        self.assertTrue(robot._is_initialized)
        # Check that floating base
        self.assertFalse(robot.is_fixed_base)
        # Check that buffers exist and have correct shapes
        self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
        self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
        self.assertTrue(robot.data.joint_pos.shape == (1, 21))

        # Check some internal physx data for debugging
        # -- joint related
        self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
        # -- link related
        self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
        # -- link names (check that ordering within the articulation is correct)
        prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
        self.assertListEqual(prim_path_body_names, robot.body_names)

        # Check that the body_physx_view is deprecated
        with self.assertWarns(DeprecationWarning):
            robot.body_physx_view

        # Simulate physics
        for _ in range(10):
            # perform rendering
            self.sim.step()
            # update robot
            robot.update(self.dt)

    def test_initialization_floating_base(self):
        """Test initialization for a floating-base with articulation root on provided prim path."""
        # Create articulation
        robot = Articulation(cfg=ANYMAL_C_CFG.replace(prim_path="/World/Robot"))

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check if robot is initialized
        self.assertTrue(robot._is_initialized)
        # Check that floating base
        self.assertFalse(robot.is_fixed_base)
        # Check that buffers exist and have correct shapes
        self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
        self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
        self.assertTrue(robot.data.joint_pos.shape == (1, 12))

        # Check some internal physx data for debugging
        # -- joint related
        self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
        # -- link related
        self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
        # -- link names (check that ordering within the articulation is correct)
        prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
        self.assertListEqual(prim_path_body_names, robot.body_names)

        # Check that the body_physx_view is deprecated
        with self.assertWarns(DeprecationWarning):
            robot.body_physx_view

        # Simulate physics
        for _ in range(10):
            # perform rendering
            self.sim.step()
            # update robot
            robot.update(self.dt)

    def test_initialization_fixed_base(self):
        """Test initialization for fixed base."""
        # Create articulation
        robot = Articulation(cfg=FRANKA_PANDA_CFG.replace(prim_path="/World/Robot"))

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check if robot is initialized
        self.assertTrue(robot._is_initialized)
        # Check that fixed base
        self.assertTrue(robot.is_fixed_base)
        # Check that buffers exist and have correct shapes
        self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
        self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
        self.assertTrue(robot.data.joint_pos.shape == (1, 9))

        # Check some internal physx data for debugging
        # -- joint related
        self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
        # -- link related
        self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
        # -- link names (check that ordering within the articulation is correct)
        prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
        self.assertListEqual(prim_path_body_names, robot.body_names)

        # Check that the body_physx_view is deprecated
        with self.assertWarns(DeprecationWarning):
            robot.body_physx_view

        # Simulate physics
        for _ in range(10):
            # perform rendering
            self.sim.step()
            # update robot
            robot.update(self.dt)

    def test_initialization_fixed_base_single_joint(self):
        """Test initialization for fixed base articulation with a single joint."""
        # Create articulation
        robot_cfg = ArticulationCfg(
            spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Simple/revolute_articulation.usd"),
            actuators={
                "joint": ImplicitActuatorCfg(
                    joint_names_expr=[".*"],
                    effort_limit=400.0,
                    velocity_limit=100.0,
                    stiffness=0.0,
                    damping=10.0,
                ),
            },
        )
        robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot"))

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check if robot is initialized
        self.assertTrue(robot._is_initialized)
        # Check that fixed base
        self.assertTrue(robot.is_fixed_base)
        # Check that buffers exist and have correct shapes
        self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
        self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
        self.assertTrue(robot.data.joint_pos.shape == (1, 1))

        # Check some internal physx data for debugging
        # -- joint related
        self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
        # -- link related
        self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)
        # -- link names (check that ordering within the articulation is correct)
        prim_path_body_names = [path.split("/")[-1] for path in robot.root_physx_view.link_paths[0]]
        self.assertListEqual(prim_path_body_names, robot.body_names)

        # Check that the body_physx_view is deprecated
        with self.assertWarns(DeprecationWarning):
            robot.body_physx_view

        # Simulate physics
        for _ in range(10):
            # perform rendering
            self.sim.step()
            # update robot
            robot.update(self.dt)

    def test_initialization_hand_with_tendons(self):
        """Test initialization for fixed base articulated hand with tendons."""
        # Create articulation
        robot_cfg = SHADOW_HAND_CFG
        robot = Articulation(cfg=robot_cfg.replace(prim_path="/World/Robot"))

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check if robot is initialized
        self.assertTrue(robot._is_initialized)
        # Check that fixed base
        self.assertTrue(robot.is_fixed_base)
        # Check that buffers exist and have correct shapes
        self.assertTrue(robot.data.root_pos_w.shape == (1, 3))
        self.assertTrue(robot.data.root_quat_w.shape == (1, 4))
        self.assertTrue(robot.data.joint_pos.shape == (1, 24))

        # Check some internal physx data for debugging
        # -- joint related
        self.assertEqual(robot.root_physx_view.max_dofs, robot.root_physx_view.shared_metatype.dof_count)
        # -- link related
        self.assertEqual(robot.root_physx_view.max_links, robot.root_physx_view.shared_metatype.link_count)

        # Simulate physics
        for _ in range(10):
            # perform rendering
            self.sim.step()
            # update robot
            robot.update(self.dt)

    def test_out_of_range_default_joint_pos(self):
        """Test that the default joint position from configuration is out of range."""
        # Create articulation
        robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot")
        robot_cfg.init_state.joint_pos = {
            "panda_joint1": 10.0,
            "panda_joint[2, 4]": -20.0,
        }
        robot = Articulation(robot_cfg)

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check that the robot failed to initialize
        self.assertFalse(robot._is_initialized)

    def test_out_of_range_default_joint_vel(self):
        """Test that the default joint velocity from configuration is out of range."""
        # Create articulation
        robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot")
        robot_cfg.init_state.joint_vel = {
            "panda_joint1": 100.0,
            "panda_joint[2, 4]": -60.0,
        }
        robot = Articulation(robot_cfg)

        # Check that boundedness of articulation is correct
        self.assertEqual(ctypes.c_long.from_address(id(robot)).value, 1)

        # Play sim
        self.sim.reset()
        # Check that the robot failed to initialize
        self.assertFalse(robot._is_initialized)

    def test_external_force_on_single_body(self):
        """Test application of external force on the base of the robot."""
        # Robots
        robot_cfg = ANYMAL_C_CFG
        robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
        robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
        # create handles for the robots
        robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))

        # Play the simulator
        self.sim.reset()

        # Find bodies to apply the force
        body_ids, _ = robot.find_bodies("base")
        # Sample a large force
        external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=self.sim.device)
        external_wrench_b[..., 1] = 1000.0

        # Now we are ready!
        for _ in range(5):
            # reset root state
            root_state = robot.data.default_root_state.clone()
            root_state[0, :2] = torch.tensor([0.0, -0.5], device=self.sim.device)
            root_state[1, :2] = torch.tensor([0.0, 0.5], device=self.sim.device)
            robot.write_root_state_to_sim(root_state)
            # reset dof state
            joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # reset robot
            robot.reset()
            # apply force
            robot.set_external_force_and_torque(
                external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
            )
            # perform simulation
            for _ in range(100):
                # apply action to the robot
                robot.set_joint_position_target(robot.data.default_joint_pos.clone())
                robot.write_data_to_sim()
                # perform step
                self.sim.step()
                # update buffers
                robot.update(self.dt)
            # check condition that the robots have fallen down
            self.assertTrue(robot.data.root_pos_w[0, 2].item() < 0.2)
            self.assertTrue(robot.data.root_pos_w[1, 2].item() < 0.2)

    def test_external_force_on_multiple_bodies(self):
        """Test application of external force on the legs of the robot."""
        # Robots
        robot_cfg = ANYMAL_C_CFG
        robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
        robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
        # create handles for the robots
        robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))

        # Play the simulator
        self.sim.reset()

        # Find bodies to apply the force
        body_ids, _ = robot.find_bodies(".*_SHANK")
        # Sample a large force
        external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=self.sim.device)
        external_wrench_b[..., 1] = 100.0

        # Now we are ready!
        for _ in range(5):
            # reset root state
            root_state = robot.data.default_root_state.clone()
            root_state[0, :2] = torch.tensor([0.0, -0.5], device=self.sim.device)
            root_state[1, :2] = torch.tensor([0.0, 0.5], device=self.sim.device)
            robot.write_root_state_to_sim(root_state)
            # reset dof state
            joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # reset robot
            robot.reset()
            # apply force
            robot.set_external_force_and_torque(
                external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
            )
            # perform simulation
            for _ in range(100):
                # apply action to the robot
                robot.set_joint_position_target(robot.data.default_joint_pos.clone())
                robot.write_data_to_sim()
                # perform step
                self.sim.step()
                # update buffers
                robot.update(self.dt)
            # check condition
            # since there is a moment applied on the robot, the robot should rotate
            self.assertTrue(robot.data.root_ang_vel_w[0, 2].item() > 0.1)
            self.assertTrue(robot.data.root_ang_vel_w[1, 2].item() > 0.1)

    def test_loading_gains_from_usd(self):
        """Test that gains are loaded from USD file if actuator model has them as None."""
        # Create articulation
        robot_cfg = ArticulationCfg(
            prim_path="/World/Robot",
            spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
            init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
            actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=None, damping=None)},
        )
        robot = Articulation(cfg=robot_cfg)

        # Play sim
        self.sim.reset()

        # Expected gains
        # -- Stiffness values
        expected_stiffness = {
            ".*_waist.*": 20.0,
            ".*_upper_arm.*": 10.0,
            "pelvis": 10.0,
            ".*_lower_arm": 2.0,
            ".*_thigh:0": 10.0,
            ".*_thigh:1": 20.0,
            ".*_thigh:2": 10.0,
            ".*_shin": 5.0,
            ".*_foot.*": 2.0,
        }
        indices_list, _, values_list = string_utils.resolve_matching_names_values(
            expected_stiffness, robot.joint_names
        )
        expected_stiffness = torch.zeros(robot.num_instances, robot.num_joints, device=robot.device)
        expected_stiffness[:, indices_list] = torch.tensor(values_list, device=robot.device)
        # -- Damping values
        expected_damping = {
            ".*_waist.*": 5.0,
            ".*_upper_arm.*": 5.0,
            "pelvis": 5.0,
            ".*_lower_arm": 1.0,
            ".*_thigh:0": 5.0,
            ".*_thigh:1": 5.0,
            ".*_thigh:2": 5.0,
            ".*_shin": 0.1,
            ".*_foot.*": 1.0,
        }
        indices_list, _, values_list = string_utils.resolve_matching_names_values(
            expected_damping, robot.joint_names
        )
        expected_damping = torch.zeros_like(expected_stiffness)
        expected_damping[:, indices_list] = torch.tensor(values_list, device=robot.device)

        # Check that gains are loaded from USD file
        torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
        torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)

    def test_setting_gains_from_cfg(self):
        """Test that gains are loaded from the configuration correctly.

        Note: We purposefully give one argument as int and the other as float to check that it is handled correctly.
        """
        # Create articulation
        robot_cfg = ArticulationCfg(
            prim_path="/World/Robot",
            spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
            init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
            actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=10, damping=2.0)},
        )
        robot = Articulation(cfg=robot_cfg)

        # Play sim
        self.sim.reset()

        # Expected gains
        expected_stiffness = torch.full((robot.num_instances, robot.num_joints), 10.0, device=robot.device)
        expected_damping = torch.full_like(expected_stiffness, 2.0)

        # Check that gains are set from the configuration
        torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
        torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)

    def test_setting_gains_from_cfg_dict(self):
        """Test that gains are loaded from the configuration dictionary correctly.

        Note: We purposefully give one argument as int and the other as float to check that it is handled correctly.
        """
        # Create articulation
        robot_cfg = ArticulationCfg(
            prim_path="/World/Robot",
            spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd"),
            init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)),
            actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness={".*": 10}, damping={".*": 2.0})},
        )
        robot = Articulation(cfg=robot_cfg)

        # Play sim
        self.sim.reset()

        # Expected gains
        expected_stiffness = torch.full((robot.num_instances, robot.num_joints), 10.0, device=robot.device)
        expected_damping = torch.full_like(expected_stiffness, 2.0)

        # Check that gains are set from the configuration
        torch.testing.assert_close(robot.actuators["body"].stiffness, expected_stiffness)
        torch.testing.assert_close(robot.actuators["body"].damping, expected_damping)


if __name__ == "__main__":
    run_tests()
21,138
Python
40.69428
120
0.614155
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/assets/check_external_force.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script checks if the external force is applied correctly on the robot.

.. code-block:: bash

    # Usage to apply force on base
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_external_force.py --body base --force 1000

    # Usage to apply force on legs
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/assets/check_external_force.py --body .*_SHANK --force 100
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to apply an external force on a legged robot.")
parser.add_argument("--body", default="base", type=str, help="Name of the body to apply force on.")
parser.add_argument("--force", default=1000.0, type=float, help="Force to apply on the body.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort:skip


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])

    # Spawn things into stage
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DistantLightCfg(intensity=1000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light/greyLight", cfg)

    # Robots
    robot_cfg = ANYMAL_C_CFG
    robot_cfg.spawn.func("/World/Anymal_c/Robot_1", robot_cfg.spawn, translation=(0.0, -0.5, 0.65))
    robot_cfg.spawn.func("/World/Anymal_c/Robot_2", robot_cfg.spawn, translation=(0.0, 0.5, 0.65))
    # create handles for the robots
    robot = Articulation(robot_cfg.replace(prim_path="/World/Anymal_c/Robot.*"))

    # Play the simulator
    sim.reset()

    # Find bodies to apply the force
    body_ids, body_names = robot.find_bodies(args_cli.body)
    # Sample a large force
    external_wrench_b = torch.zeros(robot.num_instances, len(body_ids), 6, device=sim.device)
    external_wrench_b[..., 1] = args_cli.force

    # Now we are ready!
    print("[INFO]: Setup complete...")
    print("[INFO]: Applying force on the robot: ", args_cli.body, " -> ", body_names)

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 100 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset root state
            root_state = robot.data.default_root_state.clone()
            root_state[0, :2] = torch.tensor([0.0, -0.5], device=sim.device)
            root_state[1, :2] = torch.tensor([0.0, 0.5], device=sim.device)
            robot.write_root_state_to_sim(root_state)
            # reset dof state
            joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            robot.reset()
            # apply force
            robot.set_external_force_and_torque(
                external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
            )
            # reset command
            print(">>>>>>>> Reset!")
        # apply action to the robot
        robot.set_joint_position_target(robot.data.default_joint_pos.clone())
        robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        robot.update(sim_dt)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,295
Python
31.545454
116
0.641444
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/markers/test_visualization_markers.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

from omni.isaac.orbit.app import AppLauncher, run_tests

# launch omniverse app
config = {"headless": True}
simulation_app = AppLauncher(config).app

"""Rest everything follows."""

import torch
import unittest

import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG, POSITION_GOAL_MARKER_CFG
from omni.isaac.orbit.utils.math import random_orientation
from omni.isaac.orbit.utils.timer import Timer


class TestUsdVisualizationMarkers(unittest.TestCase):
    """Test fixture for the VisualizationMarker class."""

    def setUp(self):
        """Create a blank new stage for each test."""
        # Simulation time-step
        self.dt = 0.01
        # Open a new stage
        stage_utils.create_new_stage()
        # Load kit helper
        self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="torch", device="cuda:0")

    def tearDown(self) -> None:
        """Stops simulator after each test."""
        # stop simulation
        self.sim.stop()
        # close stage
        stage_utils.close_stage()
        # clear the simulation context
        self.sim.clear_instance()

    def test_instantiation(self):
        """Test that the class can be initialized properly."""
        config = VisualizationMarkersCfg(
            prim_path="/World/Visuals/test",
            markers={
                "test": sim_utils.SphereCfg(radius=1.0),
            },
        )
        test_marker = VisualizationMarkers(config)
        print(test_marker)
        # check number of markers
        self.assertEqual(test_marker.num_prototypes, 1)

    def test_usd_marker(self):
        """Test with marker from a USD."""
        # create a marker
        config = FRAME_MARKER_CFG.replace(prim_path="/World/Visuals/test_frames")
        test_marker = VisualizationMarkers(config)
        # play the simulation
        self.sim.reset()
        # create a buffer
        num_frames = 0
        # run with randomization of poses
        for count in range(1000):
            # sample random poses
            if count % 50 == 0:
                num_frames = torch.randint(10, 1000, (1,)).item()
                frame_translations = torch.randn((num_frames, 3))
                frame_rotations = random_orientation(num_frames, device=self.sim.device)
                # set the marker
                test_marker.visualize(translations=frame_translations, orientations=frame_rotations)
            # update the kit
            self.sim.step()
            # assert that the count is correct
            self.assertEqual(test_marker.count, num_frames)

    def test_usd_marker_color(self):
        """Test with marker from a USD with its color modified."""
        # create a marker
        config = FRAME_MARKER_CFG.copy()
        config.prim_path = "/World/Visuals/test_frames"
        config.markers["frame"].visual_material = sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0))
        test_marker = VisualizationMarkers(config)
        # play the simulation
        self.sim.reset()
        # run with randomization of poses
        for count in range(1000):
            # sample random poses
            if count % 50 == 0:
                num_frames = torch.randint(10, 1000, (1,)).item()
                frame_translations = torch.randn((num_frames, 3))
                frame_rotations = random_orientation(num_frames, device=self.sim.device)
                # set the marker
                test_marker.visualize(translations=frame_translations, orientations=frame_rotations)
            # update the kit
            self.sim.step()

    def test_multiple_prototypes_marker(self):
        """Test with multiple prototypes of spheres."""
        # create a marker
        config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
        test_marker = VisualizationMarkers(config)
        # play the simulation
        self.sim.reset()
        # run with randomization of poses
        for count in range(1000):
            # sample random poses
            if count % 50 == 0:
                num_frames = torch.randint(100, 1000, (1,)).item()
                frame_translations = torch.randn((num_frames, 3))
                # randomly choose a prototype
                marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
                # set the marker
                test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
            # update the kit
            self.sim.step()

    def test_visualization_time_based_on_prototypes(self):
        """Test with time taken when number of prototypes is increased."""
        # create a marker
        config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
        test_marker = VisualizationMarkers(config)
        # play the simulation
        self.sim.reset()
        # number of frames
        num_frames = 4096
        # check that visibility is true
        self.assertTrue(test_marker.is_visible())
        # run with randomization of poses and indices
        frame_translations = torch.randn((num_frames, 3))
        marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
        # set the marker
        with Timer("Marker visualization with explicit indices") as timer:
            test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
            # save the time
            time_with_marker_indices = timer.time_elapsed
        with Timer("Marker visualization with no indices") as timer:
            test_marker.visualize(translations=frame_translations)
            # save the time
            time_with_no_marker_indices = timer.time_elapsed
        # update the kit
        self.sim.step()
        # check that the time is less
        self.assertLess(time_with_no_marker_indices, time_with_marker_indices)

    def test_visualization_time_based_on_visibility(self):
        """Test with visibility of markers. When invisible, the visualize call should return."""
        # create a marker
        config = POSITION_GOAL_MARKER_CFG.replace(prim_path="/World/Visuals/test_protos")
        test_marker = VisualizationMarkers(config)
        # play the simulation
        self.sim.reset()
        # number of frames
        num_frames = 4096
        # check that visibility is true
        self.assertTrue(test_marker.is_visible())
        # run with randomization of poses and indices
        frame_translations = torch.randn((num_frames, 3))
        marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
        # set the marker
        with Timer("Marker visualization") as timer:
            test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
            # save the time
            time_with_visualization = timer.time_elapsed
        # update the kit
        self.sim.step()
        # make invisible
        test_marker.set_visibility(False)
        # check that visibility is false
        self.assertFalse(test_marker.is_visible())
        # run with randomization of poses and indices
        frame_translations = torch.randn((num_frames, 3))
        marker_indices = torch.randint(0, test_marker.num_prototypes, (num_frames,))
        # set the marker
        with Timer("Marker no visualization") as timer:
            test_marker.visualize(translations=frame_translations, marker_indices=marker_indices)
            # save the time
            time_with_no_visualization = timer.time_elapsed
        # check that the time is less
        self.assertLess(time_with_no_visualization, time_with_visualization)


if __name__ == "__main__":
    run_tests()
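# Illustrative sketch (not part of the test file above): the two timing tests rely
# on a context manager exposing a `time_elapsed` reading. `SimpleTimer` below is a
# hypothetical minimal stand-in for omni.isaac.orbit.utils.timer.Timer, shown only
# to clarify the pattern; it is not the real implementation.
import time


class SimpleTimer:
    def __init__(self, msg: str = ""):
        self.msg = msg
        self._start = 0.0

    def __enter__(self):
        # record the start time when entering the block
        self._start = time.perf_counter()
        return self

    @property
    def time_elapsed(self) -> float:
        # time since the block was entered; readable inside the block, as the tests do
        return time.perf_counter() - self._start

    def __exit__(self, *exc):
        pass


# usage mirrors the tests: time a call, then compare elapsed durations
with SimpleTimer("demo") as _timer:
    sum(range(10_000))
    _elapsed = _timer.time_elapsed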
8,132
Python
37.545024
112
0.631702
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/test/markers/check_markers_visibility.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script checks if the debug markers are visible from the camera.

To check if the markers are visible on different rendering modalities, you can switch them by going
through the synthetic data generation tool in the Isaac Sim UI. For more information,
please check: https://www.youtube.com/watch?v=vLk-f9LWj48&ab_channel=NVIDIAOmniverse

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/extensions/omni.isaac.orbit/test/markers/check_markers_visibility.py
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Check if the debug markers are visible from the camera.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors import RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort:skip


@configclass
class SensorsSceneCfg(InteractiveSceneCfg):
    """Design the scene with sensors on the robot."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # sensors
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        update_period=0.02,
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/defaultGroundPlane"],
    )


def run_simulator(
    sim: sim_utils.SimulationContext,
    scene: InteractiveScene,
):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            root_state = scene["robot"].data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            scene["robot"].write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = (
                scene["robot"].data.default_joint_pos.clone(),
                scene["robot"].data.default_joint_vel.clone(),
            )
            scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply default actions to the robot
        # -- generate actions/commands
        targets = scene["robot"].data.default_joint_pos
        # -- apply action to the robot
        scene["robot"].set_joint_position_target(targets)
        # -- write data to sim
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        scene.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.005, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,678
Python
29.581699
106
0.655622
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Package containing the core framework."""

import os
import toml

# Conveniences to other module directories via relative paths
ORBIT_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""

ORBIT_METADATA = toml.load(os.path.join(ORBIT_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""

# Configure the module-level variables
__version__ = ORBIT_METADATA["package"]["version"]
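# Illustrative sketch (assumption: only the field read above matters): a minimal
# `extension.toml` would contain a `[package]` table with a `version` key. The
# temporary file below exists purely for demonstration; the real metadata lives
# at `config/extension.toml` inside the extension directory.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".toml", delete=False) as _f:
    _f.write('[package]\nversion = "0.1.0"\n')
    _demo_path = _f.name
assert toml.load(_demo_path)["package"]["version"] == "0.1.0"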
635
Python
30.799998
85
0.727559
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/device_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base class for teleoperation interface."""

from __future__ import annotations

from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Any


class DeviceBase(ABC):
    """An interface class for teleoperation devices."""

    def __init__(self):
        """Initialize the teleoperation interface."""
        pass

    def __str__(self) -> str:
        """Returns: A string containing the information of the joystick."""
        return f"{self.__class__.__name__}"

    """
    Operations
    """

    @abstractmethod
    def reset(self):
        """Reset the internals."""
        raise NotImplementedError

    @abstractmethod
    def add_callback(self, key: Any, func: Callable):
        """Add additional functions to bind keyboard.

        Args:
            key: The button to check against.
            func: The function to call when key is pressed. The callback function should not
                take any arguments.
        """
        raise NotImplementedError

    @abstractmethod
    def advance(self) -> Any:
        """Provides the joystick event state.

        Returns:
            The processed output from the joystick.
        """
        raise NotImplementedError
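# Illustrative sketch (hypothetical, not part of the module): a minimal concrete
# device that satisfies the DeviceBase interface above. `DummyDevice` and its
# constant zero command are made up purely for demonstration.
class DummyDevice(DeviceBase):
    def __init__(self):
        super().__init__()
        self._callbacks = {}
        self._command = 0.0

    def reset(self):
        # clear the internal command state
        self._command = 0.0

    def add_callback(self, key: Any, func: Callable):
        # store the callback to be triggered on the given key
        self._callbacks[key] = func

    def advance(self) -> Any:
        # return the latest (here: constant) command
        return self._command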
1,343
Python
23.888888
92
0.627699
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package providing interfaces to different teleoperation devices.

Currently, the following categories of devices are supported:

* **Keyboard**: Standard keyboard with WASD and arrow keys.
* **Spacemouse**: 3D mouse with 6 degrees of freedom.
* **Gamepad**: Gamepad with two 2D joysticks and buttons. Example: Xbox controller.

All device interfaces inherit from the :class:`DeviceBase` class, which provides a common interface
for all devices. The device interface reads the input data when the :meth:`DeviceBase.advance` method
is called. It also provides the function :meth:`DeviceBase.add_callback` to add user-defined callback
functions to be called when a particular input is pressed from the peripheral device.
"""

from .device_base import DeviceBase
from .gamepad import Se2Gamepad, Se3Gamepad
from .keyboard import Se2Keyboard, Se3Keyboard
from .spacemouse import Se2SpaceMouse, Se3SpaceMouse
1,033
Python
40.359998
110
0.791868
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se3_spacemouse.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Spacemouse controller for SE(3) control."""

from __future__ import annotations

import hid
import numpy as np
import threading
import time
from collections.abc import Callable
from scipy.spatial.transform.rotation import Rotation

from ..device_base import DeviceBase
from .utils import convert_buffer


class Se3SpaceMouse(DeviceBase):
    """A space-mouse controller for sending SE(3) commands as delta poses.

    This class implements a space-mouse controller to provide commands to a robotic arm with a gripper.
    It uses the `HID-API`_ which interfaces with USB and Bluetooth HID-class devices across multiple platforms.

    The command comprises of two parts:

    * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
    * gripper: a binary command to open or close the gripper.

    Note:
        The interface finds and uses the first supported device connected to the computer.

    Currently tested for following devices:

    - SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/

    .. _HID-API: https://github.com/libusb/hidapi

    """

    def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8):
        """Initialize the space-mouse layer.

        Args:
            pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.4.
            rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 0.8.
        """
        # store inputs
        self.pos_sensitivity = pos_sensitivity
        self.rot_sensitivity = rot_sensitivity
        # acquire device interface
        self._device = hid.device()
        self._find_device()
        # read rotations
        self._read_rotation = False
        # command buffers
        self._close_gripper = False
        self._delta_pos = np.zeros(3)  # (x, y, z)
        self._delta_rot = np.zeros(3)  # (roll, pitch, yaw)
        # dictionary for additional callbacks
        self._additional_callbacks = dict()
        # run a thread for listening to device updates
        self._thread = threading.Thread(target=self._run_device)
        self._thread.daemon = True
        self._thread.start()

    def __del__(self):
        """Destructor for the class."""
        self._thread.join()

    def __str__(self) -> str:
        """Returns: A string containing the information of the joystick."""
        msg = f"Spacemouse Controller for SE(3): {self.__class__.__name__}\n"
        msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n"
        msg += f"\tProduct: {self._device.get_product_string()}\n"
        msg += "\t----------------------------------------------\n"
        msg += "\tRight button: reset command\n"
        msg += "\tLeft button: toggle gripper command (open/close)\n"
        msg += "\tMove mouse laterally: move arm horizontally in x-y plane\n"
        msg += "\tMove mouse vertically: move arm vertically\n"
        msg += "\tTwist mouse about an axis: rotate arm about a corresponding axis"
        return msg

    """
    Operations
    """

    def reset(self):
        # default flags
        self._close_gripper = False
        self._delta_pos = np.zeros(3)  # (x, y, z)
        self._delta_rot = np.zeros(3)  # (roll, pitch, yaw)

    def add_callback(self, key: str, func: Callable):
        # check keys supported by callback
        if key not in ["L", "R"]:
            raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.")
        # TODO: Improve this to allow multiple buttons on same key.
        self._additional_callbacks[key] = func

    def advance(self) -> tuple[np.ndarray, bool]:
        """Provides the result from spacemouse event state.

        Returns:
            A tuple containing the delta pose command and gripper commands.
        """
        rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec()
        return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper

    """
    Internal helpers.
    """

    def _find_device(self):
        """Find the device connected to computer."""
        found = False
        # implement a timeout for device search
        for _ in range(5):
            for device in hid.enumerate():
                if device["product_string"] == "SpaceMouse Compact":
                    # set found flag
                    found = True
                    vendor_id = device["vendor_id"]
                    product_id = device["product_id"]
                    # connect to the device
                    self._device.open(vendor_id, product_id)
            # check if device found
            if not found:
                time.sleep(1.0)
            else:
                break
        # no device found: raise an error
        if not found:
            raise OSError("No device found by SpaceMouse. Is the device connected?")

    def _run_device(self):
        """Listener thread that keeps pulling new messages."""
        # keep running
        while True:
            # read the device data
            data = self._device.read(7)
            if data is not None:
                # readings from 6-DoF sensor
                if data[0] == 1:
                    self._delta_pos[1] = self.pos_sensitivity * convert_buffer(data[1], data[2])
                    self._delta_pos[0] = self.pos_sensitivity * convert_buffer(data[3], data[4])
                    self._delta_pos[2] = self.pos_sensitivity * convert_buffer(data[5], data[6]) * -1.0
                elif data[0] == 2 and not self._read_rotation:
                    self._delta_rot[1] = self.rot_sensitivity * convert_buffer(data[1], data[2])
                    self._delta_rot[0] = self.rot_sensitivity * convert_buffer(data[3], data[4])
                    self._delta_rot[2] = self.rot_sensitivity * convert_buffer(data[5], data[6])
                # readings from the side buttons
                elif data[0] == 3:
                    # press left button
                    if data[1] == 1:
                        # close gripper
                        self._close_gripper = not self._close_gripper
                        # additional callbacks (note: the registered function must be called)
                        if "L" in self._additional_callbacks:
                            self._additional_callbacks["L"]()
                    # right button is for reset
                    if data[1] == 2:
                        # reset layer
                        self.reset()
                        # additional callbacks (note: the registered function must be called)
                        if "R" in self._additional_callbacks:
                            self._additional_callbacks["R"]()
                    if data[1] == 3:
                        self._read_rotation = not self._read_rotation
6,921
Python
38.781609
115
0.569426
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Spacemouse device for SE(2) and SE(3) control."""

from .se2_spacemouse import Se2SpaceMouse
from .se3_spacemouse import Se3SpaceMouse
261
Python
25.199997
56
0.758621
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Helper functions for SpaceMouse."""

# MIT License
#
# Copyright (c) 2022 Stanford Vision and Learning Lab and UT Robot Perception and Learning Lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


def convert_buffer(b1, b2):
    """Converts raw SpaceMouse readings to commands.

    Args:
        b1: 8-bit byte
        b2: 8-bit byte

    Returns:
        Scaled value from Space-mouse message
    """
    return _scale_to_control(_to_int16(b1, b2))


"""
Private methods.
"""


def _to_int16(y1, y2):
    """Convert two 8 bit bytes to a signed 16 bit integer.

    Args:
        y1: 8-bit byte
        y2: 8-bit byte

    Returns:
        16-bit integer
    """
    x = (y1) | (y2 << 8)
    if x >= 32768:
        x = -(65536 - x)
    return x


def _scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
    """Normalize raw HID readings to target range.

    Args:
        x: Raw reading from HID
        axis_scale: (Inverted) scaling factor for mapping raw input value
        min_v: Minimum limit after scaling
        max_v: Maximum limit after scaling

    Returns:
        Clipped, scaled input from HID
    """
    x = x / axis_scale
    return min(max(x, min_v), max_v)
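# Worked example of the decoding above (byte values chosen for illustration):
# the pair (0x2C, 0x01) is little-endian for 0x012C == 300, which scales to
# 300 / 350 before clipping; (0x00, 0x80) decodes to -32768 and clips to -1.0.
assert _to_int16(0x2C, 0x01) == 300
assert convert_buffer(0x2C, 0x01) == 300 / 350.0
assert _to_int16(0x00, 0x80) == -32768
assert _scale_to_control(_to_int16(0x00, 0x80)) == -1.0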
2,326
Python
28.455696
94
0.686586
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/spacemouse/se2_spacemouse.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Spacemouse controller for SE(2) control."""

from __future__ import annotations

import hid
import numpy as np
import threading
import time
from collections.abc import Callable

from ..device_base import DeviceBase
from .utils import convert_buffer


class Se2SpaceMouse(DeviceBase):
    r"""A space-mouse controller for sending SE(2) commands as delta poses.

    This class implements a space-mouse controller to provide commands to mobile base.
    It uses the `HID-API`_ which interfaces with USB and Bluetooth HID-class devices across multiple platforms.

    The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`.

    Note:
        The interface finds and uses the first supported device connected to the computer.

    Currently tested for following devices:

    - SpaceMouse Compact: https://3dconnexion.com/de/product/spacemouse-compact/

    .. _HID-API: https://github.com/libusb/hidapi

    """

    def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0):
        """Initialize the spacemouse layer.

        Args:
            v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8.
            v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4.
            omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0.
        """
        # store inputs
        self.v_x_sensitivity = v_x_sensitivity
        self.v_y_sensitivity = v_y_sensitivity
        self.omega_z_sensitivity = omega_z_sensitivity
        # acquire device interface
        self._device = hid.device()
        self._find_device()
        # command buffers
        self._base_command = np.zeros(3)
        # dictionary for additional callbacks
        self._additional_callbacks = dict()
        # run a thread for listening to device updates
        self._thread = threading.Thread(target=self._run_device)
        self._thread.daemon = True
        self._thread.start()

    def __del__(self):
        """Destructor for the class."""
        self._thread.join()

    def __str__(self) -> str:
        """Returns: A string containing the information of the joystick."""
        msg = f"Spacemouse Controller for SE(2): {self.__class__.__name__}\n"
        msg += f"\tManufacturer: {self._device.get_manufacturer_string()}\n"
        msg += f"\tProduct: {self._device.get_product_string()}\n"
        msg += "\t----------------------------------------------\n"
        msg += "\tRight button: reset command\n"
        msg += "\tMove mouse laterally: move base horizontally in x-y plane\n"
        msg += "\tTwist mouse about z-axis: yaw base about a corresponding axis"
        return msg

    """
    Operations
    """

    def reset(self):
        # default flags
        self._base_command.fill(0.0)

    def add_callback(self, key: str, func: Callable):
        # check keys supported by callback
        if key not in ["L", "R"]:
            raise ValueError(f"Only left (L) and right (R) buttons supported. Provided: {key}.")
        # TODO: Improve this to allow multiple buttons on same key.
        self._additional_callbacks[key] = func

    def advance(self) -> np.ndarray:
        """Provides the result from spacemouse event state.

        Returns:
            A 3D array containing the linear (x,y) and angular velocity (z).
        """
        return self._base_command

    """
    Internal helpers.
    """

    def _find_device(self):
        """Find the device connected to computer."""
        found = False
        # implement a timeout for device search
        for _ in range(5):
            for device in hid.enumerate():
                if device["product_string"] == "SpaceMouse Compact":
                    # set found flag
                    found = True
                    vendor_id = device["vendor_id"]
                    product_id = device["product_id"]
                    # connect to the device
                    self._device.open(vendor_id, product_id)
            # check if device found
            if not found:
                time.sleep(1.0)
            else:
                break
        # no device found: raise an error
        if not found:
            raise OSError("No device found by SpaceMouse. Is the device connected?")

    def _run_device(self):
        """Listener thread that keeps pulling new messages."""
        # keep running
        while True:
            # read the device data
            data = self._device.read(13)
            if data is not None:
                # readings from 6-DoF sensor
                if data[0] == 1:
                    # along y-axis
                    self._base_command[1] = self.v_y_sensitivity * convert_buffer(data[1], data[2])
                    # along x-axis
                    self._base_command[0] = self.v_x_sensitivity * convert_buffer(data[3], data[4])
                elif data[0] == 2:
                    # along z-axis
                    self._base_command[2] = self.omega_z_sensitivity * convert_buffer(data[3], data[4])
                # readings from the side buttons
                elif data[0] == 3:
                    # press left button
                    if data[1] == 1:
                        # additional callbacks (note: the registered function must be called)
                        if "L" in self._additional_callbacks:
                            self._additional_callbacks["L"]()
                    # right button is for reset
                    if data[1] == 2:
                        # reset layer
                        self.reset()
                        # additional callbacks (note: the registered function must be called)
                        if "R" in self._additional_callbacks:
                            self._additional_callbacks["R"]()
5,913
Python
36.66879
117
0.566041
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se2_gamepad.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Gamepad controller for SE(2) control.""" from __future__ import annotations import numpy as np import weakref from collections.abc import Callable import carb import omni from ..device_base import DeviceBase class Se2Gamepad(DeviceBase): r"""A gamepad controller for sending SE(2) commands as velocity commands. This class is designed to provide a gamepad controller for mobile base (such as quadrupeds). It uses the Omniverse gamepad interface to listen to gamepad events and map them to robot's task-space commands. The command comprises of the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`. Key bindings: ====================== ========================= ======================== Command Key (+ve axis) Key (-ve axis) ====================== ========================= ======================== Move along x-axis left stick up left stick down Move along y-axis left stick right left stick left Rotate along z-axis right stick right right stick left ====================== ========================= ======================== .. seealso:: The official documentation for the gamepad interface: `Carb Gamepad Interface <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__. """ def __init__( self, v_x_sensitivity: float = 1.0, v_y_sensitivity: float = 1.0, omega_z_sensitivity: float = 1.0, dead_zone: float = 0.01, ): """Initialize the gamepad layer. Args: v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 1.0. v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 1.0. omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0. dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than this value will be ignored. Defaults to 0.01. """ # turn off simulator gamepad control carb_settings_iface = carb.settings.get_settings() carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False) # store inputs self.v_x_sensitivity = v_x_sensitivity self.v_y_sensitivity = v_y_sensitivity self.omega_z_sensitivity = omega_z_sensitivity self.dead_zone = dead_zone # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._gamepad = self._appwindow.get_gamepad(0) # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called self._gamepad_sub = self._input.subscribe_to_gamepad_events( self._gamepad, lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args), ) # bindings for gamepad to command self._create_key_bindings() # command buffers # When using the gamepad, two values are provided for each axis. # For example: when the left stick is moved down, there are two evens: `left_stick_down = 0.8` # and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0, # which is not the desired behavior. Therefore, we save both the values into the buffer and use # the maximum value. 
# (positive, negative), (x, y, yaw) self._base_command_raw = np.zeros([2, 3]) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Unsubscribe from gamepad events.""" self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub) self._gamepad_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Gamepad Controller for SE(2): {self.__class__.__name__}\n" msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n" msg += "\t----------------------------------------------\n" msg += "\tMove in X-Y plane: left stick\n" msg += "\tRotate in Z-axis: right stick\n" return msg """ Operations """ def reset(self): # default flags self._base_command_raw.fill(0.0) def add_callback(self, key: carb.input.GamepadInput, func: Callable): """Add additional functions to bind gamepad. A list of available gamepad keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.GamepadInput>`__. Args: key: The gamepad button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> np.ndarray: """Provides the result from gamepad event state. Returns: A 3D array containing the linear (x,y) and angular velocity (z). """ return self._resolve_command_buffer(self._base_command_raw) """ Internal helpers. """ def _on_gamepad_event(self, event: carb.input.GamepadEvent, *args, **kwargs): """Subscriber callback to when kit is updated. Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput """ # check if the event is a button press cur_val = event.value if abs(cur_val) < self.dead_zone: cur_val = 0 # -- left and right stick if event.input in self._INPUT_STICK_VALUE_MAPPING: direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input] # change the value only if the stick is moved (soft press) self._base_command_raw[direction, axis] = value * cur_val # additional callbacks if event.input in self._additional_callbacks: self._additional_callbacks[event.input]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_STICK_VALUE_MAPPING = { # forward command carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.v_x_sensitivity), # backward command carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.v_x_sensitivity), # right command carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.v_y_sensitivity), # left command carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.v_y_sensitivity), # yaw command (positive) carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 2, self.omega_z_sensitivity), # yaw command (negative) carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 2, self.omega_z_sensitivity), } def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray: """Resolves the command buffer. Args: raw_command: The raw command from the gamepad. Shape is (2, 3) This is a 2D array since gamepad dpad/stick returns two values corresponding to the positive and negative direction. The first index is the direction (0: positive, 1: negative) and the second index is value (absolute) of the command. Returns: Resolved command. 
                Shape is (3,)
        """
        # compare the positive and negative values to decide the sign of the command
        # if the positive value is larger, the sign is positive (i.e. False, 0)
        # if the negative value is larger, the sign is negative (i.e. True, 1)
        command_sign = raw_command[1, :] > raw_command[0, :]
        # extract the command value
        command = raw_command.max(axis=0)
        # apply the sign
        # if the sign is positive, the value is already positive.
        # if the sign is negative, the value is negative after applying the sign.
        command[command_sign] *= -1
        return command
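

# ---------------------------------------------------------------------------
# A minimal sketch (not part of the upstream file) of the buffer-resolution
# trick used by `_resolve_command_buffer` above: positive and negative readings
# per axis are stored separately, the larger magnitude wins, and its sign is
# restored afterwards. Copied into a standalone script, it runs with numpy
# alone; the raw readings below are illustrative.
#
#     import numpy as np
#
#     # (positive, negative) readings for (x, y, yaw): stick pushed forward
#     # while also slightly to the negative-y side
#     raw = np.array([
#         [0.8, 0.0, 0.0],  # magnitudes in the positive direction
#         [0.0, 0.2, 0.0],  # magnitudes in the negative direction
#     ])
#     sign = raw[1, :] > raw[0, :]  # True where the negative reading dominates
#     command = raw.max(axis=0)     # dominant magnitude per axis
#     command[sign] *= -1           # restore the sign
#     print(command)                # -> [ 0.8 -0.2  0. ]
# ---------------------------------------------------------------------------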
8,563
Python
41.396039
192
0.605045
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/se3_gamepad.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Gamepad controller for SE(3) control."""

import numpy as np
import weakref
from collections.abc import Callable
from scipy.spatial.transform import Rotation

import carb
import omni

from ..device_base import DeviceBase


class Se3Gamepad(DeviceBase):
    """A gamepad controller for sending SE(3) commands as delta poses and binary command (open/close).

    This class is designed to provide a gamepad controller for a robotic arm with a gripper.
    It uses the gamepad interface to listen to gamepad events and map them to the robot's
    task-space commands.

    The command comprises two parts:

    * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
    * gripper: a binary command to open or close the gripper.

    Stick and Button bindings:
        ============================ ========================= =========================
        Description                  Stick/Button (+ve axis)   Stick/Button (-ve axis)
        ============================ ========================= =========================
        Toggle gripper (open/close)  X Button                  X Button
        Move along x-axis            Left Stick Up             Left Stick Down
        Move along y-axis            Left Stick Left           Left Stick Right
        Move along z-axis            Right Stick Up            Right Stick Down
        Rotate along x-axis          D-Pad Left                D-Pad Right
        Rotate along y-axis          D-Pad Down                D-Pad Up
        Rotate along z-axis          Right Stick Left          Right Stick Right
        ============================ ========================= =========================

    .. seealso::

        The official documentation for the gamepad interface: `Carb Gamepad Interface
        <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Gamepad>`__.

    """

    def __init__(self, pos_sensitivity: float = 1.0, rot_sensitivity: float = 1.6, dead_zone: float = 0.01):
        """Initialize the gamepad layer.

        Args:
            pos_sensitivity: Magnitude of input position command scaling. Defaults to 1.0.
            rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 1.6.
            dead_zone: Magnitude of dead zone for gamepad. An event value from the gamepad less than
                this value will be ignored. Defaults to 0.01.
        """
        # turn off simulator gamepad control
        carb_settings_iface = carb.settings.get_settings()
        carb_settings_iface.set_bool("/persistent/app/omniverse/gamepadCameraControl", False)
        # store inputs
        self.pos_sensitivity = pos_sensitivity
        self.rot_sensitivity = rot_sensitivity
        self.dead_zone = dead_zone
        # acquire omniverse interfaces
        self._appwindow = omni.appwindow.get_default_app_window()
        self._input = carb.input.acquire_input_interface()
        self._gamepad = self._appwindow.get_gamepad(0)
        # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called
        self._gamepad_sub = self._input.subscribe_to_gamepad_events(
            self._gamepad,
            lambda event, *args, obj=weakref.proxy(self): obj._on_gamepad_event(event, *args),
        )
        # bindings for gamepad to command
        self._create_key_bindings()
        # command buffers
        self._close_gripper = False
        # When using the gamepad, two values are provided for each axis.
        # For example: when the left stick is moved down, there are two events: `left_stick_down = 0.8`
        # and `left_stick_up = 0.0`. If only the value of left_stick_up is used, the value will be 0.0,
        # which is not the desired behavior. Therefore, we save both the values into the buffer and use
        # the maximum value.
# (positive, negative), (x, y, z, roll, pitch, yaw) self._delta_pose_raw = np.zeros([2, 6]) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Unsubscribe from gamepad events.""" self._input.unsubscribe_from_gamepad_events(self._gamepad, self._gamepad_sub) self._gamepad_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Gamepad Controller for SE(3): {self.__class__.__name__}\n" msg += f"\tDevice name: {self._input.get_gamepad_name(self._gamepad)}\n" msg += "\t----------------------------------------------\n" msg += "\tToggle gripper (open/close): X\n" msg += "\tMove arm along x-axis: Left Stick Up/Down\n" msg += "\tMove arm along y-axis: Left Stick Left/Right\n" msg += "\tMove arm along z-axis: Right Stick Up/Down\n" msg += "\tRotate arm along x-axis: D-Pad Right/Left\n" msg += "\tRotate arm along y-axis: D-Pad Down/Up\n" msg += "\tRotate arm along z-axis: Right Stick Left/Right\n" return msg """ Operations """ def reset(self): # default flags self._close_gripper = False self._delta_pose_raw.fill(0.0) def add_callback(self, key: carb.input.GamepadInput, func: Callable): """Add additional functions to bind gamepad. A list of available gamepad keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.GamepadInput>`__. Args: key: The gamepad button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> tuple[np.ndarray, bool]: """Provides the result from gamepad event state. Returns: A tuple containing the delta pose command and gripper commands. """ # -- resolve position command delta_pos = self._resolve_command_buffer(self._delta_pose_raw[:, :3]) # -- resolve rotation command delta_rot = self._resolve_command_buffer(self._delta_pose_raw[:, 3:]) # -- convert to rotation vector rot_vec = Rotation.from_euler("XYZ", delta_rot).as_rotvec() # return the command and gripper state return np.concatenate([delta_pos, rot_vec]), self._close_gripper """ Internal helpers. """ def _on_gamepad_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
        Reference:
            https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=gamepadeventtype#carb.input.Gamepad
        """
        # check if the event is a button press
        cur_val = event.value
        if abs(cur_val) < self.dead_zone:
            cur_val = 0
        # -- button
        if event.input == carb.input.GamepadInput.X:
            # toggle gripper based on the button pressed
            if cur_val > 0.5:
                self._close_gripper = not self._close_gripper
        # -- left and right stick
        if event.input in self._INPUT_STICK_VALUE_MAPPING:
            direction, axis, value = self._INPUT_STICK_VALUE_MAPPING[event.input]
            # change the value only if the stick is moved (soft press)
            self._delta_pose_raw[direction, axis] = value * cur_val
        # -- dpad (4 arrow buttons on the console)
        if event.input in self._INPUT_DPAD_VALUE_MAPPING:
            direction, axis, value = self._INPUT_DPAD_VALUE_MAPPING[event.input]
            # change the value only if button is pressed on the DPAD
            if cur_val > 0.5:
                self._delta_pose_raw[direction, axis] = value
                self._delta_pose_raw[1 - direction, axis] = 0
            else:
                self._delta_pose_raw[:, axis] = 0
        # additional callbacks
        if event.input in self._additional_callbacks:
            self._additional_callbacks[event.input]()
        # since no error, we are fine :)
        return True

    def _create_key_bindings(self):
        """Creates default key binding."""
        # map gamepad input to the element in self._delta_pose_raw
        #   the first index is the direction (0: positive, 1: negative)
        #   the second index is the axis (0: x, 1: y, 2: z, 3: roll, 4: pitch, 5: yaw)
        #   the third index is the sensitivity of the command
        self._INPUT_STICK_VALUE_MAPPING = {
            # forward command
            carb.input.GamepadInput.LEFT_STICK_UP: (0, 0, self.pos_sensitivity),
            # backward command
            carb.input.GamepadInput.LEFT_STICK_DOWN: (1, 0, self.pos_sensitivity),
            # right command
            carb.input.GamepadInput.LEFT_STICK_RIGHT: (0, 1, self.pos_sensitivity),
            # left command
            carb.input.GamepadInput.LEFT_STICK_LEFT: (1, 1, self.pos_sensitivity),
            # upward command
            carb.input.GamepadInput.RIGHT_STICK_UP: (0, 2, self.pos_sensitivity),
            # downward command
            carb.input.GamepadInput.RIGHT_STICK_DOWN: (1, 2, self.pos_sensitivity),
            # yaw command (positive)
            carb.input.GamepadInput.RIGHT_STICK_RIGHT: (0, 5, self.rot_sensitivity),
            # yaw command (negative)
            carb.input.GamepadInput.RIGHT_STICK_LEFT: (1, 5, self.rot_sensitivity),
        }
        self._INPUT_DPAD_VALUE_MAPPING = {
            # pitch command (positive)
            carb.input.GamepadInput.DPAD_UP: (1, 4, self.rot_sensitivity * 0.8),
            # pitch command (negative)
            carb.input.GamepadInput.DPAD_DOWN: (0, 4, self.rot_sensitivity * 0.8),
            # roll command (positive)
            carb.input.GamepadInput.DPAD_RIGHT: (1, 3, self.rot_sensitivity * 0.8),
            # roll command (negative)
            carb.input.GamepadInput.DPAD_LEFT: (0, 3, self.rot_sensitivity * 0.8),
        }

    def _resolve_command_buffer(self, raw_command: np.ndarray) -> np.ndarray:
        """Resolves the command buffer.

        Args:
            raw_command: The raw command from the gamepad. Shape is (2, 3)
                This is a 2D array since gamepad dpad/stick returns two values corresponding to
                the positive and negative direction. The first index is the direction (0: positive, 1: negative)
                and the second index is value (absolute) of the command.

        Returns:
            Resolved command. Shape is (3,)
        """
        # compare the positive and negative values to decide the sign of the command
        # if the positive value is larger, the sign is positive (i.e. False, 0)
        # if the negative value is larger, the sign is negative (i.e. True, 1)
        delta_command_sign = raw_command[1, :] > raw_command[0, :]
        # extract the command value
        delta_command = raw_command.max(axis=0)
        # apply the sign
        # if the sign is positive, the value is already positive.
        # if the sign is negative, the value is negative after applying the sign.
        delta_command[delta_command_sign] *= -1
        return delta_command
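

# ---------------------------------------------------------------------------
# A minimal sketch (not part of the upstream file) of the conversion performed
# in `Se3Gamepad.advance` above: the accumulated (roll, pitch, yaw) deltas are
# turned into a rotation vector before being returned. Copied into a standalone
# script, it runs with numpy and scipy alone; the values are illustrative.
#
#     import numpy as np
#     from scipy.spatial.transform import Rotation
#
#     delta_pos = np.array([0.01, 0.0, 0.0])        # 1 cm along x
#     delta_rot = np.array([0.0, 0.0, np.pi / 18])  # 10 degrees of yaw
#     rot_vec = Rotation.from_euler("XYZ", delta_rot).as_rotvec()
#     command = np.concatenate([delta_pos, rot_vec])  # 6D delta-pose command
#     print(command)
# ---------------------------------------------------------------------------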
11,390
Python
45.493877
192
0.599034
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/gamepad/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Gamepad device for SE(2) and SE(3) control.""" from .se2_gamepad import Se2Gamepad from .se3_gamepad import Se3Gamepad
246
Python
23.699998
56
0.743902
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se2_keyboard.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Keyboard controller for SE(2) control."""

from __future__ import annotations

import numpy as np
import weakref
from collections.abc import Callable

import carb
import omni

from ..device_base import DeviceBase


class Se2Keyboard(DeviceBase):
    r"""A keyboard controller for sending SE(2) commands as velocity commands.

    This class is designed to provide a keyboard controller for a mobile base (such as quadrupeds).
    It uses the Omniverse keyboard interface to listen to keyboard events and map them to the robot's
    task-space commands.

    The command comprises the base linear and angular velocity: :math:`(v_x, v_y, \omega_z)`.

    Key bindings:
        ====================== ========================= ========================
        Command                Key (+ve axis)            Key (-ve axis)
        ====================== ========================= ========================
        Move along x-axis      Numpad 8 / Arrow Up       Numpad 2 / Arrow Down
        Move along y-axis      Numpad 4 / Arrow Right    Numpad 6 / Arrow Left
        Rotate along z-axis    Numpad 7 / X              Numpad 9 / Z
        ====================== ========================= ========================

    .. seealso::

        The official documentation for the keyboard interface: `Carb Keyboard Interface
        <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__.

    """

    def __init__(self, v_x_sensitivity: float = 0.8, v_y_sensitivity: float = 0.4, omega_z_sensitivity: float = 1.0):
        """Initialize the keyboard layer.

        Args:
            v_x_sensitivity: Magnitude of linear velocity along x-direction scaling. Defaults to 0.8.
            v_y_sensitivity: Magnitude of linear velocity along y-direction scaling. Defaults to 0.4.
            omega_z_sensitivity: Magnitude of angular velocity along z-direction scaling. Defaults to 1.0.
""" # store inputs self.v_x_sensitivity = v_x_sensitivity self.v_y_sensitivity = v_y_sensitivity self.omega_z_sensitivity = omega_z_sensitivity # acquire omniverse interfaces self._appwindow = omni.appwindow.get_default_app_window() self._input = carb.input.acquire_input_interface() self._keyboard = self._appwindow.get_keyboard() # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called self._keyboard_sub = self._input.subscribe_to_keyboard_events( self._keyboard, lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args), ) # bindings for keyboard to command self._create_key_bindings() # command buffers self._base_command = np.zeros(3) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Release the keyboard interface.""" self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Keyboard Controller for SE(2): {self.__class__.__name__}\n" msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n" msg += "\t----------------------------------------------\n" msg += "\tReset all commands: L\n" msg += "\tMove forward (along x-axis): Numpad 8 / Arrow Up\n" msg += "\tMove backward (along x-axis): Numpad 2 / Arrow Down\n" msg += "\tMove right (along y-axis): Numpad 4 / Arrow Right\n" msg += "\tMove left (along y-axis): Numpad 6 / Arrow Left\n" msg += "\tYaw positively (along z-axis): Numpad 7 / X\n" msg += "\tYaw negatively (along z-axis): Numpad 9 / Y" return msg """ Operations """ def reset(self): # default flags self._base_command.fill(0.0) def add_callback(self, key: str, func: Callable): """Add additional functions to bind keyboard. A list of available keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__. Args: key: The keyboard button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> np.ndarray: """Provides the result from keyboard event state. Returns: 3D array containing the linear (x,y) and angular velocity (z). """ return self._base_command """ Internal helpers. """ def _on_keyboard_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput """ # apply the command when pressed if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name == "L": self.reset() elif event.input.name in self._INPUT_KEY_MAPPING: self._base_command += self._INPUT_KEY_MAPPING[event.input.name] # remove the command when un-pressed if event.type == carb.input.KeyboardEventType.KEY_RELEASE: if event.input.name in self._INPUT_KEY_MAPPING: self._base_command -= self._INPUT_KEY_MAPPING[event.input.name] # additional callbacks if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._additional_callbacks: self._additional_callbacks[event.input.name]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_KEY_MAPPING = { # forward command "NUMPAD_8": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity, "UP": np.asarray([1.0, 0.0, 0.0]) * self.v_x_sensitivity, # back command "NUMPAD_2": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity, "DOWN": np.asarray([-1.0, 0.0, 0.0]) * self.v_x_sensitivity, # right command "NUMPAD_4": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity, "LEFT": np.asarray([0.0, 1.0, 0.0]) * self.v_y_sensitivity, # left command "NUMPAD_6": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity, "RIGHT": np.asarray([0.0, -1.0, 0.0]) * self.v_y_sensitivity, # yaw command (positive) "NUMPAD_7": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity, "X": np.asarray([0.0, 0.0, 1.0]) * self.omega_z_sensitivity, # yaw command (negative) "NUMPAD_9": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity, "Z": np.asarray([0.0, 0.0, -1.0]) * self.omega_z_sensitivity, }
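

# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the upstream file). It only works inside a
# running Isaac Sim / Omniverse Kit application, since the controller
# subscribes to the app window's keyboard; the callback and the loop condition
# below are illustrative, not part of the original API surface.
#
#     teleop = Se2Keyboard(v_x_sensitivity=0.8, v_y_sensitivity=0.4)
#     teleop.add_callback("R", lambda: print("R pressed"))  # extra key binding
#     print(teleop)
#     while simulation_is_running():  # hypothetical loop condition
#         base_velocity = teleop.advance()  # (v_x, v_y, omega_z) as np.ndarray
#         ...  # feed `base_velocity` to the robot's velocity command, step sim
# ---------------------------------------------------------------------------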
7,354
Python
42.264706
195
0.584444
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/devices/keyboard/se3_keyboard.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Keyboard controller for SE(3) control."""

import numpy as np
import weakref
from collections.abc import Callable
from scipy.spatial.transform import Rotation

import carb
import omni

from ..device_base import DeviceBase


class Se3Keyboard(DeviceBase):
    """A keyboard controller for sending SE(3) commands as delta poses and binary command (open/close).

    This class is designed to provide a keyboard controller for a robotic arm with a gripper.
    It uses the Omniverse keyboard interface to listen to keyboard events and map them to the robot's
    task-space commands.

    The command comprises two parts:

    * delta pose: a 6D vector of (x, y, z, roll, pitch, yaw) in meters and radians.
    * gripper: a binary command to open or close the gripper.

    Key bindings:
        ============================== ================= =================
        Description                    Key (+ve axis)    Key (-ve axis)
        ============================== ================= =================
        Toggle gripper (open/close)    K
        Move along x-axis              W                 S
        Move along y-axis              A                 D
        Move along z-axis              Q                 E
        Rotate along x-axis            Z                 X
        Rotate along y-axis            T                 G
        Rotate along z-axis            C                 V
        ============================== ================= =================

    .. seealso::

        The official documentation for the keyboard interface: `Carb Keyboard Interface
        <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html#carb.input.Keyboard>`__.

    """

    def __init__(self, pos_sensitivity: float = 0.4, rot_sensitivity: float = 0.8):
        """Initialize the keyboard layer.

        Args:
            pos_sensitivity: Magnitude of input position command scaling. Defaults to 0.4.
            rot_sensitivity: Magnitude of input rotation command scaling. Defaults to 0.8.
        """
        # store inputs
        self.pos_sensitivity = pos_sensitivity
        self.rot_sensitivity = rot_sensitivity
        # acquire omniverse interfaces
        self._appwindow = omni.appwindow.get_default_app_window()
        self._input = carb.input.acquire_input_interface()
        self._keyboard = self._appwindow.get_keyboard()
        # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called.
self._keyboard_sub = self._input.subscribe_to_keyboard_events( self._keyboard, lambda event, *args, obj=weakref.proxy(self): obj._on_keyboard_event(event, *args), ) # bindings for keyboard to command self._create_key_bindings() # command buffers self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) # dictionary for additional callbacks self._additional_callbacks = dict() def __del__(self): """Release the keyboard interface.""" self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: """Returns: A string containing the information of joystick.""" msg = f"Keyboard Controller for SE(3): {self.__class__.__name__}\n" msg += f"\tKeyboard name: {self._input.get_keyboard_name(self._keyboard)}\n" msg += "\t----------------------------------------------\n" msg += "\tToggle gripper (open/close): K\n" msg += "\tMove arm along x-axis: W/S\n" msg += "\tMove arm along y-axis: A/D\n" msg += "\tMove arm along z-axis: Q/E\n" msg += "\tRotate arm along x-axis: Z/X\n" msg += "\tRotate arm along y-axis: T/G\n" msg += "\tRotate arm along z-axis: C/V" return msg """ Operations """ def reset(self): # default flags self._close_gripper = False self._delta_pos = np.zeros(3) # (x, y, z) self._delta_rot = np.zeros(3) # (roll, pitch, yaw) def add_callback(self, key: str, func: Callable): """Add additional functions to bind keyboard. A list of available keys are present in the `carb documentation <https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput>`__. Args: key: The keyboard button to check against. func: The function to call when key is pressed. The callback function should not take any arguments. """ self._additional_callbacks[key] = func def advance(self) -> tuple[np.ndarray, bool]: """Provides the result from keyboard event state. Returns: A tuple containing the delta pose command and gripper commands. """ # convert to rotation vector rot_vec = Rotation.from_euler("XYZ", self._delta_rot).as_rotvec() # return the command and gripper state return np.concatenate([self._delta_pos, rot_vec]), self._close_gripper """ Internal helpers. """ def _on_keyboard_event(self, event, *args, **kwargs): """Subscriber callback to when kit is updated. 
Reference: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput """ # apply the command when pressed if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name == "L": self.reset() if event.input.name == "K": self._close_gripper = not self._close_gripper elif event.input.name in ["W", "S", "A", "D", "Q", "E"]: self._delta_pos += self._INPUT_KEY_MAPPING[event.input.name] elif event.input.name in ["Z", "X", "T", "G", "C", "V"]: self._delta_rot += self._INPUT_KEY_MAPPING[event.input.name] # remove the command when un-pressed if event.type == carb.input.KeyboardEventType.KEY_RELEASE: if event.input.name in ["W", "S", "A", "D", "Q", "E"]: self._delta_pos -= self._INPUT_KEY_MAPPING[event.input.name] elif event.input.name in ["Z", "X", "T", "G", "C", "V"]: self._delta_rot -= self._INPUT_KEY_MAPPING[event.input.name] # additional callbacks if event.type == carb.input.KeyboardEventType.KEY_PRESS: if event.input.name in self._additional_callbacks: self._additional_callbacks[event.input.name]() # since no error, we are fine :) return True def _create_key_bindings(self): """Creates default key binding.""" self._INPUT_KEY_MAPPING = { # toggle: gripper command "K": True, # x-axis (forward) "W": np.asarray([1.0, 0.0, 0.0]) * self.pos_sensitivity, "S": np.asarray([-1.0, 0.0, 0.0]) * self.pos_sensitivity, # y-axis (right-left) "D": np.asarray([0.0, 1.0, 0.0]) * self.pos_sensitivity, "A": np.asarray([0.0, -1.0, 0.0]) * self.pos_sensitivity, # z-axis (up-down) "Q": np.asarray([0.0, 0.0, 1.0]) * self.pos_sensitivity, "E": np.asarray([0.0, 0.0, -1.0]) * self.pos_sensitivity, # roll (around x-axis) "Z": np.asarray([1.0, 0.0, 0.0]) * self.rot_sensitivity, "X": np.asarray([-1.0, 0.0, 0.0]) * self.rot_sensitivity, # pitch (around y-axis) "T": np.asarray([0.0, 1.0, 0.0]) * self.rot_sensitivity, "G": np.asarray([0.0, -1.0, 0.0]) * self.rot_sensitivity, # yaw (around z-axis) "C": np.asarray([0.0, 0.0, 1.0]) * self.rot_sensitivity, "V": np.asarray([0.0, 0.0, -1.0]) * self.rot_sensitivity, }
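

# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the upstream file) for the SE(3) variant;
# it likewise requires a running Isaac Sim application. `advance` returns the
# 6D delta pose (position + rotation vector) and the binary gripper state.
#
#     teleop = Se3Keyboard(pos_sensitivity=0.4, rot_sensitivity=0.8)
#     teleop.reset()
#     delta_pose, close_gripper = teleop.advance()
#     ...  # apply `delta_pose` to the end-effector target, actuate the gripper
# ---------------------------------------------------------------------------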
8,140
Python
42.074074
195
0.561057
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.utils import configclass from .sensor_base import SensorBase @configclass class SensorBaseCfg: """Configuration parameters for a sensor.""" class_type: type[SensorBase] = MISSING """The associated sensor class. The class should inherit from :class:`omni.isaac.orbit.sensors.sensor_base.SensorBase`. """ prim_path: str = MISSING """Prim path (or expression) to the sensor. .. note:: The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which will be replaced with the environment namespace. Example: ``{ENV_REGEX_NS}/Robot/sensor`` will be replaced with ``/World/envs/env_.*/Robot/sensor``. """ update_period: float = 0.0 """Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step).""" history_length: int = 0 """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only the current data is stored (no history).""" debug_vis: bool = False """Whether to visualize the sensor. Defaults to False."""
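

# ---------------------------------------------------------------------------
# A minimal sketch (not part of the upstream file) of how a concrete sensor
# configuration typically extends this base class. `MySensor` and its prim
# path are hypothetical; only the inherited fields are real.
#
#     @configclass
#     class MySensorCfg(SensorBaseCfg):
#         class_type: type = MySensor
#         prim_path: str = "{ENV_REGEX_NS}/Robot/base/my_sensor"
#         update_period: float = 0.02  # refresh at 50 Hz instead of every step
#         history_length: int = 3      # keep the last three frames
#         debug_vis: bool = True
# ---------------------------------------------------------------------------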
1,297
Python
27.844444
107
0.690825
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-package containing various sensor classes implementations. This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both USD-based and custom sensors: * **USD-prim sensors**: Available in Omniverse and require creating a USD prim for them. For instance, RTX ray tracing camera and lidar sensors. * **USD-schema sensors**: Available in Omniverse and require creating a USD schema on an existing prim. For instance, contact sensors and frame transformers. * **Custom sensors**: Implemented in Python and do not require creating any USD prim or schema. For instance, warp-based ray-casters. Due to the above categorization, the prim paths passed to the sensor's configuration class are interpreted differently based on the sensor type. The following table summarizes the interpretation of the prim paths for different sensor types: +---------------------+---------------------------+---------------------------------------------------------------+ | Sensor Type | Example Prim Path | Pre-check | +=====================+===========================+===============================================================+ | Camera | /World/robot/base/camera | Leaf is available, and it will spawn a USD camera | +---------------------+---------------------------+---------------------------------------------------------------+ | Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists | +---------------------+---------------------------+---------------------------------------------------------------+ | Ray Caster | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | +---------------------+---------------------------+---------------------------------------------------------------+ | Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | +---------------------+---------------------------+---------------------------------------------------------------+ """ from .camera import * # noqa: F401, F403 from .contact_sensor import * # noqa: F401, F403 from .frame_transformer import * # noqa: F401 from .ray_caster import * # noqa: F401, F403 from .sensor_base import SensorBase # noqa: F401 from .sensor_base_cfg import SensorBaseCfg # noqa: F401
2,577
Python
60.380951
115
0.510671
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/sensor_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base class for sensors.

This class defines an interface for sensors similar to how the
:class:`omni.isaac.orbit.robot.robot_base.RobotBase` class works. Each sensor class should
inherit from this class and implement the abstract methods.
"""

from __future__ import annotations

import inspect
import torch
import weakref
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any

import omni.kit.app
import omni.timeline

import omni.isaac.orbit.sim as sim_utils

if TYPE_CHECKING:
    from .sensor_base_cfg import SensorBaseCfg


class SensorBase(ABC):
    """The base class for implementing a sensor.

    The implementation is based on lazy evaluation. The sensor data is only updated when the user
    tries accessing the data through the :attr:`data` property or sets ``force_recompute=True`` in
    the :meth:`update` method. This is done to avoid unnecessary computation when the sensor data
    is not used.

    The sensor is updated at the specified update period. If the update period is zero, then the
    sensor is updated at every simulation step.
    """

    def __init__(self, cfg: SensorBaseCfg):
        """Initialize the sensor class.

        Args:
            cfg: The configuration parameters for the sensor.
        """
        # check that config is valid
        if cfg.history_length < 0:
            raise ValueError(f"History length must be greater than or equal to zero! Received: {cfg.history_length}")
        # store inputs
        self.cfg = cfg
        # flag for whether the sensor is initialized
        self._is_initialized = False
        # flag for whether the sensor is in visualization mode
        self._is_visualizing = False

        # note: Use weakref on callbacks to ensure that this object can be deleted when its destructor is called.
        # add callbacks for stage play/stop
        # The order is set to 10 which is arbitrary but should be lower priority than the default order of 0
        timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
        self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type(
            int(omni.timeline.TimelineEventType.PLAY),
            lambda event, obj=weakref.proxy(self): obj._initialize_callback(event),
            order=10,
        )
        self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type(
            int(omni.timeline.TimelineEventType.STOP),
            lambda event, obj=weakref.proxy(self): obj._invalidate_initialize_callback(event),
            order=10,
        )
        # add handle for debug visualization (this is set to a valid handle inside set_debug_vis)
        self._debug_vis_handle = None
        # set initial state of debug visualization
        self.set_debug_vis(self.cfg.debug_vis)

    def __del__(self):
        """Unsubscribe from the callbacks."""
        # clear physics events handles
        if self._initialize_handle:
            self._initialize_handle.unsubscribe()
            self._initialize_handle = None
        if self._invalidate_initialize_handle:
            self._invalidate_initialize_handle.unsubscribe()
            self._invalidate_initialize_handle = None
        # clear debug visualization
        if self._debug_vis_handle:
            self._debug_vis_handle.unsubscribe()
            self._debug_vis_handle = None

    """
    Properties
    """

    @property
    def num_instances(self) -> int:
        """Number of instances of the sensor.

        This is equal to the number of sensors per environment multiplied by the number of environments.
        """
        return self._num_envs

    @property
    def device(self) -> str:
        """Memory device for computation."""
        return self._device

    @property
    @abstractmethod
    def data(self) -> Any:
        """Data from the sensor.

        This property is only updated when the user tries to access the data.
This is done to avoid unnecessary computation when the sensor data is not used. For updating the sensor when this property is accessed, you can use the following code snippet in your sensor implementation: .. code-block:: python # update sensors if needed self._update_outdated_buffers() # return the data (where `_data` is the data for the sensor) return self._data """ raise NotImplementedError @property def has_debug_vis_implementation(self) -> bool: """Whether the sensor has a debug visualization implemented.""" # check if function raises NotImplementedError source_code = inspect.getsource(self._set_debug_vis_impl) return "NotImplementedError" not in source_code """ Operations """ def set_debug_vis(self, debug_vis: bool) -> bool: """Sets whether to visualize the sensor data. Args: debug_vis: Whether to visualize the sensor data. Returns: Whether the debug visualization was successfully set. False if the sensor does not support debug visualization. """ # check if debug visualization is supported if not self.has_debug_vis_implementation: return False # toggle debug visualization objects self._set_debug_vis_impl(debug_vis) # toggle debug visualization flag self._is_visualizing = debug_vis # toggle debug visualization handles if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: app_interface = omni.kit.app.get_app_interface() self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) ) else: # remove the subscriber if it exists if self._debug_vis_handle is not None: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None # return success return True def reset(self, env_ids: Sequence[int] | None = None): """Resets the sensor internals. Args: env_ids: The sensor ids to reset. Defaults to None. """ # Resolve sensor ids if env_ids is None: env_ids = slice(None) # Reset the timestamp for the sensors self._timestamp[env_ids] = 0.0 self._timestamp_last_update[env_ids] = 0.0 # Set all reset sensors to outdated so that they are updated when data is called the next time. self._is_outdated[env_ids] = True def update(self, dt: float, force_recompute: bool = False): # Update the timestamp for the sensors self._timestamp += dt self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period # Update the buffers # TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!? # It is only for the contact sensor but there we should redefine the update function IMO. if force_recompute or self._is_visualizing or (self.cfg.history_length > 0): self._update_outdated_buffers() """ Implementation specific. 
""" @abstractmethod def _initialize_impl(self): """Initializes the sensor-related handles and internal buffers.""" # Obtain Simulation Context sim = sim_utils.SimulationContext.instance() if sim is None: raise RuntimeError("Simulation Context is not initialized!") # Obtain device and backend self._device = sim.device self._backend = sim.backend self._sim_physics_dt = sim.get_physics_dt() # Count number of environments env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0] self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr) self._num_envs = len(self._parent_prims) # Boolean tensor indicating whether the sensor data has to be refreshed self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device) # Current timestamp (in seconds) self._timestamp = torch.zeros(self._num_envs, device=self._device) # Timestamp from last update self._timestamp_last_update = torch.zeros_like(self._timestamp) @abstractmethod def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the sensor data for provided environment ids. This function does not perform any time-based checks and directly fills the data into the data container. Args: env_ids: The indices of the sensors that are ready to capture. """ raise NotImplementedError def _set_debug_vis_impl(self, debug_vis: bool): """Set debug visualization into visualization objects. This function is responsible for creating the visualization objects if they don't exist and input ``debug_vis`` is True. If the visualization objects exist, the function should set their visibility into the stage. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") def _debug_vis_callback(self, event): """Callback for debug visualization. This function calls the visualization objects and sets the data to visualize into them. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") """ Internal simulation callbacks. """ def _initialize_callback(self, event): """Initializes the scene elements. Note: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ if not self._is_initialized: self._initialize_impl() self._is_initialized = True def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" self._is_initialized = False """ Helper functions. """ def _update_outdated_buffers(self): """Fills the sensor data for the outdated sensors.""" outdated_env_ids = self._is_outdated.nonzero().squeeze(-1) if len(outdated_env_ids) > 0: # obtain new data self._update_buffers_impl(outdated_env_ids) # update the timestamp from last update self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids] # set outdated flag to false for the updated sensors self._is_outdated[outdated_env_ids] = False
11,068
Python
37.975352
128
0.644471
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from tensordict import TensorDict from typing import TYPE_CHECKING, ClassVar, Literal import omni.physics.tensors.impl.api as physx from omni.isaac.core.prims import XFormPrimView import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.sensors.camera import CameraData from omni.isaac.orbit.sensors.camera.utils import convert_orientation_convention, create_rotation_matrix_from_view from omni.isaac.orbit.utils.warp import raycast_mesh from .ray_caster import RayCaster if TYPE_CHECKING: from .ray_caster_camera_cfg import RayCasterCameraCfg class RayCasterCamera(RayCaster): """A ray-casting camera sensor. The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are defined in the sensor's local coordinate frame. The sensor has the same interface as the :class:`omni.isaac.orbit.sensors.Camera` that implements the camera class through USD camera prims. However, this class provides a faster image generation. The sensor converts meshes from the list of primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these Warp meshes only. Currently, only the following annotators are supported: - ``"distance_to_camera"``: An image containing the distance to camera optical center. - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - ``"normals"``: An image containing the local surface normal vectors at each pixel. .. note:: Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes is a work in progress. """ cfg: RayCasterCameraCfg """The configuration parameters.""" UNSUPPORTED_TYPES: ClassVar[set[str]] = { "rgb", "instance_id_segmentation", "instance_id_segmentation_fast", "instance_segmentation", "instance_segmentation_fast", "semantic_segmentation", "skeleton_data", "motion_vectors", "bounding_box_2d_tight", "bounding_box_2d_tight_fast", "bounding_box_2d_loose", "bounding_box_2d_loose_fast", "bounding_box_3d", "bounding_box_3d_fast", } """A set of sensor types that are not supported by the ray-caster camera.""" def __init__(self, cfg: RayCasterCameraCfg): """Initializes the camera object. Args: cfg: The configuration parameters. Raises: ValueError: If the provided data types are not supported by the ray-caster camera. 
""" # perform check on supported data types self._check_supported_data_types(cfg) # initialize base class super().__init__(cfg) # create empty variables for storing output data self._data = CameraData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" f"\tview type : {self._view.__class__}\n" f"\tupdate period (s) : {self.cfg.update_period}\n" f"\tnumber of meshes : {len(RayCaster.meshes)}\n" f"\tnumber of sensors : {self._view.count}\n" f"\tnumber of rays/sensor: {self.num_rays}\n" f"\ttotal number of rays : {self.num_rays * self._view.count}\n" f"\timage shape : {self.image_shape}" ) """ Properties """ @property def data(self) -> CameraData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def image_shape(self) -> tuple[int, int]: """A tuple containing (height, width) of the camera sensor.""" return (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) @property def frame(self) -> torch.tensor: """Frame number when the measurement took place.""" return self._frame """ Operations. """ def set_intrinsic_matrices( self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None ): """Set the intrinsic matrix of the camera. Args: matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). focal_length: Focal length to use when computing aperture values. Defaults to 1.0. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. """ # resolve env_ids if env_ids is None: env_ids = slice(None) # save new intrinsic matrices and focal length self._data.intrinsic_matrices[env_ids] = matrices.to(self._device) self._focal_length = focal_length # recompute ray directions self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device ) def reset(self, env_ids: Sequence[int] | None = None): # reset the timestamps super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. pos_w, quat_w = self._compute_camera_world_poses(env_ids) self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w # Reset the frame count self._frame[env_ids] = 0 def set_world_poses( self, positions: torch.Tensor | None = None, orientations: torch.Tensor | None = None, env_ids: Sequence[int] | None = None, convention: Literal["opengl", "ros", "world"] = "ros", ): """Set the pose of the camera w.r.t. the world frame using specified convention. Since different fields use different conventions for camera orientations, the method allows users to set the camera poses in the specified convention. Possible conventions are: - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details on the conventions. Args: positions: The cartesian coordinates (in meters). Shape is (N, 3). Defaults to None, in which case the camera position in not changed. orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). 
                Defaults to None, in which case the camera orientation is not changed.
            env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices.
            convention: The convention in which the poses are fed. Defaults to "ros".

        Raises:
            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
        """
        # resolve env_ids
        if env_ids is None:
            env_ids = self._ALL_INDICES
        # get current positions
        pos_w, quat_w = self._compute_view_world_poses(env_ids)
        if positions is not None:
            # transform to camera frame
            pos_offset_world_frame = positions - pos_w
            self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame)
        if orientations is not None:
            # convert rotation matrix from input convention to world
            quat_w_set = convert_orientation_convention(orientations, origin=convention, target="world")
            self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set)
        # update the data
        pos_w, quat_w = self._compute_camera_world_poses(env_ids)
        self._data.pos_w[env_ids] = pos_w
        self._data.quat_w_world[env_ids] = quat_w

    def set_world_poses_from_view(
        self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
    ):
        """Set the poses of the camera from the eye position and look-at target position.

        Args:
            eyes: The positions of the camera's eye. Shape is (N, 3).
            targets: The target locations to look at. Shape is (N, 3).
            env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices.

        Raises:
            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
            NotImplementedError: If the stage up-axis is not "Y" or "Z".
        """
        # camera position and rotation in opengl convention
        orientations = math_utils.quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device))
        self.set_world_poses(eyes, orientations, env_ids, convention="opengl")

    """
    Implementation.
""" def _initialize_rays_impl(self): # Create all indices buffer self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) # create buffers self._create_buffers() # compute intrinsic matrices self._compute_intrinsic_matrices() # compute ray stars and directions self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device ) self.num_rays = self.ray_directions.shape[1] # create buffer to store ray hits self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) # set offsets quat_w = convert_orientation_convention( torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" ) self._offset_quat = quat_w.repeat(self._view.count, 1) self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" # increment frame count self._frame[env_ids] += 1 # compute poses from current view pos_w, quat_w = self._compute_camera_world_poses(env_ids) # update the data self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w # note: full orientation is considered ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) ray_starts_w += pos_w.unsqueeze(1) ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) # ray cast and store the hits # TODO: Make ray-casting work for multiple meshes? # necessary for regular dictionaries. self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh( ray_starts_w, ray_directions_w, mesh=RayCasterCamera.meshes[self.cfg.mesh_prim_paths[0]], max_dist=self.cfg.max_distance, return_distance=any( [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] ), return_normal="normals" in self.cfg.data_types, ) # update output buffers if "distance_to_image_plane" in self.cfg.data_types: # note: data is in camera frame so we only take the first component (z-axis of camera frame) distance_to_image_plane = ( math_utils.quat_apply( math_utils.quat_inv(quat_w).repeat(1, self.num_rays), (ray_depth[:, :, None] * ray_directions_w), ) )[:, :, 0] self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view(-1, *self.image_shape) if "distance_to_camera" in self.cfg.data_types: self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape) if "normals" in self.cfg.data_types: self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) def _debug_vis_callback(self, event): # in case it crashes be safe if not hasattr(self, "ray_hits_w"): return # show ray hit positions self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3)) """ Private Helpers """ def _check_supported_data_types(self, cfg: RayCasterCameraCfg): """Checks if the data types are supported by the ray-caster camera.""" # check if there is any intersection in unsupported types # reason: we cannot obtain this data from simplified warp-based ray caster common_elements = set(cfg.data_types) & RayCasterCamera.UNSUPPORTED_TYPES if common_elements: raise ValueError( f"RayCasterCamera class does not support the following sensor types: {common_elements}." 
"\n\tThis is because these sensor types cannot be obtained in a fast way using ''warp''." "\n\tHint: If you need to work with these sensor types, we recommend using the USD camera" " interface from the omni.isaac.orbit.sensors.camera module." ) def _create_buffers(self): """Create buffers for storing data.""" # prepare drift self.drift = torch.zeros(self._view.count, 3, device=self.device) # create the data object # -- pose of the cameras self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) # -- intrinsic matrix self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) self._data.intrinsic_matrices[:, 2, 2] = 1.0 self._data.image_shape = self.image_shape # -- output data # create the buffers to store the annotator data. self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device) self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count for name in self.cfg.data_types: if name in ["distance_to_image_plane", "distance_to_camera"]: shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) elif name in ["normals"]: shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 3) else: raise ValueError(f"Received unknown data type: {name}. Please check the configuration.") # allocate tensor to store the data self._data.output[name] = torch.zeros((self._view.count, *shape), device=self._device) def _compute_intrinsic_matrices(self): """Computes the intrinsic matrices for the camera based on the config provided.""" # get the sensor properties pattern_cfg = self.cfg.pattern_cfg # compute the intrinsic matrix vertical_aperture = pattern_cfg.horizontal_aperture * pattern_cfg.height / pattern_cfg.width f_x = pattern_cfg.width * pattern_cfg.focal_length / pattern_cfg.horizontal_aperture f_y = pattern_cfg.height * pattern_cfg.focal_length / vertical_aperture c_x = pattern_cfg.horizontal_aperture_offset * f_x + pattern_cfg.width / 2 c_y = pattern_cfg.vertical_aperture_offset * f_y + pattern_cfg.height / 2 # allocate the intrinsic matrices self._data.intrinsic_matrices[:, 0, 0] = f_x self._data.intrinsic_matrices[:, 0, 2] = c_x self._data.intrinsic_matrices[:, 1, 1] = f_y self._data.intrinsic_matrices[:, 1, 2] = c_y # save focal length self._focal_length = pattern_cfg.focal_length def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: """Obtains the pose of the view the camera is attached to in the world frame. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z). """ # obtain the poses of the sensors # note: clone arg doesn't exist for xform prim view so we need to do this manually if isinstance(self._view, XFormPrimView): pos_w, quat_w = self._view.get_world_poses(env_ids) elif isinstance(self._view, physx.ArticulationView): pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1) quat_w = math_utils.convert_quat(quat_w, to="wxyz") elif isinstance(self._view, physx.RigidBodyView): pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) quat_w = math_utils.convert_quat(quat_w, to="wxyz") else: raise RuntimeError(f"Unsupported view type: {type(self._view)}") # return the pose return pos_w.clone(), quat_w.clone() def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: """Computes the pose of the camera in the world frame. 
This function applies the offset pose to the pose of the view the camera is attached to. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention. """ # get the pose of the view the camera is attached to pos_w, quat_w = self._compute_view_world_poses(env_ids) # apply offsets # need to apply quat because offset relative to parent frame pos_w += math_utils.quat_apply(quat_w, self._offset_pos[env_ids]) quat_w = math_utils.quat_mul(quat_w, self._offset_quat[env_ids]) return pos_w, quat_w
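

# ---------------------------------------------------------------------------
# A standalone numeric sketch (not part of the upstream file) of the pinhole
# intrinsics computed in `_compute_intrinsic_matrices` above. Copied into its
# own script, it runs with numpy alone; the aperture and focal values are
# illustrative, not defaults of any pattern configuration.
#
#     import numpy as np
#
#     width, height = 640, 480
#     focal_length = 24.0           # mm
#     horizontal_aperture = 20.955  # mm
#     h_offset, v_offset = 0.0, 0.0
#
#     vertical_aperture = horizontal_aperture * height / width
#     f_x = width * focal_length / horizontal_aperture
#     f_y = height * focal_length / vertical_aperture  # equals f_x (square pixels)
#     c_x = h_offset * f_x + width / 2
#     c_y = v_offset * f_y + height / 2
#
#     K = np.array([[f_x, 0.0, c_x], [0.0, f_y, c_y], [0.0, 0.0, 1.0]])
#     print(K)
# ---------------------------------------------------------------------------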
18,416
Python
45.0425
120
0.627987
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from dataclasses import dataclass @dataclass class RayCasterData: """Data container for the ray-cast sensor.""" pos_w: torch.Tensor = None """Position of the sensor origin in world frame. Shape is (N, 3), where N is the number of sensors. """ quat_w: torch.Tensor = None """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4), where N is the number of sensors. """ ray_hits_w: torch.Tensor = None """The ray hit positions in the world frame. Shape is (N, B, 3), where N is the number of sensors, B is the number of rays in the scan pattern per sensor. """
830
Python
24.968749
82
0.668675
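# A small sketch (not from the ORBIT sources) of consuming the buffer shapes documented
# in `RayCasterData`: with N sensors and B rays each, the per-ray hit distance is the norm
# of the vector from the sensor origin to the hit point. Random tensors stand in for
# `sensor.data.pos_w` and `sensor.data.ray_hits_w`.
import torch

N, B = 4, 128                     # number of sensors, rays per sensor
pos_w = torch.rand(N, 3)          # RayCasterData.pos_w, shape (N, 3)
ray_hits_w = torch.rand(N, B, 3)  # RayCasterData.ray_hits_w, shape (N, B, 3)

# broadcast each sensor origin over its B rays and measure hit distances
distances = torch.linalg.norm(ray_hits_w - pos_w.unsqueeze(1), dim=-1)
assert distances.shape == (N, B)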
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for Warp-based ray-cast sensor.""" from . import patterns from .ray_caster import RayCaster from .ray_caster_camera import RayCasterCamera from .ray_caster_camera_cfg import RayCasterCameraCfg from .ray_caster_cfg import RayCasterCfg from .ray_caster_data import RayCasterData
415
Python
28.714284
56
0.787952
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import numpy as np
import re
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING, ClassVar

import carb
import omni.physics.tensors.impl.api as physx
import warp as wp
from omni.isaac.core.prims import XFormPrimView
from pxr import UsdGeom, UsdPhysics

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.terrains.trimesh.utils import make_plane
from omni.isaac.orbit.utils.math import convert_quat, quat_apply, quat_apply_yaw
from omni.isaac.orbit.utils.warp import convert_to_warp_mesh, raycast_mesh

from ..sensor_base import SensorBase
from .ray_caster_data import RayCasterData

if TYPE_CHECKING:
    from .ray_caster_cfg import RayCasterCfg


class RayCaster(SensorBase):
    """A ray-casting sensor.

    The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are
    defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against
    a set of meshes with a given ray pattern.

    The meshes are parsed from the list of primitive paths provided in the configuration. These are then
    converted to warp meshes and stored in the :attr:`meshes` dictionary. The ray-caster then ray-casts
    against these warp meshes using the ray pattern provided in the configuration.

    .. note::
        Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes
        is a work in progress.
    """

    cfg: RayCasterCfg
    """The configuration parameters."""

    meshes: ClassVar[dict[str, wp.Mesh]] = {}
    """The warp meshes available for raycasting.

    The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.

    Note:
        We store a global dictionary of all warp meshes to prevent re-loading the mesh for different
        ray-cast sensor instances.
    """

    def __init__(self, cfg: RayCasterCfg):
        """Initializes the ray-caster object.

        Args:
            cfg: The configuration parameters.
        """
        # check if sensor path is valid
        # note: currently we do not handle environment indices if there is a regex pattern in the leaf
        #   For example, if the prim path is "/World/Sensor_[1,2]".
        sensor_path = cfg.prim_path.split("/")[-1]
        sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None
        if sensor_path_is_regex:
            # note: use ``cfg.prim_path`` here since ``self.cfg`` is only set by the base class below
            raise RuntimeError(
                f"Invalid prim path for the ray-caster sensor: {cfg.prim_path}."
                "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf."
            )
        # Initialize base class
        super().__init__(cfg)
        # Create empty variables for storing output data
        self._data = RayCasterData()

    def __str__(self) -> str:
        """Returns: A string containing information about the instance."""
        return (
            f"Ray-caster @ '{self.cfg.prim_path}': \n"
            f"\tview type            : {self._view.__class__}\n"
            f"\tupdate period (s)    : {self.cfg.update_period}\n"
            f"\tnumber of meshes     : {len(RayCaster.meshes)}\n"
            f"\tnumber of sensors    : {self._view.count}\n"
            f"\tnumber of rays/sensor: {self.num_rays}\n"
            f"\ttotal number of rays : {self.num_rays * self._view.count}"
        )

    """
    Properties
    """

    @property
    def num_instances(self) -> int:
        return self._view.count

    @property
    def data(self) -> RayCasterData:
        # update sensors if needed
        self._update_outdated_buffers()
        # return the data
        return self._data

    """
    Operations.
""" def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # resample the drift self.drift[env_ids].uniform_(*self.cfg.drift_range) """ Implementation. """ def _initialize_impl(self): super()._initialize_impl() # create simulation view self._physics_sim_view = physx.create_simulation_view(self._backend) self._physics_sim_view.set_subspace_roots("/") # check if the prim at path is an articulated or rigid prim # we do this since for physics-based view classes we can access their data directly # otherwise we need to use the xform view class which is slower found_supported_prim_class = False prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) if prim is None: raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") # create view based on the type of prim if prim.HasAPI(UsdPhysics.ArticulationRootAPI): self._view = self._physics_sim_view.create_articulation_view(self.cfg.prim_path.replace(".*", "*")) found_supported_prim_class = True elif prim.HasAPI(UsdPhysics.RigidBodyAPI): self._view = self._physics_sim_view.create_rigid_body_view(self.cfg.prim_path.replace(".*", "*")) found_supported_prim_class = True else: self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False) found_supported_prim_class = True carb.log_warn(f"The prim at path {prim.GetPath().pathString} is not a physics prim! Using XFormPrimView.") # check if prim view class is found if not found_supported_prim_class: raise RuntimeError(f"Failed to find a valid prim view class for the prim paths: {self.cfg.prim_path}") # load the meshes by parsing the stage self._initialize_warp_meshes() # initialize the ray start and directions self._initialize_rays_impl() def _initialize_warp_meshes(self): # check number of mesh prims provided if len(self.cfg.mesh_prim_paths) != 1: raise NotImplementedError( f"RayCaster currently only supports one mesh prim. Received: {len(self.cfg.mesh_prim_paths)}" ) # read prims to ray-cast for mesh_prim_path in self.cfg.mesh_prim_paths: # check if mesh already casted into warp mesh if mesh_prim_path in RayCaster.meshes: continue # check if the prim is a plane - handle PhysX plane as a special case # if a plane exists then we need to create an infinite mesh that is a plane mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane" ) # if we did not find a plane then we need to read the mesh if mesh_prim is None: # obtain the mesh prim mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh" ) # check if valid if mesh_prim is None or not mesh_prim.IsValid(): raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}") # cast into UsdGeomMesh mesh_prim = UsdGeom.Mesh(mesh_prim) # read the vertices and faces points = np.asarray(mesh_prim.GetPointsAttr().Get()) indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get()) wp_mesh = convert_to_warp_mesh(points, indices, device=self.device) # print info carb.log_info( f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces." 
                )
            else:
                mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True)
                wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device)
                # print info
                carb.log_info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.")
            # add the warp mesh to the list
            RayCaster.meshes[mesh_prim_path] = wp_mesh

        # throw an error if no meshes are found
        if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]):
            raise RuntimeError(
                f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}"
            )

    def _initialize_rays_impl(self):
        # compute ray starts and directions
        self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device)
        self.num_rays = len(self.ray_directions)
        # apply offset transformation to the rays
        offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device)
        offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device)
        self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions)
        self.ray_starts += offset_pos
        # repeat the rays for each sensor
        self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1)
        self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1)
        # prepare drift
        self.drift = torch.zeros(self._view.count, 3, device=self.device)
        # fill the data buffer
        self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device)
        self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device)
        self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device)

    def _update_buffers_impl(self, env_ids: Sequence[int]):
        """Fills the buffers of the sensor data."""
        # obtain the poses of the sensors
        if isinstance(self._view, XFormPrimView):
            pos_w, quat_w = self._view.get_world_poses(env_ids)
        elif isinstance(self._view, physx.ArticulationView):
            pos_w, quat_w = self._view.get_root_transforms()[env_ids].split([3, 4], dim=-1)
            quat_w = convert_quat(quat_w, to="wxyz")
        elif isinstance(self._view, physx.RigidBodyView):
            pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1)
            quat_w = convert_quat(quat_w, to="wxyz")
        else:
            raise RuntimeError(f"Unsupported view type: {type(self._view)}")
        # note: we clone here because the buffers returned by the view are read-only
        pos_w = pos_w.clone()
        quat_w = quat_w.clone()
        # apply drift
        pos_w += self.drift[env_ids]
        # store the poses
        self._data.pos_w[env_ids] = pos_w
        self._data.quat_w[env_ids] = quat_w

        # ray cast based on the sensor poses
        if self.cfg.attach_yaw_only:
            # only yaw orientation is considered and directions are not rotated
            ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids])
            ray_starts_w += pos_w.unsqueeze(1)
            ray_directions_w = self.ray_directions[env_ids]
        else:
            # full orientation is considered
            ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids])
            ray_starts_w += pos_w.unsqueeze(1)
            ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids])
        # ray cast and store the hits
        # TODO: Make this work for multiple meshes?
        self._data.ray_hits_w[env_ids] = raycast_mesh(
            ray_starts_w,
            ray_directions_w,
            max_dist=self.cfg.max_distance,
            mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]],
        )[0]

    def _set_debug_vis_impl(self, debug_vis: bool):
        # set visibility of markers
        # note: parent only deals with callbacks, not their visibility
        if debug_vis:
            if not hasattr(self, "ray_visualizer"):
                self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
            # set their visibility to true
            self.ray_visualizer.set_visibility(True)
        else:
            if hasattr(self, "ray_visualizer"):
                self.ray_visualizer.set_visibility(False)

    def _debug_vis_callback(self, event):
        # show ray hit positions
        self.ray_visualizer.visualize(self._data.ray_hits_w.view(-1, 3))

    """
    Internal simulation callbacks.
    """

    def _invalidate_initialize_callback(self, event):
        """Invalidates the scene elements."""
        # call parent
        super()._invalidate_initialize_callback(event)
        # set all existing views to None to invalidate them
        self._physics_sim_view = None
        self._view = None
12,994
Python
42.902027
130
0.621133
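# A pure-torch sketch (not from the ORBIT sources) of why `reset` above indexes and
# re-assigns when resampling the drift: with a tensor (or list) of env ids, advanced
# indexing returns a *copy*, so an in-place op on `drift[env_ids]` alone would fill a
# temporary and be discarded. `fill_` is used here to keep the demo deterministic;
# the same mechanism applies to `uniform_`.
import torch

drift = torch.zeros(4, 3)
env_ids = torch.tensor([0, 2])

drift[env_ids].fill_(1.0)  # fills a copy; `drift` is unchanged
assert torch.all(drift == 0)

drift[env_ids] = drift[env_ids].fill_(1.0)  # write the filled copy back
assert drift[0, 0] == 1.0 and drift[1, 0] == 0.0 and drift[2, 0] == 1.0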
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_camera_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration for the ray-cast sensor.""" from __future__ import annotations from typing import Literal from omni.isaac.orbit.utils import configclass from .ray_caster_camera import RayCasterCamera from .ray_caster_cfg import RayCasterCfg @configclass class RayCasterCameraCfg(RayCasterCfg): """Configuration for the ray-cast sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention. - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. """ class_type: type = RayCasterCamera offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" data_types: list[str] = ["distance_to_image_plane"] """List of sensor names/types to enable for the camera. Defaults to ["distance_to_image_plane"].""" def __post_init__(self): # for cameras, this quantity should be False always. self.attach_yaw_only = False
1,885
Python
35.26923
122
0.637135
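# A hedged example (not from the ORBIT sources) of filling in `RayCasterCameraCfg` for a
# depth camera that ray-casts against a static terrain mesh. The prim paths are
# hypothetical placeholders; the required fields come from the config classes in this dump.
from omni.isaac.orbit.sensors.ray_caster import RayCasterCameraCfg
from omni.isaac.orbit.sensors.ray_caster.patterns import PinholeCameraPatternCfg

camera_cfg = RayCasterCameraCfg(
    prim_path="/World/envs/env_.*/Robot/base",  # hypothetical sensor parent prim
    mesh_prim_paths=["/World/ground"],          # hypothetical terrain prim
    pattern_cfg=PinholeCameraPatternCfg(width=128, height=80),
    data_types=["distance_to_image_plane"],
    offset=RayCasterCameraCfg.OffsetCfg(pos=(0.1, 0.0, 0.2), convention="ros"),
)
# note: `__post_init__` forces `attach_yaw_only = False` for camera sensors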
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/ray_caster_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Configuration for the ray-cast sensor.""" from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.markers import VisualizationMarkersCfg from omni.isaac.orbit.markers.config import RAY_CASTER_MARKER_CFG from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .patterns.patterns_cfg import PatternBaseCfg from .ray_caster import RayCaster @configclass class RayCasterCfg(SensorBaseCfg): """Configuration for the ray-cast sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" class_type: type = RayCaster mesh_prim_paths: list[str] = MISSING """The list of mesh primitive paths to ray cast against. Note: Currently, only a single static mesh is supported. We are working on supporting multiple static meshes and dynamic meshes. """ offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" attach_yaw_only: bool = MISSING """Whether the rays' starting positions and directions only track the yaw orientation. This is useful for ray-casting height maps, where only yaw rotation is needed. """ pattern_cfg: PatternBaseCfg = MISSING """The pattern that defines the local ray starting positions and directions.""" max_distance: float = 1e6 """Maximum distance (in meters) from the sensor to ray cast to. Defaults to 1e6.""" drift_range: tuple[float, float] = (0.0, 0.0) """The range of drift (in meters) to add to the ray starting positions (xyz). Defaults to (0.0, 0.0). For floating base robots, this is useful for simulating drift in the robot's pose estimation. """ visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
2,541
Python
34.802816
107
0.696576
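# A hedged example (not from the ORBIT sources) of a height-scanner style `RayCasterCfg`:
# a 1.6 m x 1.0 m grid of downward rays that only tracks the base's yaw, which is the use
# case the `attach_yaw_only` docstring describes. Prim paths are hypothetical placeholders.
from omni.isaac.orbit.sensors.ray_caster import RayCasterCfg
from omni.isaac.orbit.sensors.ray_caster.patterns import GridPatternCfg

height_scanner_cfg = RayCasterCfg(
    prim_path="/World/envs/env_.*/Robot/base",  # hypothetical robot base prim
    mesh_prim_paths=["/World/ground"],          # a single static mesh, per the note above
    offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
    attach_yaw_only=True,
    pattern_cfg=GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
    drift_range=(-0.05, 0.05),
)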
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from typing import TYPE_CHECKING if TYPE_CHECKING: from . import patterns_cfg def grid_pattern(cfg: patterns_cfg.GridPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: """A regular grid pattern for ray casting. The grid pattern is made from rays that are parallel to each other. They span a 2D grid in the sensor's local coordinates from ``(-length/2, -width/2)`` to ``(length/2, width/2)``, which is defined by the ``size = (length, width)`` and ``resolution`` parameters in the config. Args: cfg: The configuration instance for the pattern. device: The device to create the pattern on. Returns: The starting positions and directions of the rays. Raises: ValueError: If the ordering is not "xy" or "yx". ValueError: If the resolution is less than or equal to 0. """ # check valid arguments if cfg.ordering not in ["xy", "yx"]: raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{cfg.ordering}'.") if cfg.resolution <= 0: raise ValueError(f"Resolution must be greater than 0. Received: '{cfg.resolution}'.") # resolve mesh grid indexing (note: torch meshgrid is different from numpy meshgrid) # check: https://github.com/pytorch/pytorch/issues/15301 indexing = cfg.ordering if cfg.ordering == "xy" else "ij" # define grid pattern x = torch.arange(start=-cfg.size[0] / 2, end=cfg.size[0] / 2 + 1.0e-9, step=cfg.resolution, device=device) y = torch.arange(start=-cfg.size[1] / 2, end=cfg.size[1] / 2 + 1.0e-9, step=cfg.resolution, device=device) grid_x, grid_y = torch.meshgrid(x, y, indexing=indexing) # store into ray starts num_rays = grid_x.numel() ray_starts = torch.zeros(num_rays, 3, device=device) ray_starts[:, 0] = grid_x.flatten() ray_starts[:, 1] = grid_y.flatten() # define ray-cast directions ray_directions = torch.zeros_like(ray_starts) ray_directions[..., :] = torch.tensor(list(cfg.direction), device=device) return ray_starts, ray_directions def pinhole_camera_pattern( cfg: patterns_cfg.PinholeCameraPatternCfg, intrinsic_matrices: torch.Tensor, device: str ) -> tuple[torch.Tensor, torch.Tensor]: """The image pattern for ray casting. .. caution:: This function does not follow the standard pattern interface. It requires the intrinsic matrices of the cameras to be passed in. This is because we want to be able to randomize the intrinsic matrices of the cameras, which is not possible with the standard pattern interface. Args: cfg: The configuration instance for the pattern. intrinsic_matrices: The intrinsic matrices of the cameras. Shape is (N, 3, 3). device: The device to create the pattern on. Returns: The starting positions and directions of the rays. The shape of the tensors are (N, H * W, 3) and (N, H * W, 3) respectively. 
""" # get image plane mesh grid grid = torch.meshgrid( torch.arange(start=0, end=cfg.width, dtype=torch.int32, device=device), torch.arange(start=0, end=cfg.height, dtype=torch.int32, device=device), indexing="xy", ) pixels = torch.vstack(list(map(torch.ravel, grid))).T # convert to homogeneous coordinate system pixels = torch.hstack([pixels, torch.ones((len(pixels), 1), device=device)]) # get pixel coordinates in camera frame pix_in_cam_frame = torch.matmul(torch.inverse(intrinsic_matrices), pixels.T) # robotics camera frame is (x forward, y left, z up) from camera frame with (x right, y down, z forward) # transform to robotics camera frame transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2) pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec # normalize ray directions ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) # for camera, we always ray-cast from the sensor's origin ray_starts = torch.zeros_like(ray_directions, device=device) return ray_starts, ray_directions def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: """The RS-Bpearl pattern for ray casting. The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide field of view. It is designed for near-field blind-spots detection. .. _Robosense RS-Bpearl: https://www.roscomponents.com/en/lidar-laser-scanner/267-rs-bpearl.html Args: cfg: The configuration instance for the pattern. device: The device to create the pattern on. Returns: The starting positions and directions of the rays. """ h = torch.arange(-cfg.horizontal_fov / 2, cfg.horizontal_fov / 2, cfg.horizontal_res, device=device) v = torch.tensor(list(cfg.vertical_ray_angles), device=device) pitch, yaw = torch.meshgrid(v, h, indexing="xy") pitch, yaw = torch.deg2rad(pitch.reshape(-1)), torch.deg2rad(yaw.reshape(-1)) pitch += torch.pi / 2 x = torch.sin(pitch) * torch.cos(yaw) y = torch.sin(pitch) * torch.sin(yaw) z = torch.cos(pitch) ray_directions = -torch.stack([x, y, z], dim=1) ray_starts = torch.zeros_like(ray_directions) return ray_starts, ray_directions
5,519
Python
41.137404
110
0.67784
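# A standalone sketch (not from the ORBIT sources) of the meshgrid-indexing behavior that
# `grid_pattern` above relies on: it forwards the config's "xy" ordering to torch's "xy"
# indexing and maps "yx" to "ij". The flattened point orderings for X = (0, 1, 2) and
# Y = (3, 4) are shown below and are worth checking when a specific ordering matters.
import torch

x = torch.tensor([0, 1, 2])
y = torch.tensor([3, 4])

# torch's "ij" indexing flattens with the outer loop over x, inner loop over y
gx, gy = torch.meshgrid(x, y, indexing="ij")
assert list(zip(gx.flatten().tolist(), gy.flatten().tolist())) == [
    (0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)
]

# torch's "xy" indexing flattens with the outer loop over y, inner loop over x
gx, gy = torch.meshgrid(x, y, indexing="xy")
assert list(zip(gx.flatten().tolist(), gy.flatten().tolist())) == [
    (0, 3), (1, 3), (2, 3), (0, 4), (1, 4), (2, 4)
]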
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for ray-casting patterns used by the ray-caster.""" from .patterns import bpearl_pattern, grid_pattern, pinhole_camera_pattern from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg
365
Python
35.599996
99
0.791781
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the ray-cast sensor."""

from __future__ import annotations

import torch
from collections.abc import Callable, Sequence
from dataclasses import MISSING
from typing import Literal

from omni.isaac.orbit.utils import configclass

from . import patterns


@configclass
class PatternBaseCfg:
    """Base configuration for a pattern."""

    func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING
    """Function to generate the pattern.

    The function should take in the configuration and the device name as arguments. It should return
    the pattern's starting positions and directions as a tuple of torch.Tensor.
    """


@configclass
class GridPatternCfg(PatternBaseCfg):
    """Configuration for the grid pattern for ray-casting.

    Defines a 2D grid of rays in the coordinates of the sensor.

    .. attention::
        The points are ordered based on the :attr:`ordering` attribute.
    """

    func: Callable = patterns.grid_pattern

    resolution: float = MISSING
    """Grid resolution (in meters)."""

    size: tuple[float, float] = MISSING
    """Grid size (length, width) (in meters)."""

    direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
    """Ray direction. Defaults to (0.0, 0.0, -1.0)."""

    ordering: Literal["xy", "yx"] = "xy"
    """Specifies the ordering of points in the generated grid. Defaults to ``"xy"``.

    Consider a grid pattern with points at :math:`(x, y)` where :math:`x` and :math:`y` are the grid indices.
    The ordering of the points can be specified as "xy" or "yx". This determines the outer and inner loop order
    when iterating over the grid points.

    * If *"xy"* is selected, the points are ordered with outer loop over "x" and inner loop over "y".
    * If *"yx"* is selected, the points are ordered with outer loop over "y" and inner loop over "x".

    For example, the grid pattern points with :math:`X = (0, 1, 2)` and :math:`Y = (3, 4)`:

    * *"xy"* ordering: :math:`[(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)]`
    * *"yx"* ordering: :math:`[(0, 3), (1, 3), (2, 3), (0, 4), (1, 4), (2, 4)]`
    """


@configclass
class PinholeCameraPatternCfg(PatternBaseCfg):
    """Configuration for a pinhole camera depth image pattern for ray-casting."""

    func: Callable = patterns.pinhole_camera_pattern

    focal_length: float = 24.0
    """Perspective focal length (in cm). Defaults to 24.0cm.

    Longer lens lengths give a narrower FOV; shorter lens lengths give a wider FOV.
    """

    horizontal_aperture: float = 20.955
    """Horizontal aperture (in mm). Defaults to 20.955mm.

    Emulates sensor/film width on a camera.

    Note:
        The default value is the horizontal aperture of a 35 mm spherical projector.
    """

    horizontal_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate horizontally. Defaults to 0.0."""

    vertical_aperture_offset: float = 0.0
    """Offsets Resolution/Film gate vertically. Defaults to 0.0."""

    width: int = MISSING
    """Width of the image (in pixels)."""

    height: int = MISSING
    """Height of the image (in pixels)."""


@configclass
class BpearlPatternCfg(PatternBaseCfg):
    """Configuration for the Bpearl pattern for ray-casting."""

    func: Callable = patterns.bpearl_pattern

    horizontal_fov: float = 360.0
    """Horizontal field of view (in degrees). Defaults to 360.0."""

    horizontal_res: float = 10.0
    """Horizontal resolution (in degrees).
Defaults to 10.0.""" # fmt: off vertical_ray_angles: Sequence[float] = [ 89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375, 58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375, 27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125 ] # fmt: on """Vertical ray angles (in degrees). Defaults to a list of 32 angles. Note: We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles are not evenly spaced. """
4,172
Python
32.384
111
0.659396
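# Quick arithmetic (not from the ORBIT sources) on the Bpearl defaults above: the scan
# pattern has one ray per (vertical angle, horizontal step) pair, so 32 vertical angles
# at a 360 deg FOV and 10 deg horizontal resolution yield 32 * 36 = 1152 rays.
from omni.isaac.orbit.sensors.ray_caster.patterns import BpearlPatternCfg

cfg = BpearlPatternCfg()
num_rays = len(cfg.vertical_ray_angles) * int(cfg.horizontal_fov / cfg.horizontal_res)
assert num_rays == 32 * 36 == 1152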
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import carb import omni.physics.tensors.impl.api as physx from pxr import UsdPhysics import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.utils.math import ( combine_frame_transforms, convert_quat, is_identity_pose, subtract_frame_transforms, ) from ..sensor_base import SensorBase from .frame_transformer_data import FrameTransformerData if TYPE_CHECKING: from .frame_transformer_cfg import FrameTransformerCfg class FrameTransformer(SensorBase): """A sensor for reporting frame transforms. This class provides an interface for reporting the transform of one or more frames (target frames) with respect to another frame (source frame). The source frame is specified by the user as a prim path (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of prim paths (:attr:`FrameTransformerCfg.target_frames`). The source frame and target frames are assumed to be rigid bodies. The transform of the target frames with respect to the source frame is computed by first extracting the transform of the source frame and target frames from the physics engine and then computing the relative transform between the two. Additionally, the user can specify an offset for the source frame and each target frame. This is useful for specifying the transform of the desired frame with respect to the body's center of mass, for instance. A common example of using this sensor is to track the position and orientation of the end effector of a robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the manipulator. .. note:: Currently, this implementation only handles frames within an articulation. This is because the frame regex expressions are resolved based on their parent prim path. This can be extended to handle frames outside of articulation by using the frame prim path instead. However, this would require additional checks to ensure that the user-specified frames are valid which is not currently implemented. .. warning:: The implementation assumes that the parent body of a target frame is not the same as that of the source frame (i.e. :attr:`FrameTransformerCfg.prim_path`). While a corner case, this can occur if the user specifies the same prim path for both the source frame and target frame. In this case, the target frame will be ignored and not reported. This is a limitation of the current implementation and will be fixed in a future release. """ cfg: FrameTransformerCfg """The configuration parameters.""" def __init__(self, cfg: FrameTransformerCfg): """Initializes the frame transformer object. Args: cfg: The configuration parameters. 
""" # initialize base class super().__init__(cfg) # Create empty variables for storing output data self._data: FrameTransformerData = FrameTransformerData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"FrameTransformer @ '{self.cfg.prim_path}': \n" f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" f"\tnumber of envs: {self._num_envs}\n" f"\tsource body frame: {self._source_frame_body_name}\n" f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" ) """ Properties """ @property def data(self) -> FrameTransformerData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = ... """ Implementation. """ def _initialize_impl(self): super()._initialize_impl() # resolve source frame offset source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is # not the identity quaternion for efficiency in _update_buffer_impl self._apply_source_frame_offset = True # Handle source frame offsets if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): carb.log_verbose(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") self._apply_source_frame_offset = False else: carb.log_verbose(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) # Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames # based upon the same body name and we don't want to create unnecessary views body_names_to_frames: dict[str, set[str]] = {} # The offsets associated with each target frame target_offsets: dict[str, dict[str, torch.Tensor]] = {} # The frames whose offsets are not identity non_identity_offset_frames: list[str] = [] # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl self._apply_target_frame_offset = False # Collect all target frames, their associated body prim paths and their offsets so that we can extract # the prim, check that it has the appropriate rigid body API in a single loop. 
        # First element is None because user can't specify source frame name
        frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames]
        frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames]
        # First element is None because source frame offset is handled separately
        frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames]
        for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets):
            # Find correct prim
            matching_prims = sim_utils.find_matching_prims(prim_path)
            if len(matching_prims) == 0:
                raise ValueError(
                    f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'."
                    " No matching prims were found."
                )
            for prim in matching_prims:
                # Get the prim path of the matching prim
                matching_prim_path = prim.GetPath().pathString
                # Check if it is a rigid prim
                if not prim.HasAPI(UsdPhysics.RigidBodyAPI):
                    raise ValueError(
                        f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a"
                        " rigid body. The class only supports transformations between rigid bodies."
                    )
                # Get the name of the body
                body_name = matching_prim_path.rsplit("/", 1)[-1]
                # Use body name if frame isn't specified by user
                frame_name = frame if frame is not None else body_name
                # Keep track of which frames are associated with which bodies
                if body_name in body_names_to_frames:
                    body_names_to_frames[body_name].add(frame_name)
                else:
                    body_names_to_frames[body_name] = {frame_name}

                if offset is not None:
                    offset_pos = torch.tensor(offset.pos, device=self.device)
                    offset_quat = torch.tensor(offset.rot, device=self.device)
                    # Check if we need to apply offsets (optimized code path in _update_buffer_impl)
                    if not is_identity_pose(offset_pos, offset_quat):
                        non_identity_offset_frames.append(frame_name)
                        self._apply_target_frame_offset = True

                    target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat}

        if not self._apply_target_frame_offset:
            carb.log_info(
                f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all"
                f" are identity: {frames[1:]}"
            )
        else:
            carb.log_info(
                f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:"
                f" {non_identity_offset_frames}"
            )

        # The names of bodies that RigidPrimView will be tracking to later extract transforms from
        tracked_body_names = list(body_names_to_frames.keys())
        # Construct regex expression for the body names
        body_names_regex = r"(" + "|".join(tracked_body_names) + r")"
        body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}"
        # Create simulation view
        self._physics_sim_view = physx.create_simulation_view(self._backend)
        self._physics_sim_view.set_subspace_roots("/")
        # Create a prim view for all frames and initialize it
        # order of transforms coming out of view will be source frame followed by target frame(s)
        self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*"))
        # Determine the order in which regex evaluated body names so we can later index into frame transforms
        # by frame name correctly
        all_prim_paths = self._frame_physx_view.prim_paths
        # Only need first env as the names and their ordering are the same across environments
        first_env_prim_paths = all_prim_paths[0 : len(tracked_body_names)]
        first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths]

        # Re-parse the list as it may have moved when resolving regex above
        # -- source frame
        self._source_frame_body_name = self.cfg.prim_path.split("/")[-1]
        source_frame_index = first_env_body_names.index(self._source_frame_body_name)
        # -- target frames
        self._target_frame_body_names = first_env_body_names[:]
        self._target_frame_body_names.remove(self._source_frame_body_name)

        # Determine indices into all tracked body frames for both source and target frames
        all_ids = torch.arange(self._num_envs * len(tracked_body_names))
        self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index
        self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)]

        # The name of each of the target frame(s) - either user specified or defaulted to the body name
        self._target_frame_names: list[str] = []
        # The position and rotation components of target frame offsets
        target_frame_offset_pos = []
        target_frame_offset_quat = []
        # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed
        # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations
        # when updating sensor in _update_buffers_impl
        duplicate_frame_indices = []

        # Go through each body name and determine the number of duplicates we need for that frame
        # and extract the offsets. This is all done to handle the case where multiple frames
        # reference the same body, but have different names and/or offsets
        for i, body_name in enumerate(self._target_frame_body_names):
            for frame in body_names_to_frames[body_name]:
                target_frame_offset_pos.append(target_offsets[frame]["pos"])
                target_frame_offset_quat.append(target_offsets[frame]["quat"])
                self._target_frame_names.append(frame)
                duplicate_frame_indices.append(i)

        # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes
        # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is an optimization to make _update_buffer_impl more efficient
        duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device)
        num_target_body_frames = len(tracked_body_names) - 1
        self._duplicate_frame_indices = torch.cat(
            [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)]
        )

        # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4)
        self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1)
        self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1)

        # fill the data buffer
        self._data.target_frame_names = self._target_frame_names
        self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device)
        self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device)
        self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device)
        self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device)
        self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w)
        self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w)

    def _update_buffers_impl(self, env_ids: Sequence[int]):
        """Fills the buffers of the sensor data."""
        # default to all sensors
        if len(env_ids) == self._num_envs:
            env_ids = ...
# Extract transforms from view - shape is: # (the total number of source and target body frames being tracked * self._num_envs, 7) transforms = self._frame_physx_view.get_transforms() # Convert quaternions as PhysX uses xyzw form transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") # Process source frame transform source_frames = transforms[self._source_frame_body_ids] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_source_frame_offset: source_pos_w, source_quat_w = combine_frame_transforms( source_frames[:, :3], source_frames[:, 3:], self._source_frame_offset_pos, self._source_frame_offset_quat, ) else: source_pos_w = source_frames[:, :3] source_quat_w = source_frames[:, 3:] # Process target frame transforms target_frames = transforms[self._target_frame_body_ids] duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_target_frame_offset: target_pos_w, target_quat_w = combine_frame_transforms( duplicated_target_frame_pos_w, duplicated_target_frame_quat_w, self._target_frame_offset_pos, self._target_frame_offset_quat, ) else: target_pos_w = duplicated_target_frame_pos_w target_quat_w = duplicated_target_frame_quat_w # Compute the transform of the target frame with respect to the source frame total_num_frames = len(self._target_frame_names) target_pos_source, target_quat_source = subtract_frame_transforms( source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), target_pos_w, target_quat_w, ) # Update buffers # note: The frame names / ordering don't change so no need to update them after initialization self._data.source_pos_w[:] = source_pos_w.view(-1, 3) self._data.source_quat_w[:] = source_quat_w.view(-1, 4) self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers # note: parent only deals with callbacks. not their visibility if debug_vis: if not hasattr(self, "frame_visualizer"): self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) # set their visibility to true self.frame_visualizer.set_visibility(True) else: if hasattr(self, "frame_visualizer"): self.frame_visualizer.set_visibility(False) def _debug_vis_callback(self, event): # Update the visualized markers if self.frame_visualizer is not None: self.frame_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_quat_w.view(-1, 4)) """ Internal simulation callbacks. """ def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._physics_sim_view = None self._frame_physx_view = None
18,936
Python
50.181081
118
0.647972
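# A pure-torch check (not from the ORBIT sources) of the index expansion described in
# `_initialize_impl` above: per-env duplicate indices [0, 1, 1, 2] over 3 target body
# frames become [0, 1, 1, 2, 3, 4, 4, 5] once a second environment is appended.
import torch

duplicate_frame_indices = torch.tensor([0, 1, 1, 2])
num_target_body_frames = 3
num_envs = 2

expanded = torch.cat(
    [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(num_envs)]
)
assert expanded.tolist() == [0, 1, 1, 2, 3, 4, 4, 5]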
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .frame_transformer import FrameTransformer @configclass class OffsetCfg: """The offset pose of one frame relative to another frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" @configclass class FrameTransformerCfg(SensorBaseCfg): """Configuration for the frame transformer sensor.""" @configclass class FrameCfg: """Information specific to a coordinate frame.""" prim_path: str = MISSING """The prim path corresponding to the parent rigid body. This prim should be part of the same articulation as :attr:`FrameTransformerCfg.prim_path`. """ name: str | None = None """User-defined name for the new coordinate frame. Defaults to None. If None, then the name is extracted from the leaf of the prim path. """ offset: OffsetCfg = OffsetCfg() """The pose offset from the parent prim frame.""" class_type: type = FrameTransformer prim_path: str = MISSING """The prim path of the body to transform from (source frame).""" source_frame_offset: OffsetCfg = OffsetCfg() """The pose offset from the source prim frame.""" target_frames: list[FrameCfg] = MISSING """A list of the target frames. This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped, we can use a single FrameTransformer to track each foot's position and orientation in the body frame using four frame offsets. """ visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
2,405
Python
32.887323
109
0.689813
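# A hedged example (not from the ORBIT sources) of the end-effector use case described in
# the class docs: track a tool frame relative to a manipulator base. The prim paths and the
# 0.1 m flange offset are hypothetical placeholders.
from omni.isaac.orbit.sensors.frame_transformer import FrameTransformerCfg, OffsetCfg

ee_tracker_cfg = FrameTransformerCfg(
    prim_path="/World/envs/env_.*/Robot/base",  # source frame (hypothetical)
    target_frames=[
        FrameTransformerCfg.FrameCfg(
            prim_path="/World/envs/env_.*/Robot/panda_hand",  # hypothetical body
            name="end_effector",
            offset=OffsetCfg(pos=(0.0, 0.0, 0.1)),
        ),
    ],
)
# once initialized, `sensor.data.target_pos_source[:, 0]` would hold the end-effector
# position expressed in the base frame, with shape (num_envs, 3)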
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for frame transformer sensor.""" from .frame_transformer import FrameTransformer from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData
342
Python
30.181815
65
0.80117
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch import warnings from dataclasses import dataclass @dataclass class FrameTransformerData: """Data container for the frame transformer sensor.""" target_frame_names: list[str] = None """Target frame names (this denotes the order in which that frame data is ordered). The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. This usually follows the order in which the frames are defined in the config. However, in the case of regex matching, the order may be different. """ target_pos_source: torch.Tensor = None """Position of the target frame(s) relative to source frame. Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ target_quat_source: torch.Tensor = None """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ target_pos_w: torch.Tensor = None """Position of the target frame(s) after offset (in world frame). Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ target_quat_w: torch.Tensor = None """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ source_pos_w: torch.Tensor = None """Position of the source frame after offset (in world frame). Shape is (N, 3), where N is the number of environments. """ source_quat_w: torch.Tensor = None """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). Shape is (N, 4), where N is the number of environments. """ @property def target_rot_source(self) -> torch.Tensor: """Alias for :attr:`target_quat_source`. .. deprecated:: v0.2.1 Use :attr:`target_quat_source` instead. Will be removed in v0.3.0. """ warnings.warn("'target_rot_source' is deprecated, use 'target_quat_source' instead.", DeprecationWarning) return self.target_quat_source @property def target_rot_w(self) -> torch.Tensor: """Alias for :attr:`target_quat_w`. .. deprecated:: v0.2.1 Use :attr:`target_quat_w` instead. Will be removed in v0.3.0. """ warnings.warn("'target_rot_w' is deprecated, use 'target_quat_w' instead.", DeprecationWarning) return self.target_quat_w @property def source_rot_w(self) -> torch.Tensor: """Alias for :attr:`source_quat_w`. .. deprecated:: v0.2.1 Use :attr:`source_quat_w` instead. Will be removed in v0.3.0. """ warnings.warn("'source_rot_w' is deprecated, use 'source_quat_w' instead.", DeprecationWarning) return self.source_quat_w
3,123
Python
33.711111
113
0.654819
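# A small sketch (not from the ORBIT sources) of consuming the buffer shapes documented in
# `FrameTransformerData`: with N environments and M target frames, the distance of each
# target frame from the source frame is the norm of `target_pos_source`. A random tensor
# stands in for the sensor buffer.
import torch

N, M = 8, 4
target_pos_source = torch.rand(N, M, 3)  # FrameTransformerData.target_pos_source

distances = torch.linalg.norm(target_pos_source, dim=-1)
assert distances.shape == (N, M)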
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import math import numpy as np import re import torch from collections.abc import Sequence from tensordict import TensorDict from typing import TYPE_CHECKING, Any, Literal import omni.kit.commands import omni.usd from omni.isaac.core.prims import XFormPrimView from omni.syntheticdata.scripts.SyntheticData import SyntheticData from pxr import UsdGeom import omni.isaac.orbit.sim as sim_utils from omni.isaac.orbit.utils import to_camel_case from omni.isaac.orbit.utils.array import convert_to_torch from omni.isaac.orbit.utils.math import quat_from_matrix from ..sensor_base import SensorBase from .camera_data import CameraData from .utils import convert_orientation_convention, create_rotation_matrix_from_view if TYPE_CHECKING: from .camera_cfg import CameraCfg class Camera(SensorBase): r"""The camera sensor for acquiring visual data. This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data. It ensures that the camera follows the ROS convention for the coordinate system. Summarizing from the `replicator extension`_, the following sensor types are supported: - ``"rgb"``: A rendered color image. - ``"distance_to_camera"``: An image containing the distance to camera optical center. - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - ``"normals"``: An image containing the local surface normal vectors at each pixel. - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - ``"semantic_segmentation"``: The semantic segmentation data. - ``"instance_segmentation_fast"``: The instance segmentation data. - ``"instance_id_segmentation_fast"``: The instance id segmentation data. .. note:: Currently the following sensor types are not supported in a "view" format: - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). - ``"bounding_box_3d"``: The 3D view space bounding box data. - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output .. _USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html """ cfg: CameraCfg """The configuration parameters.""" UNSUPPORTED_TYPES: set[str] = { "instance_id_segmentation", "instance_segmentation", "bounding_box_2d_tight", "bounding_box_2d_loose", "bounding_box_3d", "bounding_box_2d_tight_fast", "bounding_box_2d_loose_fast", "bounding_box_3d_fast", } """The set of sensor types that are not supported by the camera class.""" def __init__(self, cfg: CameraCfg): """Initializes the camera sensor. Args: cfg: The configuration parameters. Raises: RuntimeError: If no camera prim is found at the given path. ValueError: If the provided data types are not supported by the camera. 
""" # check if sensor path is valid # note: currently we do not handle environment indices if there is a regex pattern in the leaf # For example, if the prim path is "/World/Sensor_[1,2]". sensor_path = cfg.prim_path.split("/")[-1] sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None if sensor_path_is_regex: raise RuntimeError( f"Invalid prim path for the camera sensor: {self.cfg.prim_path}." "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." ) # perform check on supported data types self._check_supported_data_types(cfg) # initialize base class super().__init__(cfg) # spawn the asset if self.cfg.spawn is not None: # compute the rotation offset rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32).unsqueeze(0) rot_offset = convert_orientation_convention(rot, origin=self.cfg.offset.convention, target="opengl") rot_offset = rot_offset.squeeze(0).numpy() # spawn the asset self.cfg.spawn.func( self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset ) # check that spawn was successful matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) if len(matching_prims) == 0: raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") # UsdGeom Camera prim for the sensor self._sensor_prims: list[UsdGeom.Camera] = list() # Create empty variables for storing output data self._data = CameraData() def __del__(self): """Unsubscribes from callbacks and detach from the replicator registry.""" # unsubscribe callbacks super().__del__() # delete from replicator registry for _, annotators in self._rep_registry.items(): for annotator, render_product_path in zip(annotators, self._render_product_paths): annotator.detach([render_product_path]) annotator = None def __str__(self) -> str: """Returns: A string containing information about the instance.""" # message for class return ( f"Camera @ '{self.cfg.prim_path}': \n" f"\tdata types : {self.data.output.sorted_keys} \n" f"\tsemantic filter : {self.cfg.semantic_filter}\n" f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" f"\tupdate period (s): {self.cfg.update_period}\n" f"\tshape : {self.image_shape}\n" f"\tnumber of sensors : {self._view.count}" ) """ Properties """ @property def num_instances(self) -> int: return self._view.count @property def data(self) -> CameraData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def frame(self) -> torch.tensor: """Frame number when the measurement took place.""" return self._frame @property def render_product_paths(self) -> list[str]: """The path of the render products for the cameras. This can be used via replicator interfaces to attach to writes or external annotator registry. """ return self._render_product_paths @property def image_shape(self) -> tuple[int, int]: """A tuple containing (height, width) of the camera sensor.""" return (self.cfg.height, self.cfg.width) """ Configuration """ def set_intrinsic_matrices( self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None ): """Set parameters of the USD camera from its intrinsic matrix. The intrinsic matrix and focal length are used to set the following parameters to the USD camera: - ``focal_length``: The focal length of the camera. 
        - ``horizontal_aperture``: The horizontal aperture of the camera.
        - ``vertical_aperture``: The vertical aperture of the camera.
        - ``horizontal_aperture_offset``: The horizontal offset of the camera.
        - ``vertical_aperture_offset``: The vertical offset of the camera.

        .. warning::

            Due to limitations of Omniverse camera, we need to assume that the camera is a spherical lens,
            i.e. has square pixels, and the optical center is centered at the camera eye. If this assumption
            is not true in the input intrinsic matrix, then the camera will not set up correctly.

        Args:
            matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3).
            focal_length: Focal length to use when computing aperture values. Defaults to 1.0.
            env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices.
        """
        # resolve env_ids
        if env_ids is None:
            env_ids = self._ALL_INDICES
        # iterate over env_ids
        for i, matrix in zip(env_ids, matrices):
            # convert to numpy for sanity
            intrinsic_matrix = np.asarray(matrix, dtype=float)
            # extract parameters from matrix
            f_x = intrinsic_matrix[0, 0]
            c_x = intrinsic_matrix[0, 2]
            f_y = intrinsic_matrix[1, 1]
            c_y = intrinsic_matrix[1, 2]
            # get viewport parameters
            height, width = self.image_shape
            height, width = float(height), float(width)
            # resolve parameters for usd camera
            params = {
                "focal_length": focal_length,
                "horizontal_aperture": width * focal_length / f_x,
                "vertical_aperture": height * focal_length / f_y,
                "horizontal_aperture_offset": (c_x - width / 2) / f_x,
                "vertical_aperture_offset": (c_y - height / 2) / f_y,
            }
            # change data for corresponding camera index
            sensor_prim = self._sensor_prims[i]
            # set parameters for camera
            for param_name, param_value in params.items():
                # convert to camel case (CC)
                param_name = to_camel_case(param_name, to="CC")
                # get attribute from the class
                param_attr = getattr(sensor_prim, f"Get{param_name}Attr")
                # set value
                # note: We have to do it this way because the camera might be on a different
                #   layer (default cameras are on session layer), and this is the simplest
                #   way to set the property on the right layer.
                omni.usd.set_prop_val(param_attr(), param_value)

    """
    Operations - Set pose.
    """

    def set_world_poses(
        self,
        positions: torch.Tensor | None = None,
        orientations: torch.Tensor | None = None,
        env_ids: Sequence[int] | None = None,
        convention: Literal["opengl", "ros", "world"] = "ros",
    ):
        r"""Set the pose of the camera w.r.t. the world frame using specified convention.

        Since different fields use different conventions for camera orientations, the method allows users to
        set the camera poses in the specified convention. Possible conventions are:

        - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
        - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
        - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention

        See :meth:`omni.isaac.orbit.sensors.camera.utils.convert_orientation_convention` for more details
        on the conventions.

        Args:
            positions: The cartesian coordinates (in meters). Shape is (N, 3).
                Defaults to None, in which case the camera position is not changed.
            orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
                Defaults to None, in which case the camera orientation is not changed.
            env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices.
            convention: The convention in which the poses are fed. Defaults to "ros".

        Raises:
            RuntimeError: If the camera prim is not set.
Need to call :meth:`initialize` method first. """ # resolve env_ids if env_ids is None: env_ids = self._ALL_INDICES # convert to backend tensor if positions is not None: if isinstance(positions, np.ndarray): positions = torch.from_numpy(positions).to(device=self._device) elif not isinstance(positions, torch.Tensor): positions = torch.tensor(positions, device=self._device) # convert rotation matrix from input convention to OpenGL if orientations is not None: if isinstance(orientations, np.ndarray): orientations = torch.from_numpy(orientations).to(device=self._device) elif not isinstance(orientations, torch.Tensor): orientations = torch.tensor(orientations, device=self._device) orientations = convert_orientation_convention(orientations, origin=convention, target="opengl") # set the pose self._view.set_world_poses(positions, orientations, env_ids) def set_world_poses_from_view( self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None ): """Set the poses of the camera from the eye position and look-at target position. Args: eyes: The positions of the camera's eye. Shape is (N, 3). targets: The target locations to look at. Shape is (N, 3). env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. Raises: RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. NotImplementedError: If the stage up-axis is not "Y" or "Z". """ # resolve env_ids if env_ids is None: env_ids = self._ALL_INDICES # set camera poses using the view orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device)) self._view.set_world_poses(eyes, orientations, env_ids) """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timestamps super().reset(env_ids) # resolve None # note: cannot do smart indexing here since we do a for loop over data. if env_ids is None: env_ids = self._ALL_INDICES # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. self._update_poses(env_ids) self._update_intrinsic_matrices(env_ids) # Reset the frame count self._frame[env_ids] = 0 """ Implementation. """ def _initialize_impl(self): """Initializes the sensor handles and internal buffers. This function creates handles and registers the provided data types with the replicator registry to be able to access the data from the sensor. It also initializes the internal buffers to store the data. Raises: RuntimeError: If the number of camera prims in the view does not match the number of environments. """ import omni.replicator.core as rep # Initialize parent class super()._initialize_impl() # Create a view for the sensor self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False) self._view.initialize() # Check that sizes are correct if self._view.count != self._num_envs: raise RuntimeError( f"Number of camera prims in the view ({self._view.count}) does not match" f" the number of environments ({self._num_envs})." 
) # Create all env_ids buffer self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) # Attach the sensor data types to render node self._render_product_paths: list[str] = list() self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types} # Obtain current stage stage = omni.usd.get_context().get_stage() # Convert all encapsulated prims to Camera for cam_prim_path in self._view.prim_paths: # Get camera prim cam_prim = stage.GetPrimAtPath(cam_prim_path) # Check if prim is a camera if not cam_prim.IsA(UsdGeom.Camera): raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") # Add to list sensor_prim = UsdGeom.Camera(cam_prim) self._sensor_prims.append(sensor_prim) # Get render product # From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height)) if not isinstance(render_prod_path, str): render_prod_path = render_prod_path.path self._render_product_paths.append(render_prod_path) # Check if semantic types or semantic filter predicate is provided if isinstance(self.cfg.semantic_filter, list): semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*" elif isinstance(self.cfg.semantic_filter, str): semantic_filter_predicate = self.cfg.semantic_filter else: raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.") # set the semantic filter predicate # copied from rep.scripts.writes_default.basic_writer.py SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate) # Iterate over each data type and create annotator # TODO: This will move out of the loop once Replicator supports multiple render products within a single # annotator, i.e.: rep_annotator.attach(self._render_product_paths) for name in self.cfg.data_types: # note: we are verbose here to make it easier to understand the code. # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. # if colorize is false, the data is returned as a uint32 image with ids as values. if name == "semantic_segmentation": init_params = {"colorize": self.cfg.colorize_semantic_segmentation} elif name == "instance_segmentation_fast": init_params = {"colorize": self.cfg.colorize_instance_segmentation} elif name == "instance_id_segmentation_fast": init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} else: init_params = None # Resolve device name if "cuda" in self._device: device_name = self._device.split(":")[0] else: device_name = "cpu" # create annotator node rep_annotator = rep.AnnotatorRegistry.get_annotator(name, init_params, device=device_name) rep_annotator.attach(render_prod_path) # add to registry self._rep_registry[name].append(rep_annotator) # Create internal buffers self._create_buffers() def _update_buffers_impl(self, env_ids: Sequence[int]): # Increment frame count self._frame[env_ids] += 1 # -- intrinsic matrix self._update_intrinsic_matrices(env_ids) # -- pose self._update_poses(env_ids) # -- read the data from annotator registry # check if buffer is called for the first time. 
        # If so then, allocate the memory
        if len(self._data.output.sorted_keys) == 0:
            # this is the first time buffer is called
            # it allocates memory for all the sensors
            self._create_annotator_data()
        else:
            # iterate over all the data types
            for name, annotators in self._rep_registry.items():
                # iterate over all the annotators
                for index in env_ids:
                    # get the output
                    output = annotators[index].get_data()
                    # process the output
                    data, info = self._process_annotator_output(name, output)
                    # add data to output
                    self._data.output[name][index] = data
                    # add info to output
                    self._data.info[index][name] = info

    """
    Private Helpers
    """

    def _check_supported_data_types(self, cfg: CameraCfg):
        """Checks if the data types are supported by the camera sensor."""
        # check if there is any intersection in unsupported types
        # reason: these use np structured data types which we can't yet convert to torch tensor
        common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES
        if common_elements:
            # provide alternative fast counterparts
            fast_common_elements = []
            for item in common_elements:
                if "instance_segmentation" in item or "instance_id_segmentation" in item:
                    fast_common_elements.append(item + "_fast")
            # raise error
            raise ValueError(
                f"Camera class does not support the following sensor types: {common_elements}."
                "\n\tThis is because these sensor types output numpy structured data types which"
                " can't be converted to torch tensors easily."
                "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts."
                f"\n\t\tFast counterparts: {fast_common_elements}"
            )

    def _create_buffers(self):
        """Create buffers for storing data."""
        # create the data object
        # -- pose of the cameras
        self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device)
        self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device)
        # -- intrinsic matrix
        self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device)
        self._data.image_shape = self.image_shape
        # -- output data
        # lazy allocation of data dictionary
        # since the size of the output data is not known in advance, we leave it as None
        # the memory will be allocated when the buffer() function is called for the first time.
        self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device)
        self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)]

    def _update_intrinsic_matrices(self, env_ids: Sequence[int]):
        """Compute camera's matrix of intrinsic parameters.

        Also called calibration matrix. This matrix works for linear depth images. We assume square pixels.

        Note:
            The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera.
            The coordinates of points on the image plane are in the homogeneous representation.
""" # iterate over all cameras for i in env_ids: # Get corresponding sensor prim sensor_prim = self._sensor_prims[i] # get camera parameters focal_length = sensor_prim.GetFocalLengthAttr().Get() horiz_aperture = sensor_prim.GetHorizontalApertureAttr().Get() # get viewport parameters height, width = self.image_shape # calculate the field of view fov = 2 * math.atan(horiz_aperture / (2 * focal_length)) # calculate the focal length in pixels focal_px = width * 0.5 / math.tan(fov / 2) # create intrinsic matrix for depth linear self._data.intrinsic_matrices[i, 0, 0] = focal_px self._data.intrinsic_matrices[i, 0, 2] = width * 0.5 self._data.intrinsic_matrices[i, 1, 1] = focal_px self._data.intrinsic_matrices[i, 1, 2] = height * 0.5 self._data.intrinsic_matrices[i, 2, 2] = 1 def _update_poses(self, env_ids: Sequence[int]): """Computes the pose of the camera in the world frame with ROS convention. This methods uses the ROS convention to resolve the input pose. In this convention, we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis. Returns: A tuple of the position (in meters) and quaternion (w, x, y, z). """ # check camera prim exists if len(self._sensor_prims) == 0: raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.") # get the poses from the view poses, quat = self._view.get_world_poses(env_ids) self._data.pos_w[env_ids] = poses self._data.quat_w_world[env_ids] = convert_orientation_convention(quat, origin="opengl", target="world") def _create_annotator_data(self): """Create the buffers to store the annotator data. We create a buffer for each annotator and store the data in a dictionary. Since the data shape is not known beforehand, we create a list of buffers and concatenate them later. This is an expensive operation and should be called only once. """ # add data from the annotators for name, annotators in self._rep_registry.items(): # create a list to store the data for each annotator data_all_cameras = list() # iterate over all the annotators for index in self._ALL_INDICES: # get the output output = annotators[index].get_data() # process the output data, info = self._process_annotator_output(name, output) # append the data data_all_cameras.append(data) # store the info self._data.info[index][name] = info # concatenate the data along the batch dimension self._data.output[name] = torch.stack(data_all_cameras, dim=0) def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: """Process the annotator output. This function is called after the data has been collected from all the cameras. 
""" # extract info and data from the output if isinstance(output, dict): data = output["data"] info = output["info"] else: data = output info = None # convert data into torch tensor data = convert_to_torch(data, device=self.device) # process data for different segmentation types # Note: Replicator returns raw buffers of dtype int32 for segmentation types # so we need to convert them to uint8 4 channel images for colorized types height, width = self.image_shape if name == "semantic_segmentation": if self.cfg.colorize_semantic_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) elif name == "instance_segmentation_fast": if self.cfg.colorize_instance_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) elif name == "instance_id_segmentation_fast": if self.cfg.colorize_instance_id_segmentation: data = data.view(torch.uint8).reshape(height, width, -1) else: data = data.view(height, width) # return the data and info return data, info """ Internal simulation callbacks. """ def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._view = None
28,980
Python
45.074722
137
0.616874
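The intrinsic-matrix update in `Camera._update_intrinsic_matrices` above boils down to a few lines of pinhole geometry. Below is a standalone sketch of that computation; the function name is ours, and it assumes square pixels and a principal point at the image center, exactly as the class does:

import math

import torch


def intrinsics_from_usd_params(focal_length: float, horiz_aperture: float, width: int, height: int) -> torch.Tensor:
    """Builds a 3x3 pinhole intrinsic matrix from USD camera parameters."""
    # field of view from the pinhole model: fov = 2 * atan(aperture / (2 * focal_length))
    fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
    # focal length expressed in pixels; square pixels assumed, so f_x == f_y
    focal_px = width * 0.5 / math.tan(fov / 2)
    K = torch.zeros(3, 3)
    K[0, 0] = focal_px
    K[1, 1] = focal_px
    K[0, 2] = width * 0.5   # principal point at the image center
    K[1, 2] = height * 0.5
    K[2, 2] = 1.0
    return K


# example: the default USD camera (24 mm focal length, 20.955 mm aperture) at 640x480
print(intrinsics_from_usd_params(24.0, 20.955, 640, 480))

Note that `set_intrinsic_matrices` in the class above is the inverse map: it recovers `horizontal_aperture = width * focal_length / f_x` from a given matrix.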
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import torch from dataclasses import dataclass from tensordict import TensorDict from typing import Any from .utils import convert_orientation_convention @dataclass class CameraData: """Data container for the camera sensor.""" ## # Frame state. ## pos_w: torch.Tensor = None """Position of the sensor origin in world frame, following ROS convention. Shape is (N, 3) where N is the number of sensors. """ quat_w_world: torch.Tensor = None """Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame .. note:: World frame convention follows the camera aligned with forward axis +X and up axis +Z. Shape is (N, 4) where N is the number of sensors. """ ## # Camera data ## image_shape: tuple[int, int] = None """A tuple containing (height, width) of the camera sensor.""" intrinsic_matrices: torch.Tensor = None """The intrinsic matrices for the camera. Shape is (N, 3, 3) where N is the number of sensors. """ output: TensorDict = None """The retrieved sensor data with sensor types as key. The format of the data is available in the `Replicator Documentation`_. For semantic-based data, this corresponds to the ``"data"`` key in the output of the sensor. .. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output """ info: list[dict[str, Any]] = None """The retrieved sensor info with sensor types as key. This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths. For semantic-based data, this corresponds to the ``"info"`` key in the output of the sensor. For other sensor types, the info is empty. """ ## # Additional Frame orientation conventions ## @property def quat_w_ros(self) -> torch.Tensor: """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention. .. note:: ROS convention follows the camera aligned with forward axis +Z and up axis -Y. Shape is (N, 4) where N is the number of sensors. """ return convert_orientation_convention(self.quat_w_world, origin="world", target="ros") @property def quat_w_opengl(self) -> torch.Tensor: """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following Opengl / USD Camera convention. .. note:: OpenGL convention follows the camera aligned with forward axis -Z and up axis +Y. Shape is (N, 4) where N is the number of sensors. """ return convert_orientation_convention(self.quat_w_world, origin="world", target="opengl")
3,019
Python
30.789473
155
0.670421
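The three orientation conventions documented in `CameraData` differ only by fixed rotations of the camera axes. A quick, simulator-free check of the documented forward/up axes (the tensors below are ours; the axes come from the docstrings above):

import torch

# forward/up axes of each convention, expressed in a common camera frame
fwd_gl, up_gl = torch.tensor([0.0, 0.0, -1.0]), torch.tensor([0.0, 1.0, 0.0])   # opengl: -Z forward, +Y up
fwd_ros, up_ros = torch.tensor([0.0, 0.0, 1.0]), torch.tensor([0.0, -1.0, 0.0])  # ros: +Z forward, -Y up

# a 180-degree rotation about X maps the OpenGL axes onto the ROS axes
R_x_pi = torch.diag(torch.tensor([1.0, -1.0, -1.0]))
assert torch.allclose(R_x_pi @ fwd_gl, fwd_ros)
assert torch.allclose(R_x_pi @ up_gl, up_ros)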
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for camera wrapper around USD camera prim.""" from .camera import Camera from .camera_cfg import CameraCfg from .camera_data import CameraData from .utils import * # noqa: F401, F403
322
Python
25.916665
59
0.751553
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/utils.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Helper functions to project between pointcloud and depth images.""" from __future__ import annotations import math import numpy as np import torch import torch.nn.functional as F from collections.abc import Sequence from typing import Literal import omni.isaac.core.utils.stage as stage_utils import warp as wp from pxr import UsdGeom import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.utils.array import TensorData, convert_to_torch """ Depth <-> Pointcloud conversions. """ def transform_points( points: TensorData, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, ) -> np.ndarray | torch.Tensor: r"""Transform input points in a given frame to a target frame. This function transform points from a source frame to a target frame. The transformation is defined by the position ``t`` and orientation ``R`` of the target frame in the source frame. .. math:: p_{target} = R_{target} \times p_{source} + t_{target} If either the inputs `position` and `orientation` are None, the corresponding transformation is not applied. Args: points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in source frame. position: The position of source frame in target frame. Defaults to None. orientation: The orientation (w, x, y, z) of source frame in target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. Returns: A tensor of shape (N, 3) comprising of 3D points in target frame. If the input is a numpy array, the output is a numpy array. Otherwise, it is a torch tensor. """ # check if numpy is_numpy = isinstance(points, np.ndarray) # decide device if device is None and is_numpy: device = torch.device("cpu") # convert to torch points = convert_to_torch(points, dtype=torch.float32, device=device) # update the device with the device of the depth image # note: this is needed since warp does not provide the device directly device = points.device # apply rotation if orientation is not None: orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) # apply translation if position is not None: position = convert_to_torch(position, dtype=torch.float32, device=device) # apply transformation points = math_utils.transform_points(points, position, orientation) # return everything according to input type if is_numpy: return points.detach().cpu().numpy() else: return points def create_pointcloud_from_depth( intrinsic_matrix: np.ndarray | torch.Tensor | wp.array, depth: np.ndarray | torch.Tensor | wp.array, keep_invalid: bool = False, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, ) -> np.ndarray | torch.Tensor: r"""Creates pointcloud from input depth image and camera intrinsic matrix. This function creates a pointcloud from a depth image and camera intrinsic matrix. The pointcloud is computed using the following equation: .. math:: p_{camera} = K^{-1} \times [u, v, 1]^T \times d where :math:`K` is the camera intrinsic matrix, :math:`u` and :math:`v` are the pixel coordinates and :math:`d` is the depth value at the pixel. 
Additionally, the pointcloud can be transformed from the camera frame to a target frame by providing the position ``t`` and orientation ``R`` of the camera in the target frame: .. math:: p_{target} = R_{target} \times p_{camera} + t_{target} Args: intrinsic_matrix: A (3, 3) array providing camera's calibration matrix. depth: An array of shape (H, W) with values encoding the depth measurement. keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points correspond to pixels with depth values 0.0 or NaN. Defaults to False. position: The position of the camera in a target frame. Defaults to None. orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. Returns: An array/tensor of shape (N, 3) comprising of 3D coordinates of points. The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray is returned. """ # We use PyTorch here for matrix multiplication since it is compiled with Intel MKL while numpy # by default uses OpenBLAS. With PyTorch (CPU), we could process a depth image of size (480, 640) # in 0.0051 secs, while with numpy it took 0.0292 secs. # convert to numpy matrix is_numpy = isinstance(depth, np.ndarray) # decide device if device is None and is_numpy: device = torch.device("cpu") # convert depth to torch tensor depth = convert_to_torch(depth, dtype=torch.float32, device=device) # update the device with the device of the depth image # note: this is needed since warp does not provide the device directly device = depth.device # convert inputs to torch tensors intrinsic_matrix = convert_to_torch(intrinsic_matrix, dtype=torch.float32, device=device) if position is not None: position = convert_to_torch(position, dtype=torch.float32, device=device) if orientation is not None: orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) # compute pointcloud depth_cloud = math_utils.unproject_depth(depth, intrinsic_matrix) # convert 3D points to world frame depth_cloud = math_utils.transform_points(depth_cloud, position, orientation) # keep only valid entries if flag is set if not keep_invalid: pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(depth_cloud), ~torch.isinf(depth_cloud)), dim=1) depth_cloud = depth_cloud[pts_idx_to_keep, ...] # return everything according to input type if is_numpy: return depth_cloud.detach().cpu().numpy() else: return depth_cloud def create_pointcloud_from_rgbd( intrinsic_matrix: torch.Tensor | np.ndarray | wp.array, depth: torch.Tensor | np.ndarray | wp.array, rgb: torch.Tensor | wp.array | np.ndarray | tuple[float, float, float] = None, normalize_rgb: bool = False, position: Sequence[float] | None = None, orientation: Sequence[float] | None = None, device: torch.device | str | None = None, num_channels: int = 3, ) -> tuple[torch.Tensor, torch.Tensor] | tuple[np.ndarray, np.ndarray]: """Creates pointcloud from input depth image and camera transformation matrix. This function provides the same functionality as :meth:`create_pointcloud_from_depth` but also allows to provide the RGB values for each point. The ``rgb`` attribute is used to resolve the corresponding point's color: - If a ``np.array``/``wp.array``/``torch.tensor`` of shape (H, W, 3), then the corresponding channels encode RGB values. - If a tuple, then the point cloud has a single color specified by the values (r, g, b). 
    - If None, then the default color is black, i.e. (0, 0, 0).

    If the input ``normalize_rgb`` is set to :obj:`True`, then the RGB values are normalized to be in the range [0, 1].

    Args:
        intrinsic_matrix: A (3, 3) array/tensor providing camera's calibration matrix.
        depth: An array/tensor of shape (H, W) with values encoding the depth measurement.
        rgb: Color for generated point cloud. Defaults to None.
        normalize_rgb: Whether to normalize input rgb. Defaults to False.
        position: The position of the camera in a target frame. Defaults to None.
        orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None.
        device: The device for torch where the computation should be executed. Defaults to None,
            in which case it takes the device that matches the depth image.
        num_channels: Number of channels in RGB pointcloud. Defaults to 3.

    Returns:
        A tuple of (N, 3) arrays or tensors containing the 3D coordinates of points and their RGB color respectively.
        The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray
        is returned.

    Raises:
        ValueError: When rgb image is a numpy array but not of shape (H, W, 3) or (H, W, 4).
    """
    # check valid inputs
    if rgb is not None and not isinstance(rgb, tuple):
        if len(rgb.shape) == 3:
            if rgb.shape[2] not in [3, 4]:
                raise ValueError(f"Input rgb image of invalid shape: {rgb.shape} != (H, W, 3) or (H, W, 4).")
        else:
            raise ValueError(f"Input rgb image not three-dimensional. Received shape: {rgb.shape}.")
    if num_channels not in [3, 4]:
        raise ValueError(f"Invalid number of channels: {num_channels} != 3 or 4.")

    # check if input depth is numpy array
    is_numpy = isinstance(depth, np.ndarray)
    # decide device
    if device is None and is_numpy:
        device = torch.device("cpu")
    # convert depth to torch tensor
    if is_numpy:
        depth = torch.from_numpy(depth).to(device=device)
    # retrieve XYZ pointcloud
    points_xyz = create_pointcloud_from_depth(intrinsic_matrix, depth, True, position, orientation, device=device)

    # get image height and width
    im_height, im_width = depth.shape[:2]
    # total number of points
    num_points = im_height * im_width
    # extract color value
    if rgb is not None:
        if isinstance(rgb, (np.ndarray, torch.Tensor, wp.array)):
            # copy numpy array to preserve
            rgb = convert_to_torch(rgb, device=device, dtype=torch.float32)
            rgb = rgb[:, :, :3]
            # convert the matrix to (W, H, 3) from (H, W, 3) since depth processing
            # is done in the order (u, v) where u: (0, W-1) and v: (0 - H-1)
            points_rgb = rgb.permute(1, 0, 2).reshape(-1, 3)
        elif isinstance(rgb, (tuple, list)):
            # same color for all points
            # note: use torch.tensor here; the legacy torch.Tensor constructor does not accept dtype/device kwargs
            points_rgb = torch.tensor((rgb,) * num_points, device=device, dtype=torch.uint8)
        else:
            # default color is black
            points_rgb = torch.tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8)
    else:
        points_rgb = torch.tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8)
    # normalize color values
    if normalize_rgb:
        points_rgb = points_rgb.float() / 255

    # remove invalid points
    pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(points_xyz), ~torch.isinf(points_xyz)), dim=1)
    points_rgb = points_rgb[pts_idx_to_keep, ...]
    points_xyz = points_xyz[pts_idx_to_keep, ...]
    # add additional channels if required
    if num_channels == 4:
        points_rgb = torch.nn.functional.pad(points_rgb, (0, 1), mode="constant", value=1.0)

    # return everything according to input type
    if is_numpy:
        return points_xyz.cpu().numpy(), points_rgb.cpu().numpy()
    else:
        return points_xyz, points_rgb


def convert_orientation_convention(
    orientation: torch.Tensor,
    origin: Literal["opengl", "ros", "world"] = "opengl",
    target: Literal["opengl", "ros", "world"] = "ros",
) -> torch.Tensor:
    r"""Converts a quaternion representing a rotation from one convention to another.

    In USD, the camera follows the ``"opengl"`` convention. Thus, it is always in **Y up** convention.
    This means that the camera is looking down the -Z axis with the +Y axis pointing up, and +X axis pointing right.
    However, in ROS, the camera is looking down the +Z axis with the +Y axis pointing down, and +X axis pointing right.
    Thus, the camera needs to be rotated by :math:`180^{\circ}` around the X axis to follow the ROS convention.

    .. math::

        T_{ROS} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}

    On the other hand, the typical world coordinate system is with +X pointing forward, +Y pointing left,
    and +Z pointing up. The camera can also be set in this convention by rotating the camera by :math:`90^{\circ}`
    around the X axis and :math:`-90^{\circ}` around the Y axis.

    .. math::

        T_{WORLD} = \begin{bmatrix} 0 & 0 & -1 & 0 \\ -1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}

    Thus, based on their application, cameras follow different conventions for their orientation. This function
    converts a quaternion from one convention to another.

    Possible conventions are:

    - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
    - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
    - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention

    Args:
        orientation: Quaternion of form `(w, x, y, z)` with shape (..., 4) in source convention
        origin: The convention to convert from. Defaults to "opengl".
        target: The convention to convert to. Defaults to "ros".
Returns: Quaternion of form `(w, x, y, z)` with shape (..., 4) in target convention """ if target == origin: return orientation.clone() # -- unify input type if origin == "ros": # convert from ros to opengl convention rotm = math_utils.matrix_from_quat(orientation) rotm[:, :, 2] = -rotm[:, :, 2] rotm[:, :, 1] = -rotm[:, :, 1] # convert to opengl convention quat_gl = math_utils.quat_from_matrix(rotm) elif origin == "world": # convert from world (x forward and z up) to opengl convention rotm = math_utils.matrix_from_quat(orientation) rotm = torch.matmul( rotm, math_utils.matrix_from_euler( torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ" ), ) # convert to isaac-sim convention quat_gl = math_utils.quat_from_matrix(rotm) else: quat_gl = orientation # -- convert to target convention if target == "ros": # convert from opengl to ros convention rotm = math_utils.matrix_from_quat(quat_gl) rotm[:, :, 2] = -rotm[:, :, 2] rotm[:, :, 1] = -rotm[:, :, 1] return math_utils.quat_from_matrix(rotm) elif target == "world": # convert from opengl to world (x forward and z up) convention rotm = math_utils.matrix_from_quat(quat_gl) rotm = torch.matmul( rotm, math_utils.matrix_from_euler( torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ" ).T, ) return math_utils.quat_from_matrix(rotm) else: return quat_gl.clone() # @torch.jit.script def create_rotation_matrix_from_view( eyes: torch.Tensor, targets: torch.Tensor, device: str = "cpu", ) -> torch.Tensor: """ This function takes a vector ''eyes'' which specifies the location of the camera in world coordinates and the vector ''targets'' which indicate the position of the object. The output is a rotation matrix representing the transformation from world coordinates -> view coordinates. The inputs camera_position and targets can each be a - 3 element tuple/list - torch tensor of shape (1, 3) - torch tensor of shape (N, 3) Args: eyes: position of the camera in world coordinates targets: position of the object in world coordinates The vectors are broadcast against each other so they all have shape (N, 3). Returns: R: (N, 3, 3) batched rotation matrices Reference: Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/eaf0709d6af0025fe94d1ee7cec454bc3054826a/pytorch3d/renderer/cameras.py#L1635-L1685) """ up_axis_token = stage_utils.get_stage_up_axis() if up_axis_token == UsdGeom.Tokens.y: up_axis = torch.tensor((0, 1, 0), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1) elif up_axis_token == UsdGeom.Tokens.z: up_axis = torch.tensor((0, 0, 1), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1) else: raise ValueError(f"Invalid up axis: {up_axis_token}") # get rotation matrix in opengl format (-Z forward, +Y up) z_axis = -F.normalize(targets - eyes, eps=1e-5) x_axis = F.normalize(torch.cross(up_axis, z_axis, dim=1), eps=1e-5) y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5) is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(dim=1, keepdim=True) if is_close.any(): replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5) x_axis = torch.where(is_close, replacement, x_axis) R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1) return R.transpose(1, 2)
17,516
Python
42.902256
158
0.653574
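The pointcloud helpers above implement the documented equation p_camera = K^{-1} [u, v, 1]^T * d. Here is a self-contained sketch of that unprojection in plain PyTorch; it is a simplified stand-in for the library's `math_utils.unproject_depth`, not its implementation (pixel-center conventions may differ):

import torch


def unproject_depth_sketch(depth: torch.Tensor, K: torch.Tensor) -> torch.Tensor:
    """Unprojects a (H, W) depth image into (H*W, 3) points in the camera frame."""
    H, W = depth.shape
    # build the homogeneous pixel grid [u, v, 1]^T for every pixel
    v, u = torch.meshgrid(
        torch.arange(H, dtype=torch.float32), torch.arange(W, dtype=torch.float32), indexing="ij"
    )
    pix = torch.stack((u.flatten(), v.flatten(), torch.ones(H * W)), dim=0)  # (3, H*W)
    # rays = K^{-1} [u, v, 1]^T, then scale each ray by its depth value
    rays = torch.linalg.inv(K) @ pix
    return (rays * depth.flatten()).T  # (H*W, 3)


K = torch.tensor([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
depth = torch.full((480, 640), 2.0)  # a synthetic flat depth image at 2 m
points = unproject_depth_sketch(depth, K)
print(points.shape)  # torch.Size([307200, 3])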
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from typing import Literal from omni.isaac.orbit.sim import FisheyeCameraCfg, PinholeCameraCfg from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .camera import Camera @configclass class CameraCfg(SensorBaseCfg): """Configuration for a camera sensor.""" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention. - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. """ class_type: type = Camera offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. Note: The parent frame is the frame the sensor attaches to. For example, the parent frame of a camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. """ spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING """Spawn configuration for the asset. If None, then the prim is not spawned by the asset. Instead, it is assumed that the asset is already present in the scene. """ data_types: list[str] = ["rgb"] """List of sensor names/types to enable for the camera. Defaults to ["rgb"]. Please refer to the :class:`Camera` class for a list of available data types. """ width: int = MISSING """Width of the image in pixels.""" height: int = MISSING """Height of the image in pixels.""" semantic_filter: str | list[str] = "*:*" """A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``. If a string, it should be a disjunctive normal form of (semantic type, labels). For examples: * ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``: All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC". Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE". * ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA" If a list of strings, each string should be a semantic type. The segmentation for prims with semantics of the specified types will be retrieved. For example, if the list is ["class"], only the segmentation for prims with semantics of type "class" will be retrieved. .. seealso:: For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering """ colorize_semantic_segmentation: bool = True """Whether to colorize the semantic segmentation images. Defaults to True. 
    If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors
    and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array.
    """

    colorize_instance_id_segmentation: bool = True
    """Whether to colorize the instance ID segmentation images. Defaults to True.

    If True, instance ID segmentation is converted to an image where instance IDs are mapped to colors
    and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array.
    """

    colorize_instance_segmentation: bool = True
    """Whether to colorize the instance segmentation images. Defaults to True.

    If True, instance segmentation is converted to an image where instance IDs are mapped to colors
    and returned as a ``uint8`` 4-channel array. If False, the output is returned as an ``int32`` array.
    """
4,659
Python
40.981982
159
0.674394
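For reference, a configuration such as `CameraCfg` is typically filled in like the sketch below. The prim path, resolution, data types, and spawn parameter values are illustrative assumptions rather than values mandated by the file above; `PinholeCameraCfg` is imported exactly as `camera_cfg.py` does:

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sensors.camera import CameraCfg

# a hypothetical robot-mounted camera; "{ENV_REGEX_NS}" is the per-environment namespace token
camera = CameraCfg(
    prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
    height=480,
    width=640,
    data_types=["rgb", "distance_to_image_plane"],  # assumed annotator names
    spawn=sim_utils.PinholeCameraCfg(
        focal_length=24.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
    ),
    # apply the mounting offset in the ROS convention, as documented in OffsetCfg
    offset=CameraCfg.OffsetCfg(pos=(0.5, 0.0, 0.1), rot=(1.0, 0.0, 0.0, 0.0), convention="ros"),
)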
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # Ignore optional memory usage warning globally # pyright: reportOptionalSubscript=false from __future__ import annotations import torch from collections.abc import Sequence from typing import TYPE_CHECKING import omni.physics.tensors.impl.api as physx from pxr import PhysxSchema import omni.isaac.orbit.sim as sim_utils import omni.isaac.orbit.utils.string as string_utils from omni.isaac.orbit.markers import VisualizationMarkers from omni.isaac.orbit.utils.math import convert_quat from ..sensor_base import SensorBase from .contact_sensor_data import ContactSensorData if TYPE_CHECKING: from .contact_sensor_cfg import ContactSensorCfg class ContactSensor(SensorBase): """A contact reporting sensor. The contact sensor reports the normal contact forces on a rigid body in the world frame. It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. To enable the contact reporter on a rigid body, please make sure to enable the :attr:`omni.isaac.orbit.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your asset spawner configuration. This will enable the contact reporter on all the rigid bodies in the asset. The sensor can be configured to report the contact forces on a set of bodies with a given filter pattern. Please check the documentation on `RigidContactView`_ for more details. .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html .. _RigidContactView: https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html#omni.isaac.core.prims.RigidContactView """ cfg: ContactSensorCfg """The configuration parameters.""" def __init__(self, cfg: ContactSensorCfg): """Initializes the contact sensor object. Args: cfg: The configuration parameters. """ # initialize base class super().__init__(cfg) # Create empty variables for storing output data self._data: ContactSensorData = ContactSensorData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"Contact sensor @ '{self.cfg.prim_path}': \n" f"\tview type : {self.body_physx_view.__class__}\n" f"\tupdate period (s) : {self.cfg.update_period}\n" f"\tnumber of bodies : {self.num_bodies}\n" f"\tbody names : {self.body_names}\n" ) """ Properties """ @property def num_instances(self) -> int: return self.body_physx_view.count @property def data(self) -> ContactSensorData: # update sensors if needed self._update_outdated_buffers() # return the data return self._data @property def num_bodies(self) -> int: """Number of bodies with contact sensors attached.""" return self._num_bodies @property def body_names(self) -> list[str]: """Ordered names of bodies with contact sensors attached.""" prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] return [path.split("/")[-1] for path in prim_paths] @property def body_physx_view(self) -> physx.RigidBodyView: """View for the rigid bodies captured (PhysX). Note: Use this view with caution. It requires handling of tensors in a specific way. """ return self._body_physx_view @property def contact_physx_view(self) -> physx.RigidContactView: """Contact reporter view for the bodies (PhysX). Note: Use this view with caution. It requires handling of tensors in a specific way. 
""" return self._contact_physx_view """ Operations """ def reset(self, env_ids: Sequence[int] | None = None): # reset the timers and counters super().reset(env_ids) # resolve None if env_ids is None: env_ids = slice(None) # reset accumulative data buffers self._data.net_forces_w[env_ids] = 0.0 self._data.net_forces_w_history[env_ids] = 0.0 if self.cfg.history_length > 0: self._data.net_forces_w_history[env_ids] = 0.0 # reset force matrix if len(self.cfg.filter_prim_paths_expr) != 0: self._data.force_matrix_w[env_ids] = 0.0 # reset the current air time if self.cfg.track_air_time: self._data.current_air_time[env_ids] = 0.0 self._data.last_air_time[env_ids] = 0.0 self._data.current_contact_time[env_ids] = 0.0 self._data.last_contact_time[env_ids] = 0.0 def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the articulation based on the name keys. Args: name_keys: A regular expression or a list of regular expressions to match the body names. preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. Returns: A tuple of lists containing the body indices and names. """ return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: """Checks if bodies that have established contact within the last :attr:`dt` seconds. This function checks if the bodies have established contact within the last :attr:`dt` seconds by comparing the current contact time with the given time period. If the contact time is less than the given time period, then the bodies are considered to be in contact. Note: The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if the sensor is updated by the physics or the environment stepping time-step and the sensor is read by the environment stepping time-step. Args: dt: The time period since the contact was established. abs_tol: The absolute tolerance for the comparison. Returns: A boolean tensor indicating the bodies that have established contact within the last :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. Raises: RuntimeError: If the sensor is not configured to track contact time. """ # check if the sensor is configured to track contact time if not self.cfg.track_air_time: raise RuntimeError( "The contact sensor is not configured to track contact time." "Please enable the 'track_air_time' in the sensor configuration." ) # check if the bodies are in contact currently_in_contact = self.data.current_contact_time > 0.0 less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) return currently_in_contact * less_than_dt_in_contact def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: """Checks if bodies that have broken contact within the last :attr:`dt` seconds. This function checks if the bodies have broken contact within the last :attr:`dt` seconds by comparing the current air time with the given time period. If the air time is less than the given time period, then the bodies are considered to not be in contact. Note: It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. 
        This is always true if the sensor is updated by the physics or the environment stepping time-step
        and the sensor is read by the environment stepping time-step.

        Args:
            dt: The time period since the contact was broken.
            abs_tol: The absolute tolerance for the comparison.

        Returns:
            A boolean tensor indicating the bodies that have broken contact within the last
            :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is
            the number of bodies in each sensor.

        Raises:
            RuntimeError: If the sensor is not configured to track contact time.
        """
        # check if the sensor is configured to track contact time
        if not self.cfg.track_air_time:
            raise RuntimeError(
                "The contact sensor is not configured to track contact time."
                "Please enable the 'track_air_time' in the sensor configuration."
            )
        # check if the bodies are detached
        currently_detached = self.data.current_air_time > 0.0
        less_than_dt_detached = self.data.current_air_time < (dt + abs_tol)
        return currently_detached * less_than_dt_detached

    """
    Implementation.
    """

    def _initialize_impl(self):
        super()._initialize_impl()
        # create simulation view
        self._physics_sim_view = physx.create_simulation_view(self._backend)
        self._physics_sim_view.set_subspace_roots("/")
        # check that only rigid bodies are selected
        leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1]
        template_prim_path = self._parent_prims[0].GetPath().pathString
        body_names = list()
        for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern):
            # check if prim has contact reporter API
            if prim.HasAPI(PhysxSchema.PhysxContactReportAPI):
                prim_path = prim.GetPath().pathString
                body_names.append(prim_path.rsplit("/", 1)[-1])
        # check that there is at least one body with contact reporter API
        if not body_names:
            raise RuntimeError(
                f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API."
                "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration."
            )
        # construct regex expression for the body names
        body_names_regex = r"(" + "|".join(body_names) + r")"
        body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}"
        # construct a new regex expression
        # create a rigid prim view for the sensor
        self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*"))
        self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view(
            body_names_regex.replace(".*", "*"), filter_patterns=self.cfg.filter_prim_paths_expr
        )
        # resolve the true count of bodies
        self._num_bodies = self.body_physx_view.count // self._num_envs
        # check that contact reporter succeeded
        if self._num_bodies != len(body_names):
            raise RuntimeError(
                "Failed to initialize contact reporter for specified bodies."
f"\n\tInput prim path : {self.cfg.prim_path}" f"\n\tResolved prim paths: {body_names_regex}" ) # prepare data buffers self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) # optional buffers # -- history of net forces if self.cfg.history_length > 0: self._data.net_forces_w_history = torch.zeros( self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device ) else: self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) # -- pose of sensor origins if self.cfg.track_pose: self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) # -- air/contact time between contacts if self.cfg.track_air_time: self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) if len(self.cfg.filter_prim_paths_expr) != 0: num_filters = self.contact_physx_view.filter_count self._data.force_matrix_w = torch.zeros( self._num_envs, self._num_bodies, num_filters, 3, device=self._device ) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" # default to all sensors if len(env_ids) == self._num_envs: env_ids = slice(None) # obtain the contact forces # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). # This isn't the most efficient way to do this, but it's the easiest to implement. 
net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] # update contact force history if self.cfg.history_length > 0: self._data.net_forces_w_history[env_ids, 1:] = self._data.net_forces_w_history[env_ids, :-1].clone() self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] # obtain the contact force matrix if len(self.cfg.filter_prim_paths_expr) != 0: # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) num_filters = self.contact_physx_view.filter_count # acquire and shape the force matrix force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] # obtain the pose of the sensor origin if self.cfg.track_pose: pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) # obtain the air time if self.cfg.track_air_time: # -- time elapsed since last update # since this function is called every frame, we can use the difference to get the elapsed time elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] # -- check contact state of bodies is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact # -- update the last contact time if body has just become in contact self._data.last_air_time[env_ids] = torch.where( is_first_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), self._data.last_air_time[env_ids], ) # -- increment time for bodies that are not in contact self._data.current_air_time[env_ids] = torch.where( ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 ) # -- update the last contact time if body has just detached self._data.last_contact_time[env_ids] = torch.where( is_first_detached, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), self._data.last_contact_time[env_ids], ) # -- increment time for bodies that are in contact self._data.current_contact_time[env_ids] = torch.where( is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 ) def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers # note: parent only deals with callbacks. 
        # not their visibility
        if debug_vis:
            # create markers if necessary for the first time
            if not hasattr(self, "contact_visualizer"):
                self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
            # set their visibility to true
            self.contact_visualizer.set_visibility(True)
        else:
            if hasattr(self, "contact_visualizer"):
                self.contact_visualizer.set_visibility(False)

    def _debug_vis_callback(self, event):
        # safely return if view becomes invalid
        # note: this invalidity happens because of isaac sim view callbacks
        if self.body_physx_view is None:
            return
        # marker indices
        # 0: contact, 1: no contact
        net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1)
        marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1)
        # check if prim is visualized
        if self.cfg.track_pose:
            frame_origins: torch.Tensor = self._data.pos_w
        else:
            pose = self.body_physx_view.get_transforms()
            frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3]
        # visualize
        self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1))

    """
    Internal simulation callbacks.
    """

    def _invalidate_initialize_callback(self, event):
        """Invalidates the scene elements."""
        # call parent
        super()._invalidate_initialize_callback(event)
        # set all existing views to None to invalidate them
        self._physics_sim_view = None
        self._body_physx_view = None
        self._contact_physx_view = None
18,724
Python
46.285353
160
0.628124
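The air/contact-time bookkeeping in `_update_buffers_impl` above is easiest to see on a toy example. A minimal sketch with one sensor and four bodies, using the same `torch.where` pattern (all numbers are made up):

import torch

B, dt, force_threshold = 4, 0.01, 1.0
net_forces_w = torch.tensor([[0.0, 0.0, 9.8], [0.0, 0.0, 0.0], [0.0, 0.0, 2.0], [0.0, 0.0, 0.5]])
current_air_time = torch.tensor([0.30, 0.00, 0.00, 0.10])
last_air_time = torch.zeros(B)

is_contact = torch.norm(net_forces_w, dim=-1) > force_threshold  # [True, False, True, False]
is_first_contact = (current_air_time > 0) & is_contact           # body 0 just touched down
# bodies that just touched down record their completed airborne phase
last_air_time = torch.where(is_first_contact, current_air_time + dt, last_air_time)
# airborne bodies accumulate time; bodies in contact reset to zero
current_air_time = torch.where(~is_contact, current_air_time + dt, torch.zeros(B))
print(last_air_time)     # tensor([0.3100, 0.0000, 0.0000, 0.0000])
print(current_air_time)  # tensor([0.0000, 0.0100, 0.0000, 0.1100])

With this bookkeeping, `compute_first_contact(dt)` reduces to checking `0 < current_contact_time <= dt + abs_tol`.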
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from dataclasses import dataclass


@dataclass
class ContactSensorData:
    """Data container for the contact reporting sensor."""

    pos_w: torch.Tensor | None = None
    """Position of the sensor origin in world frame.

    Shape is (N, 3), where N is the number of sensors.

    Note:
        If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
    """

    quat_w: torch.Tensor | None = None
    """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.

    Shape is (N, 4), where N is the number of sensors.

    Note:
        If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
    """

    net_forces_w: torch.Tensor | None = None
    """The net contact forces in world frame.

    Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor.
    """

    net_forces_w_history: torch.Tensor | None = None
    """The net contact forces in world frame.

    Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length
    and B is the number of bodies in each sensor.

    In the history dimension, the first index is the most recent and the last index is the oldest.
    """

    force_matrix_w: torch.Tensor | None = None
    """The contact forces filtered between the sensor bodies and filtered bodies in world frame.

    Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor
    and ``M`` is the number of filtered bodies.

    Note:
        If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None.
    """

    last_air_time: torch.Tensor | None = None
    """Time spent (in s) in the air before the last contact.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    current_air_time: torch.Tensor | None = None
    """Time spent (in s) in the air since the last detach.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    last_contact_time: torch.Tensor | None = None
    """Time spent (in s) in contact before the last detach.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """

    current_contact_time: torch.Tensor | None = None
    """Time spent (in s) in contact since the last contact.

    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.

    Note:
        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
    """
3,111
Python
32.106383
102
0.668917
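The `net_forces_w_history` layout documented above (newest sample at index 0) is maintained by a simple shift-and-write. A small sketch of that roll, mirroring what `_update_buffers_impl` in `contact_sensor.py` does each update:

import torch

N, T, B = 2, 5, 3  # sensors, history length, bodies per sensor
net_forces_w_history = torch.zeros(N, T, B, 3)
new_sample = torch.rand(N, B, 3)
# shift the older samples back by one slot, then write the newest at index 0
net_forces_w_history[:, 1:] = net_forces_w_history[:, :-1].clone()
net_forces_w_history[:, 0] = new_sample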
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for rigid contact sensor based on :class:`omni.isaac.core.prims.RigidContactView`.""" from .contact_sensor import ContactSensor from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData
366
Python
32.363633
99
0.789617
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/contact_sensor/contact_sensor_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from omni.isaac.orbit.markers import VisualizationMarkersCfg from omni.isaac.orbit.markers.config import CONTACT_SENSOR_MARKER_CFG from omni.isaac.orbit.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .contact_sensor import ContactSensor @configclass class ContactSensorCfg(SensorBaseCfg): """Configuration for the contact sensor.""" class_type: type = ContactSensor track_pose: bool = False """Whether to track the pose of the sensor's origin. Defaults to False.""" track_air_time: bool = False """Whether to track the air/contact time of the bodies (time between contacts). Defaults to False.""" force_threshold: float = 1.0 """The threshold on the norm of the contact force that determines whether two bodies are in collision or not. This value is only used for tracking the mode duration (the time in contact or in air), if :attr:`track_air_time` is True. """ filter_prim_paths_expr: list[str] = list() """The list of primitive paths to filter contacts with. For example, if you want to filter contacts with the ground plane, you can set this to ``["/World/ground_plane"]``. In this case, the contact sensor will only report contacts with the ground plane while using the :meth:`omni.isaac.core.prims.RigidContactView.get_contact_force_matrix` method. If an empty list is provided, then only net contact forces are reported. """ visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor") """The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG. Note: This attribute is only used when debug visualization is enabled. """
1,916
Python
35.865384
115
0.733299
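As a usage sketch, the configuration below enables air-time tracking and filters contact reporting against a ground plane. The prim path expressions are placeholders chosen for illustration and depend on the actual scene layout; ``prim_path`` itself is inherited from the parent :class:`SensorBaseCfg`.

.. code-block:: python

    from omni.isaac.orbit.sensors.contact_sensor import ContactSensorCfg

    # All prim paths below are hypothetical.
    contact_sensor = ContactSensorCfg(
        prim_path="/World/envs/env_.*/Robot/.*_FOOT",  # bodies to attach the sensor to
        track_air_time=True,  # record air/contact durations per body
        force_threshold=1.0,  # |F| > 1.0 N counts as contact for mode timing
        filter_prim_paths_expr=["/World/ground_plane"],  # fill force_matrix_w against the ground
    )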
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/rl_task_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING

from omni.isaac.orbit.utils import configclass

from .base_env_cfg import BaseEnvCfg
from .ui import RLTaskEnvWindow


@configclass
class RLTaskEnvCfg(BaseEnvCfg):
    """Configuration for a reinforcement learning environment."""

    # ui settings
    ui_window_class_type: type | None = RLTaskEnvWindow

    # general settings
    is_finite_horizon: bool = False
    """Whether the learning task is treated as a finite or infinite horizon problem for the agent.
    Defaults to False, which means the task is treated as an infinite horizon problem.

    This flag handles the subtleties of finite and infinite horizon tasks:

    * **Finite horizon**: no penalty or bootstrapping value is required by the agent for
      running out of time. However, the environment still needs to terminate the episode
      after the time limit is reached.
    * **Infinite horizon**: the agent needs to bootstrap the value of the state at the end
      of the episode. This is done by sending a time-limit (or truncated) done signal to the
      agent, which triggers this bootstrapping calculation.

    If True, then the environment is treated as a finite horizon problem and no time-out
    (or truncated) done signal is sent to the agent. If False, then the environment is
    treated as an infinite horizon problem and a time-out (or truncated) done signal is
    sent to the agent.

    Note:
        The base :class:`RLTaskEnv` class does not use this flag directly. It is used by the
        environment wrappers to determine what type of done signal to send to the
        corresponding learning agent.
    """

    episode_length_s: float = MISSING
    """Duration of an episode (in seconds).

    Based on the decimation rate and physics time step, the episode length is calculated as:

    .. code-block:: python

        episode_length_steps = ceil(episode_length_s / (decimation_rate * physics_time_step))

    For example, if the decimation rate is 10, the physics time step is 0.01, and the episode
    length is 10 seconds, then the episode length in steps is 100.
    """

    # environment settings
    rewards: object = MISSING
    """Reward settings.

    Please refer to the :class:`omni.isaac.orbit.managers.RewardManager` class for more details.
    """

    terminations: object = MISSING
    """Termination settings.

    Please refer to the :class:`omni.isaac.orbit.managers.TerminationManager` class for more details.
    """

    curriculum: object = MISSING
    """Curriculum settings.

    Please refer to the :class:`omni.isaac.orbit.managers.CurriculumManager` class for more details.
    """

    commands: object = MISSING
    """Command settings.

    Please refer to the :class:`omni.isaac.orbit.managers.CommandManager` class for more details.
    """
2,963
Python
34.710843
115
0.721228
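To make the episode-length formula concrete, the following snippet reproduces the docstring's worked example with assumed values; in practice these would come from a concrete :class:`RLTaskEnvCfg` subclass (``cfg.episode_length_s``, ``cfg.decimation``, ``cfg.sim.dt``).

.. code-block:: python

    import math

    # Assumed example values matching the docstring above.
    episode_length_s = 10.0
    decimation = 10
    physics_dt = 0.01

    # Same formula as in the episode_length_s docstring.
    episode_length_steps = math.ceil(episode_length_s / (decimation * physics_dt))
    assert episode_length_steps == 100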
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package for environment definitions.

Environments define the interface between the agent and the simulation. In the simplest case,
the environment provides the agent with the current observations and executes the actions
provided by the agent. However, the environment can also provide additional information such
as the current reward, done flag, and information about the current episode.

Based on these, there are two types of environments:

* :class:`BaseEnv`: The base environment, which only provides the agent with the current
  observations and executes the actions provided by the agent.
* :class:`RLTaskEnv`: The RL task environment, which, besides the functionality of the base
  environment, also provides additional Markov Decision Process (MDP) related information
  such as the current reward, done flag, and episode information.
"""

from . import mdp, ui
from .base_env import BaseEnv, VecEnvObs
from .base_env_cfg import BaseEnvCfg, ViewerCfg
from .rl_task_env import RLTaskEnv, VecEnvStepReturn
from .rl_task_env_cfg import RLTaskEnvCfg
1,177
Python
39.620688
80
0.796941
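A minimal interaction loop with the RL variant might look as follows. ``MyTaskCfg`` and the 12-dimensional action space are assumptions for illustration; the sketch relies on :meth:`RLTaskEnv.step` returning the full MDP tuple (:obj:`VecEnvStepReturn`), whereas :class:`BaseEnv` omits the reward and done signals.

.. code-block:: python

    import torch

    from omni.isaac.orbit.envs import RLTaskEnv

    # `MyTaskCfg` is a hypothetical RLTaskEnvCfg subclass defined elsewhere.
    env = RLTaskEnv(cfg=MyTaskCfg())
    obs, extras = env.reset()

    for _ in range(100):
        # Zero actions with an assumed 12-dim action space, purely for illustration.
        actions = torch.zeros(env.num_envs, 12, device=env.device)
        obs, rewards, terminated, truncated, extras = env.step(actions)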
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/envs/base_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Base configuration of the environment.

This module defines the general configuration of the environment. It includes parameters for
configuring the environment instances, viewer settings, and simulation parameters.
"""

from __future__ import annotations

from dataclasses import MISSING
from typing import Literal

import omni.isaac.orbit.envs.mdp as mdp
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sim import SimulationCfg
from omni.isaac.orbit.utils import configclass

from .ui import BaseEnvWindow


@configclass
class ViewerCfg:
    """Configuration of the scene viewport camera."""

    eye: tuple[float, float, float] = (7.5, 7.5, 7.5)
    """Initial camera position (in m). Default is (7.5, 7.5, 7.5)."""

    lookat: tuple[float, float, float] = (0.0, 0.0, 0.0)
    """Initial camera target position (in m). Default is (0.0, 0.0, 0.0)."""

    cam_prim_path: str = "/OmniverseKit_Persp"
    """The camera prim path to record images from. Default is "/OmniverseKit_Persp",
    which is the default camera in the viewport.
    """

    resolution: tuple[int, int] = (1280, 720)
    """The resolution (width, height) of the camera specified using :attr:`cam_prim_path`.
    Default is (1280, 720).
    """

    origin_type: Literal["world", "env", "asset_root"] = "world"
    """The frame in which the camera position (eye) and target (lookat) are defined. Default is "world".

    Available options are:

    * ``"world"``: The origin of the world.
    * ``"env"``: The origin of the environment defined by :attr:`env_index`.
    * ``"asset_root"``: The center of the asset defined by :attr:`asset_name` in environment :attr:`env_index`.
    """

    env_index: int = 0
    """The environment index for frame origin. Default is 0.

    This quantity is only effective if :attr:`origin_type` is set to "env" or "asset_root".
    """

    asset_name: str | None = None
    """The asset name in the interactive scene for the frame origin. Default is None.

    This quantity is only effective if :attr:`origin_type` is set to "asset_root".
    """


@configclass
class DefaultEventManagerCfg:
    """Configuration of the default event manager.

    This manager is used to reset the scene to a default state. The default state is specified
    by the scene configuration.
    """

    reset_scene_to_default = EventTerm(func=mdp.reset_scene_to_default, mode="reset")


@configclass
class BaseEnvCfg:
    """Base configuration of the environment."""

    # simulation settings
    viewer: ViewerCfg = ViewerCfg()
    """Viewer configuration. Default is ViewerCfg()."""

    sim: SimulationCfg = SimulationCfg()
    """Physics simulation configuration. Default is SimulationCfg()."""

    # ui settings
    ui_window_class_type: type | None = BaseEnvWindow
    """The class type of the UI window. Default is :class:`BaseEnvWindow`.

    If None, then no UI window is created.

    Note:
        If you want to make your own UI window, you can create a class that inherits from
        :class:`omni.isaac.orbit.envs.ui.base_env_window.BaseEnvWindow`. Then, you can set
        this attribute to your class type.
    """

    # general settings
    decimation: int = MISSING
    """Number of control action updates @ sim dt per policy dt.

    For instance, if the simulation dt is 0.01s and the policy dt is 0.1s, then the decimation
    is 10. This means that the control action is updated every 10 simulation steps.
    """

    # environment settings
    scene: InteractiveSceneCfg = MISSING
    """Scene settings.

    Please refer to the :class:`omni.isaac.orbit.scene.InteractiveSceneCfg` class for more details.
    """

    observations: object = MISSING
    """Observation space settings.

    Please refer to the :class:`omni.isaac.orbit.managers.ObservationManager` class for more details.
    """

    actions: object = MISSING
    """Action space settings.

    Please refer to the :class:`omni.isaac.orbit.managers.ActionManager` class for more details.
    """

    events: object = DefaultEventManagerCfg()
    """Event settings. Defaults to the basic configuration that resets the scene to its default state.

    Please refer to the :class:`omni.isaac.orbit.managers.EventManager` class for more details.
    """

    randomization: object | None = None
    """Randomization settings. Default is None.

    .. deprecated:: 0.3.0
        This attribute is deprecated and will be removed in v0.4.0. Please use the :attr:`events`
        attribute to configure the randomization settings.
    """
4,718
Python
32
111
0.693726
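Putting the pieces together, a user-side configuration might subclass :class:`BaseEnvCfg` as sketched below. The asset name and decimation value are assumptions for illustration; ``scene``, ``observations``, and ``actions`` remain unset here and must be provided by a concrete task configuration.

.. code-block:: python

    from omni.isaac.orbit.envs import BaseEnvCfg, ViewerCfg
    from omni.isaac.orbit.utils import configclass


    @configclass
    class MyEnvCfg(BaseEnvCfg):  # hypothetical configuration for illustration
        # Track the camera relative to the root of an asset named "robot" in env 0.
        viewer: ViewerCfg = ViewerCfg(origin_type="asset_root", asset_name="robot", env_index=0)
        # One control/policy step per 4 physics steps.
        decimation: int = 4
        # `scene`, `observations`, and `actions` are inherited as MISSING and must
        # still be filled in by the concrete task.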