NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/terrains/config/rough.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for custom terrains."""

from __future__ import annotations

import omni.isaac.orbit.terrains as terrain_gen

from ..terrain_generator_cfg import TerrainGeneratorCfg

ROUGH_TERRAINS_CFG = TerrainGeneratorCfg(
    size=(8.0, 8.0),
    border_width=20.0,
    num_rows=10,
    num_cols=20,
    horizontal_scale=0.1,
    vertical_scale=0.005,
    slope_threshold=0.75,
    use_cache=False,
    sub_terrains={
        "pyramid_stairs": terrain_gen.MeshPyramidStairsTerrainCfg(
            proportion=0.2,
            step_height_range=(0.05, 0.23),
            step_width=0.3,
            platform_width=3.0,
            border_width=1.0,
            holes=False,
        ),
        "pyramid_stairs_inv": terrain_gen.MeshInvertedPyramidStairsTerrainCfg(
            proportion=0.2,
            step_height_range=(0.05, 0.23),
            step_width=0.3,
            platform_width=3.0,
            border_width=1.0,
            holes=False,
        ),
        "boxes": terrain_gen.MeshRandomGridTerrainCfg(
            proportion=0.2, grid_width=0.45, grid_height_range=(0.05, 0.2), platform_width=2.0
        ),
        "random_rough": terrain_gen.HfRandomUniformTerrainCfg(
            proportion=0.2, noise_range=(0.02, 0.10), noise_step=0.02, border_width=0.25
        ),
        "hf_pyramid_slope": terrain_gen.HfPyramidSlopedTerrainCfg(
            proportion=0.1, slope_range=(0.0, 0.4), platform_width=2.0, border_width=0.25
        ),
        "hf_pyramid_slope_inv": terrain_gen.HfInvertedPyramidSlopedTerrainCfg(
            proportion=0.1, slope_range=(0.0, 0.4), platform_width=2.0, border_width=0.25
        ),
    },
)
"""Rough terrains configuration."""
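In a typical Orbit workflow this configuration is handed to a terrain importer. A minimal, unverified sketch -- it assumes the `TerrainImporterCfg` API with `terrain_type` and `terrain_generator` fields as used elsewhere in Orbit; treat the field names as illustrative:

# Usage sketch (assumed API, not part of this file): feed the generator
# config into a terrain importer that procedurally builds the ground plane.
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG

terrain_cfg = TerrainImporterCfg(
    prim_path="/World/ground",
    terrain_type="generator",              # generate sub-terrains procedurally
    terrain_generator=ROUGH_TERRAINS_CFG,  # the config defined above
)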
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/timer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for a timer class that can be used for performance measurements."""

from __future__ import annotations

import time
from contextlib import ContextDecorator
from typing import Any


class TimerError(Exception):
    """A custom exception used to report errors in use of :class:`Timer` class."""

    pass


class Timer(ContextDecorator):
    """A timer for performance measurements.

    A class to keep track of time for performance measurement. It allows timing via context
    managers and decorators as well.

    It uses the `time.perf_counter` function to measure time. This function returns the value
    of a monotonic performance counter in fractional seconds. It has the highest resolution
    available on the system; only the difference between two readings is meaningful.

    As a regular object:

    .. code-block:: python

        import time

        from omni.isaac.orbit.utils.timer import Timer

        timer = Timer()
        timer.start()
        time.sleep(1)
        print(1 <= timer.time_elapsed <= 2)  # Output: True

        time.sleep(1)
        timer.stop()
        print(2 <= timer.total_run_time)  # Output: True

    As a context manager:

    .. code-block:: python

        import time

        from omni.isaac.orbit.utils.timer import Timer

        with Timer() as timer:
            time.sleep(1)
            print(1 <= timer.time_elapsed <= 2)  # Output: True

    Reference: https://gist.github.com/sumeet/1123871
    """

    def __init__(self, msg: str | None = None):
        """Initializes the timer.

        Args:
            msg: The message to display when using the timer class in a context manager.
                Defaults to None.
        """
        self._msg = msg
        self._start_time = None
        self._stop_time = None
        self._elapsed_time = None

    def __str__(self) -> str:
        """A string representation of the class object.

        Returns:
            A string containing the elapsed time.
        """
        return f"{self.time_elapsed:0.6f} seconds"

    """
    Properties
    """

    @property
    def time_elapsed(self) -> float:
        """The number of seconds that have elapsed since this timer started timing.

        Note:
            This is used for checking how much time has elapsed while the timer is still running.
        """
        return time.perf_counter() - self._start_time

    @property
    def total_run_time(self) -> float:
        """The number of seconds that elapsed from when the timer started to when it ended."""
        return self._elapsed_time

    """
    Operations
    """

    def start(self):
        """Start timing."""
        if self._start_time is not None:
            raise TimerError("Timer is running. Use .stop() to stop it")
        self._start_time = time.perf_counter()

    def stop(self):
        """Stop timing."""
        if self._start_time is None:
            raise TimerError("Timer is not running. Use .start() to start it")
        self._stop_time = time.perf_counter()
        self._elapsed_time = self._stop_time - self._start_time
        self._start_time = None

    """
    Context managers
    """

    def __enter__(self) -> Timer:
        """Start timing and return this `Timer` instance."""
        self.start()
        return self

    def __exit__(self, *exc_info: Any):
        """Stop timing."""
        self.stop()
        # print message
        if self._msg is not None:
            print(self._msg, f": {self._elapsed_time:0.6f} seconds")
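Because `Timer` subclasses `ContextDecorator`, it can also wrap a function directly, a usage the docstring does not show. A small sketch using only the class as defined above:

# Decorator usage: each call to the wrapped function enters and exits the
# timer context, so the message and elapsed time are printed per call.
import time

from omni.isaac.orbit.utils.timer import Timer

@Timer("Time taken by my_function")
def my_function():
    time.sleep(0.5)

my_function()  # prints e.g.: Time taken by my_function : 0.500123 seconds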
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/string.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing utilities for transforming strings and regular expressions."""

from __future__ import annotations

import ast
import importlib
import inspect
import re
from collections.abc import Callable, Sequence
from typing import Any

"""
String formatting.
"""


def to_camel_case(snake_str: str, to: str = "cC") -> str:
    """Converts a string from snake case to camel case.

    Args:
        snake_str: A string in snake case (i.e. with '_')
        to: Convention to convert string to. Defaults to "cC".

    Raises:
        ValueError: Invalid input argument `to`, i.e. not "cC" or "CC".

    Returns:
        A string in camel-case format.
    """
    # check input is correct
    if to not in ["cC", "CC"]:
        msg = "to_camel_case(): Choose a valid `to` argument (CC or cC)"
        raise ValueError(msg)
    # convert string to lower case and split
    components = snake_str.lower().split("_")
    if to == "cC":
        # We capitalize the first letter of each component except the first one
        # with the 'title' method and join them together.
        return components[0] + "".join(x.title() for x in components[1:])
    else:
        # Capitalize first letter in all the components
        return "".join(x.title() for x in components)


def to_snake_case(camel_str: str) -> str:
    """Converts a string from camel case to snake case.

    Args:
        camel_str: A string in camel case.

    Returns:
        A string in snake case (i.e. with '_')
    """
    camel_str = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_str)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", camel_str).lower()


"""
String <-> Callable operations.
"""


def is_lambda_expression(name: str) -> bool:
    """Checks if the input string is a lambda expression.

    Args:
        name: The input string.

    Returns:
        Whether the input string is a lambda expression.
    """
    try:
        # parse once and inspect the resulting syntax tree
        tree = ast.parse(name)
        return isinstance(tree.body[0], ast.Expr) and isinstance(tree.body[0].value, ast.Lambda)
    except SyntaxError:
        return False


def callable_to_string(value: Callable) -> str:
    """Converts a callable object to a string.

    Args:
        value: A callable object.

    Raises:
        ValueError: When the input argument is not a callable object.

    Returns:
        A string representation of the callable object.
    """
    # check if callable
    if not callable(value):
        raise ValueError(f"The input argument is not callable: {value}.")
    # check if lambda function
    if value.__name__ == "<lambda>":
        return f"lambda {inspect.getsourcelines(value)[0][0].strip().split('lambda')[1].strip().split(',')[0]}"
    else:
        # get the module and function name
        module_name = value.__module__
        function_name = value.__name__
        # return the string
        return f"{module_name}:{function_name}"


def string_to_callable(name: str) -> Callable:
    """Resolves the module and function names to return the function.

    Args:
        name: The function name. The format should be 'module:attribute_name' or a
            lambda expression of format: 'lambda x: x'.

    Raises:
        ValueError: When the resolved attribute is not a function.
        ValueError: When the module cannot be found.

    Returns:
        Callable: The function loaded from the module.
    """
    try:
        if is_lambda_expression(name):
            callable_object = eval(name)
        else:
            mod_name, attr_name = name.split(":")
            mod = importlib.import_module(mod_name)
            callable_object = getattr(mod, attr_name)
        # check if attribute is callable
        if callable(callable_object):
            return callable_object
        else:
            raise AttributeError(f"The imported object is not callable: '{name}'")
    except (ValueError, ModuleNotFoundError) as e:
        msg = (
            f"Could not resolve the input string '{name}' into callable object."
            " The format of input should be 'module:attribute_name'.\n"
            f"Received the error:\n {e}."
        )
        raise ValueError(msg)


"""
Regex operations.
"""


def resolve_matching_names(
    keys: str | Sequence[str], list_of_strings: Sequence[str], preserve_order: bool = False
) -> tuple[list[int], list[str]]:
    """Match a list of query regular expressions against a list of strings and return the matched indices and names.

    When a list of query regular expressions is provided, the function checks each target string against each
    query regular expression and returns the indices of the matched strings and the matched strings.

    If :attr:`preserve_order` is False, the ordering of the matched indices and names is the same as the order
    of the provided list of target strings. This means that the ordering is dictated by the order of the target
    strings and not the order of the query regular expressions.

    If :attr:`preserve_order` is True, the matched indices and names are instead grouped by the order of the
    provided query regular expressions.

    For example, consider the list of strings is ['a', 'b', 'c', 'd', 'e'] and the regular expressions are
    ['a|c', 'b']. If :attr:`preserve_order` is False, then the function will return the indices of the matched
    strings and the strings as: ([0, 1, 2], ['a', 'b', 'c']). When :attr:`preserve_order` is True, it will
    return them as: ([0, 2, 1], ['a', 'c', 'b']).

    Note:
        The function does not sort the indices. It returns the indices in the order they are found.

    Args:
        keys: A regular expression or a list of regular expressions to match the strings in the list.
        list_of_strings: A list of strings to match.
        preserve_order: Whether to preserve the order of the query keys in the returned values. Defaults to False.

    Returns:
        A tuple of lists containing the matched indices and names.

    Raises:
        ValueError: When multiple matches are found for a string in the list.
        ValueError: When not all regular expressions are matched.
    """
    # resolve name keys
    if isinstance(keys, str):
        keys = [keys]
    # find matching patterns
    index_list = []
    names_list = []
    key_idx_list = []
    # book-keeping to check that we always have a one-to-one mapping
    # i.e. each target string should match only one regular expression
    target_strings_match_found = [None for _ in range(len(list_of_strings))]
    keys_match_found = [[] for _ in range(len(keys))]
    # loop over all target strings
    for target_index, potential_match_string in enumerate(list_of_strings):
        for key_index, re_key in enumerate(keys):
            if re.fullmatch(re_key, potential_match_string):
                # check if match already found
                if target_strings_match_found[target_index]:
                    raise ValueError(
                        f"Multiple matches for '{potential_match_string}':"
                        f" '{target_strings_match_found[target_index]}' and '{re_key}'!"
                    )
                # add to list
                target_strings_match_found[target_index] = re_key
                index_list.append(target_index)
                names_list.append(potential_match_string)
                key_idx_list.append(key_index)
                # add for regex key
                keys_match_found[key_index].append(potential_match_string)
    # reorder keys if they should be returned in order of the query keys
    if preserve_order:
        reordered_index_list = [None] * len(index_list)
        global_index = 0
        for key_index in range(len(keys)):
            for key_idx_position, key_idx_entry in enumerate(key_idx_list):
                if key_idx_entry == key_index:
                    reordered_index_list[key_idx_position] = global_index
                    global_index += 1
        # reorder index and names list
        index_list_reorder = [None] * len(index_list)
        names_list_reorder = [None] * len(index_list)
        for idx, reorder_idx in enumerate(reordered_index_list):
            index_list_reorder[reorder_idx] = index_list[idx]
            names_list_reorder[reorder_idx] = names_list[idx]
        # update
        index_list = index_list_reorder
        names_list = names_list_reorder
    # check that all regular expressions are matched
    if not all(keys_match_found):
        # make this print nicely aligned for debugging
        msg = "\n"
        for key, value in zip(keys, keys_match_found):
            msg += f"\t{key}: {value}\n"
        msg += f"Available strings: {list_of_strings}\n"
        # raise error
        raise ValueError(
            f"Not all regular expressions are matched! Please check that the regular expressions are correct: {msg}"
        )
    # return
    return index_list, names_list


def resolve_matching_names_values(
    data: dict[str, Any], list_of_strings: Sequence[str], preserve_order: bool = False
) -> tuple[list[int], list[str], list[Any]]:
    """Match a list of regular expressions in a dictionary against a list of strings and return
    the matched indices, names, and values.

    If :attr:`preserve_order` is False, the ordering of the matched indices and names is the same as the order
    of the provided list of target strings. This means that the ordering is dictated by the order of the target
    strings and not the order of the query regular expressions.

    If :attr:`preserve_order` is True, the matched indices and names are instead grouped by the order of the
    provided query regular expressions.

    For example, consider the dictionary is {"a|d|e": 1, "b|c": 2}, the list of strings is
    ['a', 'b', 'c', 'd', 'e']. If :attr:`preserve_order` is False, then the function will return the indices
    of the matched strings, the matched strings, and the values as:
    ([0, 1, 2, 3, 4], ['a', 'b', 'c', 'd', 'e'], [1, 2, 2, 1, 1]). When :attr:`preserve_order` is True, it
    will return them as: ([0, 3, 4, 1, 2], ['a', 'd', 'e', 'b', 'c'], [1, 1, 1, 2, 2]).

    Args:
        data: A dictionary of regular expressions and values to match the strings in the list.
        list_of_strings: A list of strings to match.
        preserve_order: Whether to preserve the order of the query keys in the returned values. Defaults to False.

    Returns:
        A tuple of lists containing the matched indices, names, and values.

    Raises:
        TypeError: When the input argument :attr:`data` is not a dictionary.
        ValueError: When multiple matches are found for a string in the dictionary.
        ValueError: When not all regular expressions in the data keys are matched.
    """
    # check valid input
    if not isinstance(data, dict):
        raise TypeError(f"Input argument `data` should be a dictionary. Received: {data}")
    # find matching patterns
    index_list = []
    names_list = []
    values_list = []
    key_idx_list = []
    # book-keeping to check that we always have a one-to-one mapping
    # i.e. each target string should match only one regular expression
    target_strings_match_found = [None for _ in range(len(list_of_strings))]
    keys_match_found = [[] for _ in range(len(data))]
    # loop over all target strings
    for target_index, potential_match_string in enumerate(list_of_strings):
        for key_index, (re_key, value) in enumerate(data.items()):
            if re.fullmatch(re_key, potential_match_string):
                # check if match already found
                if target_strings_match_found[target_index]:
                    raise ValueError(
                        f"Multiple matches for '{potential_match_string}':"
                        f" '{target_strings_match_found[target_index]}' and '{re_key}'!"
                    )
                # add to list
                target_strings_match_found[target_index] = re_key
                index_list.append(target_index)
                names_list.append(potential_match_string)
                values_list.append(value)
                key_idx_list.append(key_index)
                # add for regex key
                keys_match_found[key_index].append(potential_match_string)
    # reorder keys if they should be returned in order of the query keys
    if preserve_order:
        reordered_index_list = [None] * len(index_list)
        global_index = 0
        for key_index in range(len(data)):
            for key_idx_position, key_idx_entry in enumerate(key_idx_list):
                if key_idx_entry == key_index:
                    reordered_index_list[key_idx_position] = global_index
                    global_index += 1
        # reorder index and names list
        index_list_reorder = [None] * len(index_list)
        names_list_reorder = [None] * len(index_list)
        values_list_reorder = [None] * len(index_list)
        for idx, reorder_idx in enumerate(reordered_index_list):
            index_list_reorder[reorder_idx] = index_list[idx]
            names_list_reorder[reorder_idx] = names_list[idx]
            values_list_reorder[reorder_idx] = values_list[idx]
        # update
        index_list = index_list_reorder
        names_list = names_list_reorder
        values_list = values_list_reorder
    # check that all regular expressions are matched
    if not all(keys_match_found):
        # make this print nicely aligned for debugging
        msg = "\n"
        for key, value in zip(data.keys(), keys_match_found):
            msg += f"\t{key}: {value}\n"
        msg += f"Available strings: {list_of_strings}\n"
        # raise error
        raise ValueError(
            f"Not all regular expressions are matched! Please check that the regular expressions are correct: {msg}"
        )
    # return
    return index_list, names_list, values_list
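A quick illustration of the regex helpers above; the joint names are made up for the example:

# Matching joint names against regex patterns, as done for articulations.
from omni.isaac.orbit.utils.string import resolve_matching_names, to_camel_case

joints = ["base_joint", "arm_joint_1", "arm_joint_2", "gripper_joint"]

# default: results follow target-string order
ids, names = resolve_matching_names("arm_joint_.*", joints)
print(ids, names)  # [1, 2] ['arm_joint_1', 'arm_joint_2']

# preserve_order=True: results grouped by query-key order instead
ids, names = resolve_matching_names(["gripper.*", "arm.*"], joints, preserve_order=True)
print(names)  # ['gripper_joint', 'arm_joint_1', 'arm_joint_2']

print(to_camel_case("base_joint", to="CC"))  # BaseJoint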
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package containing utilities for common operations and helper functions."""

from .array import *
from .configclass import configclass
from .dict import *
from .string import *
from .timer import Timer
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/array.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing utilities for working with different array backends."""

from __future__ import annotations

import numpy as np
import torch
from typing import Union

import warp as wp

TensorData = Union[np.ndarray, torch.Tensor, wp.array]
"""Type definition for a tensor data.

Union of numpy, torch, and warp arrays.
"""

TENSOR_TYPES = {
    "numpy": np.ndarray,
    "torch": torch.Tensor,
    "warp": wp.array,
}
"""A dictionary containing the types for each backend.

The keys are the name of the backend ("numpy", "torch", "warp") and the values are the corresponding type
(``np.ndarray``, ``torch.Tensor``, ``wp.array``).
"""

TENSOR_TYPE_CONVERSIONS = {
    "numpy": {wp.array: lambda x: x.numpy(), torch.Tensor: lambda x: x.detach().cpu().numpy()},
    "torch": {wp.array: lambda x: wp.torch.to_torch(x), np.ndarray: lambda x: torch.from_numpy(x)},
    "warp": {np.ndarray: lambda x: wp.array(x), torch.Tensor: lambda x: wp.torch.from_torch(x)},
}
"""A nested dictionary containing the conversion functions for each backend.

The keys of the outer dictionary are the name of target backend ("numpy", "torch", "warp"). The keys of the
inner dictionary are the source backend (``np.ndarray``, ``torch.Tensor``, ``wp.array``).
"""


def convert_to_torch(
    array: TensorData,
    dtype: torch.dtype = None,
    device: torch.device | str | None = None,
) -> torch.Tensor:
    """Converts a given array into a torch tensor.

    The function tries to convert the array to a torch tensor. If the array is a numpy/warp array, or a
    Python list/tuple, it is converted to a torch tensor. If the array is already a torch tensor, it is
    returned directly.

    If ``device`` is None, then the function deduces the current device of the data. For numpy arrays,
    this defaults to "cpu", for torch tensors it is "cpu" or "cuda", and for warp arrays it is "cuda".

    Note:
        Since PyTorch does not support unsigned integer types, unsigned integer arrays are converted to
        signed integer arrays. This is done by casting the array to the corresponding signed integer type.

    Args:
        array: The input array. It can be a numpy array, warp array, python list/tuple, or torch tensor.
        dtype: Target data-type for the tensor.
        device: The target device for the tensor. Defaults to None.

    Returns:
        The converted array as torch tensor.
    """
    # Convert array to tensor
    # if the datatype is not currently supported by torch we need to improvise
    # supported types are: https://pytorch.org/docs/stable/tensors.html
    if isinstance(array, torch.Tensor):
        tensor = array
    elif isinstance(array, np.ndarray):
        if array.dtype == np.uint32:
            array = array.astype(np.int64)
        # need to deal with object arrays (np.void) separately
        tensor = torch.from_numpy(array)
    elif isinstance(array, wp.array):
        if array.dtype == wp.uint32:
            array = array.view(wp.int32)
        tensor = wp.to_torch(array)
    else:
        tensor = torch.Tensor(array)
    # Convert tensor to the right device
    if device is not None and str(tensor.device) != str(device):
        tensor = tensor.to(device)
    # Convert dtype of tensor if requested
    if dtype is not None and tensor.dtype != dtype:
        tensor = tensor.type(dtype)
    return tensor
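A short sketch of pushing data through `convert_to_torch`; the values are illustrative and the device string depends on your setup:

# uint32 is unsupported by torch, so the function casts to int64 first;
# torch tensors pass through unchanged unless dtype/device conversion is requested.
import numpy as np
import torch

from omni.isaac.orbit.utils.array import convert_to_torch

data = np.arange(12, dtype=np.uint32).reshape(3, 4)
tensor = convert_to_torch(data, dtype=torch.float32, device="cpu")
print(tensor.dtype, tensor.shape)  # torch.float32 torch.Size([3, 4])

same = convert_to_torch(tensor)
print(same is tensor)  # True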
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/math.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module containing utilities for various math operations."""

from __future__ import annotations

import numpy as np
import torch
import torch.nn.functional
from typing import Literal

"""
General
"""


@torch.jit.script
def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
    """Normalizes a given input tensor to a range of [-1, 1].

    .. note::
        It uses pytorch broadcasting functionality to deal with batched input.

    Args:
        x: Input tensor of shape (N, dims).
        lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
        upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).

    Returns:
        Normalized transform of the tensor. Shape is (N, dims).
    """
    # default value of center
    offset = (lower + upper) * 0.5
    # return normalized tensor
    return 2 * (x - offset) / (upper - lower)


@torch.jit.script
def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
    """De-normalizes a given input tensor from range of [-1, 1] to (lower, upper).

    .. note::
        It uses pytorch broadcasting functionality to deal with batched input.

    Args:
        x: Input tensor of shape (N, dims).
        lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
        upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).

    Returns:
        De-normalized transform of the tensor. Shape is (N, dims).
    """
    # default value of center
    offset = (lower + upper) * 0.5
    # return de-normalized tensor
    return x * (upper - lower) * 0.5 + offset


@torch.jit.script
def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
    """Clamps a given input tensor to (lower, upper).

    It uses pytorch broadcasting functionality to deal with batched input.

    Args:
        x: Input tensor of shape (N, dims).
        lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
        upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).

    Returns:
        Clamped transform of the tensor. Shape is (N, dims).
    """
    return torch.max(torch.min(x, upper), lower)


@torch.jit.script
def normalize(x: torch.Tensor, eps: float = 1e-9) -> torch.Tensor:
    """Normalizes a given input tensor to unit length.

    Args:
        x: Input tensor of shape (N, dims).
        eps: A small value to avoid division by zero. Defaults to 1e-9.

    Returns:
        Normalized tensor of shape (N, dims).
    """
    return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)


@torch.jit.script
def wrap_to_pi(angles: torch.Tensor) -> torch.Tensor:
    """Wraps input angles (in radians) to the range [-pi, pi].

    Args:
        angles: Input angles of any shape.

    Returns:
        Angles in the range [-pi, pi].
    """
    angles = angles.clone()
    angles %= 2 * torch.pi
    angles -= 2 * torch.pi * (angles > torch.pi)
    return angles


@torch.jit.script
def copysign(mag: float, other: torch.Tensor) -> torch.Tensor:
    """Create a new floating-point tensor with the magnitude of input and the sign of other, element-wise.

    Note:
        The implementation follows from `torch.copysign`. The function allows a scalar magnitude.

    Args:
        mag: The magnitude scalar.
        other: The tensor containing values whose signbits are applied to magnitude.

    Returns:
        The output tensor.
    """
    mag = torch.tensor(mag, device=other.device, dtype=torch.float).repeat(other.shape[0])
    return torch.abs(mag) * torch.sign(other)


"""
Rotation
"""


@torch.jit.script
def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor:
    """Convert rotations given as quaternions to rotation matrices.

    Args:
        quaternions: The quaternion orientation in (w, x, y, z). Shape is (..., 4).

    Returns:
        Rotation matrices. The shape is (..., 3, 3).

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70
    """
    r, i, j, k = torch.unbind(quaternions, -1)
    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
    two_s = 2.0 / (quaternions * quaternions).sum(-1)
    o = torch.stack(
        (
            1 - two_s * (j * j + k * k),
            two_s * (i * j - k * r),
            two_s * (i * k + j * r),
            two_s * (i * j + k * r),
            1 - two_s * (i * i + k * k),
            two_s * (j * k - i * r),
            two_s * (i * k - j * r),
            two_s * (j * k + i * r),
            1 - two_s * (i * i + j * j),
        ),
        -1,
    )
    return o.reshape(quaternions.shape[:-1] + (3, 3))


def convert_quat(quat: torch.Tensor | np.ndarray, to: Literal["xyzw", "wxyz"] = "xyzw") -> torch.Tensor | np.ndarray:
    """Converts quaternion from one convention to another.

    The convention to convert TO is specified as an optional argument. If to == 'xyzw',
    then the input is in 'wxyz' format, and vice-versa.

    Args:
        quat: The quaternion of shape (..., 4).
        to: Convention to convert quaternion to. Defaults to "xyzw".

    Returns:
        The converted quaternion in specified convention.

    Raises:
        ValueError: Invalid input argument `to`, i.e. not "xyzw" or "wxyz".
        ValueError: Invalid shape of input `quat`, i.e. not (..., 4,).
    """
    # check input is correct
    if quat.shape[-1] != 4:
        msg = f"Expected input quaternion shape mismatch: {quat.shape} != (..., 4)."
        raise ValueError(msg)
    if to not in ["xyzw", "wxyz"]:
        msg = f"Expected input argument `to` to be 'xyzw' or 'wxyz'. Received: {to}."
        raise ValueError(msg)
    # check if input is numpy array (we support this backend since some classes use numpy)
    if isinstance(quat, np.ndarray):
        # use numpy functions
        if to == "xyzw":
            # wxyz -> xyzw
            return np.roll(quat, -1, axis=-1)
        else:
            # xyzw -> wxyz
            return np.roll(quat, 1, axis=-1)
    else:
        # convert to torch (sanity check)
        if not isinstance(quat, torch.Tensor):
            quat = torch.tensor(quat, dtype=float)
        # convert to specified quaternion type
        if to == "xyzw":
            # wxyz -> xyzw
            return quat.roll(-1, dims=-1)
        else:
            # xyzw -> wxyz
            return quat.roll(1, dims=-1)


@torch.jit.script
def quat_conjugate(q: torch.Tensor) -> torch.Tensor:
    """Computes the conjugate of a quaternion.

    Args:
        q: The quaternion orientation in (w, x, y, z). Shape is (..., 4).

    Returns:
        The conjugate quaternion in (w, x, y, z). Shape is (..., 4).
    """
    shape = q.shape
    q = q.reshape(-1, 4)
    return torch.cat((q[:, 0:1], -q[:, 1:]), dim=-1).view(shape)


@torch.jit.script
def quat_inv(q: torch.Tensor) -> torch.Tensor:
    """Compute the inverse of a quaternion.

    Args:
        q: The quaternion orientation in (w, x, y, z). Shape is (N, 4).

    Returns:
        The inverse quaternion in (w, x, y, z). Shape is (N, 4).
    """
    return normalize(quat_conjugate(q))


@torch.jit.script
def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tensor) -> torch.Tensor:
    """Convert rotations given as Euler angles in radians to Quaternions.

    Note:
        The euler angles are assumed in XYZ convention.

    Args:
        roll: Rotation around x-axis (in radians). Shape is (N,).
        pitch: Rotation around y-axis (in radians). Shape is (N,).
        yaw: Rotation around z-axis (in radians). Shape is (N,).

    Returns:
        The quaternion in (w, x, y, z). Shape is (N, 4).
    """
    cy = torch.cos(yaw * 0.5)
    sy = torch.sin(yaw * 0.5)
    cr = torch.cos(roll * 0.5)
    sr = torch.sin(roll * 0.5)
    cp = torch.cos(pitch * 0.5)
    sp = torch.sin(pitch * 0.5)
    # compute quaternion
    qw = cy * cr * cp + sy * sr * sp
    qx = cy * sr * cp - sy * cr * sp
    qy = cy * cr * sp + sy * sr * cp
    qz = sy * cr * cp - cy * sr * sp

    return torch.stack([qw, qx, qy, qz], dim=-1)


@torch.jit.script
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    """Returns torch.sqrt(torch.max(0, x)) but with a zero sub-gradient where x is 0.

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L91-L99
    """
    ret = torch.zeros_like(x)
    positive_mask = x > 0
    ret[positive_mask] = torch.sqrt(x[positive_mask])
    return ret


@torch.jit.script
def quat_from_matrix(matrix: torch.Tensor) -> torch.Tensor:
    """Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: The rotation matrices. Shape is (..., 3, 3).

    Returns:
        The quaternion in (w, x, y, z). Shape is (..., 4).

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L102-L161
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(matrix.reshape(batch_dim + (9,)), dim=-1)

    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)
    return quat_candidates[torch.nn.functional.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :].reshape(
        batch_dim + (4,)
    )


def _axis_angle_rotation(axis: Literal["X", "Y", "Z"], angle: torch.Tensor) -> torch.Tensor:
    """Return the rotation matrices for one of the rotations about an axis of which Euler angles describe,
    for each value of the angle given.

    Args:
        axis: Axis label "X", "Y", or "Z".
        angle: Euler angles in radians of any shape.

    Returns:
        Rotation matrices. Shape is (..., 3, 3).

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L164-L191
    """
    cos = torch.cos(angle)
    sin = torch.sin(angle)
    one = torch.ones_like(angle)
    zero = torch.zeros_like(angle)

    if axis == "X":
        R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
    elif axis == "Y":
        R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
    elif axis == "Z":
        R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
    else:
        raise ValueError("letter must be either X, Y or Z.")

    return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))


def matrix_from_euler(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
    """Convert rotations given as Euler angles in radians to rotation matrices.

    Args:
        euler_angles: Euler angles in radians. Shape is (..., 3).
        convention: Convention string of three uppercase letters from {"X", "Y", "Z"}.
            For example, "XYZ" means that the rotations should be applied first about x,
            then y, then z.

    Returns:
        Rotation matrices. Shape is (..., 3, 3).

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L194-L220
    """
    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
        raise ValueError("Invalid input euler angles.")
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    matrices = [_axis_angle_rotation(c, e) for c, e in zip(convention, torch.unbind(euler_angles, -1))]
    # return functools.reduce(torch.matmul, matrices)
    return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2])


@torch.jit.script
def euler_xyz_from_quat(quat: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert rotations given as quaternions to Euler angles in radians.

    Note:
        The euler angles are assumed in XYZ convention.

    Args:
        quat: The quaternion orientation in (w, x, y, z). Shape is (N, 4).

    Returns:
        A tuple containing roll-pitch-yaw. Each element is a tensor of shape (N,).

    Reference:
        https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
    """
    q_w, q_x, q_y, q_z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
    # roll (x-axis rotation)
    sin_roll = 2.0 * (q_w * q_x + q_y * q_z)
    cos_roll = 1 - 2 * (q_x * q_x + q_y * q_y)
    roll = torch.atan2(sin_roll, cos_roll)
    # pitch (y-axis rotation)
    sin_pitch = 2.0 * (q_w * q_y - q_z * q_x)
    pitch = torch.where(torch.abs(sin_pitch) >= 1, copysign(torch.pi / 2.0, sin_pitch), torch.asin(sin_pitch))
    # yaw (z-axis rotation)
    sin_yaw = 2.0 * (q_w * q_z + q_x * q_y)
    cos_yaw = 1 - 2 * (q_y * q_y + q_z * q_z)
    yaw = torch.atan2(sin_yaw, cos_yaw)

    return roll % (2 * torch.pi), pitch % (2 * torch.pi), yaw % (2 * torch.pi)  # TODO: why not wrap_to_pi here ?


@torch.jit.script
def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    """Multiply two quaternions together.

    Args:
        q1: The first quaternion in (w, x, y, z). Shape is (..., 4).
        q2: The second quaternion in (w, x, y, z). Shape is (..., 4).

    Returns:
        The product of the two quaternions in (w, x, y, z). Shape is (..., 4).

    Raises:
        ValueError: Input shapes of ``q1`` and ``q2`` are not matching.
    """
    # check input is correct
    if q1.shape != q2.shape:
        msg = f"Expected input quaternion shape mismatch: {q1.shape} != {q2.shape}."
        raise ValueError(msg)
    # reshape to (N, 4) for multiplication
    shape = q1.shape
    q1 = q1.reshape(-1, 4)
    q2 = q2.reshape(-1, 4)
    # extract components from quaternions
    w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]
    w2, x2, y2, z2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3]
    # perform multiplication
    ww = (z1 + x1) * (x2 + y2)
    yy = (w1 - y1) * (w2 + z2)
    zz = (w1 + y1) * (w2 - z2)
    xx = ww + yy + zz
    qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
    w = qq - ww + (z1 - y1) * (y2 - z2)
    x = qq - xx + (x1 + w1) * (x2 + w2)
    y = qq - yy + (w1 - x1) * (y2 + z2)
    z = qq - zz + (z1 + y1) * (w2 - x2)

    return torch.stack([w, x, y, z], dim=-1).view(shape)


@torch.jit.script
def quat_box_minus(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    """The box-minus operator (quaternion difference) between two quaternions.

    Args:
        q1: The first quaternion in (w, x, y, z). Shape is (N, 4).
        q2: The second quaternion in (w, x, y, z). Shape is (N, 4).

    Returns:
        The difference between the two quaternions. Shape is (N, 3).

    Reference:
        https://docs.leggedrobotics.com/kindr/cheatsheet_latest.pdf
    """
    quat_diff = quat_mul(q1, quat_conjugate(q2))  # q1 * q2^-1
    re = quat_diff[:, 0]  # real part, q = [w, x, y, z] = [re, im]
    im = quat_diff[:, 1:]  # imaginary part
    norm_im = torch.norm(im, dim=1)
    scale = 2.0 * torch.where(norm_im > 1.0e-7, torch.atan(norm_im / re) / norm_im, torch.sign(re))
    return scale.unsqueeze(-1) * im


@torch.jit.script
def yaw_quat(quat: torch.Tensor) -> torch.Tensor:
    """Extract the yaw component of a quaternion.

    Args:
        quat: The orientation in (w, x, y, z). Shape is (..., 4)

    Returns:
        A quaternion with only yaw component.
    """
    shape = quat.shape
    quat_yaw = quat.clone().view(-1, 4)
    qw = quat_yaw[:, 0]
    qx = quat_yaw[:, 1]
    qy = quat_yaw[:, 2]
    qz = quat_yaw[:, 3]
    yaw = torch.atan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz))
    quat_yaw[:] = 0.0
    quat_yaw[:, 3] = torch.sin(yaw / 2)
    quat_yaw[:, 0] = torch.cos(yaw / 2)
    quat_yaw = normalize(quat_yaw)
    return quat_yaw.view(shape)


@torch.jit.script
def quat_apply(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
    """Apply a quaternion rotation to a vector.

    Args:
        quat: The quaternion in (w, x, y, z). Shape is (..., 4).
        vec: The vector in (x, y, z). Shape is (..., 3).

    Returns:
        The rotated vector in (x, y, z). Shape is (..., 3).
    """
    # store shape
    shape = vec.shape
    # reshape to (N, 3) for multiplication
    quat = quat.reshape(-1, 4)
    vec = vec.reshape(-1, 3)
    # extract components from quaternions
    xyz = quat[:, 1:]
    t = xyz.cross(vec, dim=-1) * 2
    return (vec + quat[:, 0:1] * t + xyz.cross(t, dim=-1)).view(shape)


@torch.jit.script
def quat_apply_yaw(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
    """Rotate a vector only around the yaw-direction.

    Args:
        quat: The orientation in (w, x, y, z). Shape is (N, 4).
        vec: The vector in (x, y, z). Shape is (N, 3).

    Returns:
        The rotated vector in (x, y, z). Shape is (N, 3).
    """
    quat_yaw = yaw_quat(quat)
    return quat_apply(quat_yaw, vec)


@torch.jit.script
def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate a vector by a quaternion.

    Args:
        q: The quaternion in (w, x, y, z). Shape is (N, 4).
        v: The vector in (x, y, z). Shape is (N, 3).

    Returns:
        The rotated vector in (x, y, z). Shape is (N, 3).
    """
    shape = q.shape
    q_w = q[:, 0]
    q_vec = q[:, 1:]
    a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1)
    b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
    c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0
    return a + b + c


@torch.jit.script
def quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    """Rotate a vector by the inverse of a quaternion.

    Args:
        q: The quaternion in (w, x, y, z). Shape is (N, 4).
        v: The vector in (x, y, z). Shape is (N, 3).

    Returns:
        The rotated vector in (x, y, z). Shape is (N, 3).
    """
    shape = q.shape
    q_w = q[:, 0]
    q_vec = q[:, 1:]
    a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1)
    b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
    c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0
    return a - b + c


@torch.jit.script
def quat_from_angle_axis(angle: torch.Tensor, axis: torch.Tensor) -> torch.Tensor:
    """Convert rotations given as angle-axis to quaternions.

    Args:
        angle: The angle turned anti-clockwise in radians around the vector's direction. Shape is (N,).
        axis: The axis of rotation. Shape is (N, 3).

    Returns:
        The quaternion in (w, x, y, z). Shape is (N, 4).
    """
    theta = (angle / 2).unsqueeze(-1)
    xyz = normalize(axis) * theta.sin()
    w = theta.cos()
    return normalize(torch.cat([w, xyz], dim=-1))


@torch.jit.script
def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tensor:
    """Convert rotations given as quaternions to axis/angle.

    Args:
        quat: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
        eps: The tolerance for Taylor approximation. Defaults to 1.0e-6.

    Returns:
        Rotations given as a vector in axis angle form. Shape is (..., 3).
        The vector's magnitude is the angle turned anti-clockwise in radians around the vector's direction.

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L526-L554
    """
    # Modified to take in quat as [q_w, q_x, q_y, q_z]
    # Quaternion is [q_w, q_x, q_y, q_z] = [cos(theta/2), n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2)]
    # Axis-angle is [a_x, a_y, a_z] = [theta * n_x, theta * n_y, theta * n_z]
    # Thus, axis-angle is [q_x, q_y, q_z] / (sin(theta/2) / theta)
    # When theta = 0, (sin(theta/2) / theta) is undefined
    # However, as theta --> 0, we can use the Taylor approximation 1/2 - theta^2 / 48
    quat = quat * (1.0 - 2.0 * (quat[..., 0:1] < 0.0))
    mag = torch.linalg.norm(quat[..., 1:], dim=-1)
    half_angle = torch.atan2(mag, quat[..., 0])
    angle = 2.0 * half_angle
    # check whether to apply Taylor approximation
    sin_half_angles_over_angles = torch.where(
        torch.abs(angle.abs()) > eps, torch.sin(half_angle) / angle, 0.5 - angle * angle / 48
    )
    return quat[..., 1:4] / sin_half_angles_over_angles.unsqueeze(-1)


@torch.jit.script
def quat_error_magnitude(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    """Computes the rotation difference between two quaternions.

    Args:
        q1: The first quaternion in (w, x, y, z). Shape is (..., 4).
        q2: The second quaternion in (w, x, y, z). Shape is (..., 4).

    Returns:
        Angular error between input quaternions in radians.
    """
    quat_diff = quat_mul(q1, quat_conjugate(q2))
    return torch.norm(axis_angle_from_quat(quat_diff), dim=-1)


@torch.jit.script
def skew_symmetric_matrix(vec: torch.Tensor) -> torch.Tensor:
    """Computes the skew-symmetric matrix of a vector.

    Args:
        vec: The input vector. Shape is (3,) or (N, 3).

    Returns:
        The skew-symmetric matrix. Shape is (1, 3, 3) or (N, 3, 3).

    Raises:
        ValueError: If input tensor is not of shape (..., 3).
    """
    # check input is correct
    if vec.shape[-1] != 3:
        raise ValueError(f"Expected input vector shape mismatch: {vec.shape} != (..., 3).")
    # unsqueeze the last dimension
    if vec.ndim == 1:
        vec = vec.unsqueeze(0)
    # create a skew-symmetric matrix
    skew_sym_mat = torch.zeros(vec.shape[0], 3, 3, device=vec.device, dtype=vec.dtype)
    skew_sym_mat[:, 0, 1] = -vec[:, 2]
    skew_sym_mat[:, 0, 2] = vec[:, 1]
    skew_sym_mat[:, 1, 2] = -vec[:, 0]
    skew_sym_mat[:, 1, 0] = vec[:, 2]
    skew_sym_mat[:, 2, 0] = -vec[:, 1]
    skew_sym_mat[:, 2, 1] = vec[:, 0]
    return skew_sym_mat


"""
Transformations
"""


def is_identity_pose(pos: torch.Tensor, rot: torch.Tensor) -> bool:
    """Checks if input poses are identity transforms.

    The function checks if the input position and orientation are close to zero and
    identity respectively using L2-norm. It does NOT check the error in the orientation.

    Args:
        pos: The cartesian position. Shape is (N, 3).
        rot: The quaternion in (w, x, y, z). Shape is (N, 4).

    Returns:
        True if all the input poses result in identity transform. Otherwise, False.
    """
    # create identity transformations
    pos_identity = torch.zeros_like(pos)
    rot_identity = torch.zeros_like(rot)
    rot_identity[..., 0] = 1
    # compare input to identity
    return torch.allclose(pos, pos_identity) and torch.allclose(rot, rot_identity)


# @torch.jit.script
def combine_frame_transforms(
    t01: torch.Tensor, q01: torch.Tensor, t12: torch.Tensor | None = None, q12: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""Combine transformations between two reference frames into a stationary frame.

    It performs the following transformation operation: :math:`T_{02} = T_{01} \times T_{12}`,
    where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.

    Args:
        t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3).
        q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
        t12: Position of frame 2 w.r.t. frame 1. Shape is (N, 3).
            Defaults to None, in which case the position is assumed to be zero.
        q12: Quaternion orientation of frame 2 w.r.t. frame 1 in (w, x, y, z). Shape is (N, 4).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        A tuple containing the position and orientation of frame 2 w.r.t. frame 0.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # compute orientation
    if q12 is not None:
        q02 = quat_mul(q01, q12)
    else:
        q02 = q01
    # compute translation
    if t12 is not None:
        t02 = t01 + quat_apply(q01, t12)
    else:
        t02 = t01

    return t02, q02


# @torch.jit.script
def subtract_frame_transforms(
    t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor | None = None, q02: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""Subtract transformations between two reference frames into a stationary frame.

    It performs the following transformation operation: :math:`T_{12} = T_{01}^{-1} \times T_{02}`,
    where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.

    Args:
        t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3).
        q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
        t02: Position of frame 2 w.r.t. frame 0. Shape is (N, 3).
            Defaults to None, in which case the position is assumed to be zero.
        q02: Quaternion orientation of frame 2 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        A tuple containing the position and orientation of frame 2 w.r.t. frame 1.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # compute orientation
    q10 = quat_inv(q01)
    if q02 is not None:
        q12 = quat_mul(q10, q02)
    else:
        q12 = q10
    # compute translation
    if t02 is not None:
        t12 = quat_apply(q10, t02 - t01)
    else:
        t12 = quat_apply(q10, -t01)

    return t12, q12


# @torch.jit.script
def compute_pose_error(
    t01: torch.Tensor,
    q01: torch.Tensor,
    t02: torch.Tensor,
    q02: torch.Tensor,
    rot_error_type: Literal["quat", "axis_angle"] = "axis_angle",
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute the position and orientation error between source and target frames.

    Args:
        t01: Position of source frame. Shape is (N, 3).
        q01: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4).
        t02: Position of target frame. Shape is (N, 3).
        q02: Quaternion orientation of target frame in (w, x, y, z). Shape is (N, 4).
        rot_error_type: The rotation error type to return: "quat", "axis_angle".
            Defaults to "axis_angle".

    Returns:
        A tuple containing position and orientation error. Shape of position error is (N, 3).
        Shape of orientation error depends on the value of :attr:`rot_error_type`:

        - If :attr:`rot_error_type` is "quat", the orientation error is returned
          as a quaternion. Shape is (N, 4).
        - If :attr:`rot_error_type` is "axis_angle", the orientation error is
          returned as an axis-angle vector. Shape is (N, 3).

    Raises:
        ValueError: Invalid rotation error type.
    """
    # Compute quaternion error (i.e., difference quaternion)
    # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
    # q_current_norm = q_current * q_current_conj
    source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 0]
    # q_current_inv = q_current_conj / q_current_norm
    source_quat_inv = quat_conjugate(q01) / source_quat_norm.unsqueeze(-1)
    # q_error = q_target * q_current_inv
    quat_error = quat_mul(q02, source_quat_inv)
    # Compute position error
    pos_error = t02 - t01
    # return error based on specified type
    if rot_error_type == "quat":
        return pos_error, quat_error
    elif rot_error_type == "axis_angle":
        # Convert to axis-angle error
        axis_angle_error = axis_angle_from_quat(quat_error)
        return pos_error, axis_angle_error
    else:
        raise ValueError(f"Unsupported orientation error type: {rot_error_type}. Valid: 'quat', 'axis_angle'.")


@torch.jit.script
def apply_delta_pose(
    source_pos: torch.Tensor, source_rot: torch.Tensor, delta_pose: torch.Tensor, eps: float = 1.0e-6
) -> tuple[torch.Tensor, torch.Tensor]:
    """Applies delta pose transformation on source pose.

    The first three elements of `delta_pose` are interpreted as cartesian position displacement.
    The remaining three elements of `delta_pose` are interpreted as orientation displacement
    in the angle-axis format.

    Args:
        source_pos: Position of source frame. Shape is (N, 3).
        source_rot: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4).
        delta_pose: Position and orientation displacements. Shape is (N, 6).
        eps: The tolerance to consider orientation displacement as zero. Defaults to 1.0e-6.

    Returns:
        A tuple containing the displaced position and orientation frames.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # number of poses given
    num_poses = source_pos.shape[0]
    device = source_pos.device
    # interpret delta_pose[:, 0:3] as target position displacements
    target_pos = source_pos + delta_pose[:, 0:3]
    # interpret delta_pose[:, 3:6] as target rotation displacements
    rot_actions = delta_pose[:, 3:6]
    angle = torch.linalg.vector_norm(rot_actions, dim=1)
    axis = rot_actions / angle.unsqueeze(-1)
    # change from axis-angle to quat convention
    identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat(num_poses, 1)
    rot_delta_quat = torch.where(
        angle.unsqueeze(-1).repeat(1, 4) > eps, quat_from_angle_axis(angle, axis), identity_quat
    )
    # TODO: Check if this is the correct order for this multiplication.
    target_rot = quat_mul(rot_delta_quat, source_rot)

    return target_pos, target_rot


# @torch.jit.script
def transform_points(
    points: torch.Tensor, pos: torch.Tensor | None = None, quat: torch.Tensor | None = None
) -> torch.Tensor:
    r"""Transform input points in a given frame to a target frame.

    This function transform points from a source frame to a target frame. The transformation is defined
    by the position :math:`t` and orientation :math:`R` of the target frame in the source frame.

    .. math::
        p_{target} = R_{target} \times p_{source} + t_{target}

    If the input `points` is a batch of points, the inputs `pos` and `quat` must be either a batch of
    positions and quaternions or a single position and quaternion. If the inputs `pos` and `quat` are
    a single position and quaternion, the same transformation is applied to all points in the batch.

    If either the inputs :attr:`pos` and :attr:`quat` are None, the corresponding transformation is not applied.

    Args:
        points: Points to transform. Shape is (N, P, 3) or (P, 3).
        pos: Position of the target frame. Shape is (N, 3) or (3,).
            Defaults to None, in which case the position is assumed to be zero.
        quat: Quaternion orientation of the target frame in (w, x, y, z). Shape is (N, 4) or (4,).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        Transformed points in the target frame. Shape is (N, P, 3) or (P, 3).

    Raises:
        ValueError: If the inputs `points` is not of shape (N, P, 3) or (P, 3).
        ValueError: If the inputs `pos` is not of shape (N, 3) or (3,).
        ValueError: If the inputs `quat` is not of shape (N, 4) or (4,).
    """
    points_batch = points.clone()
    # check if inputs are batched
    is_batched = points_batch.dim() == 3
    # -- check inputs
    if points_batch.dim() == 2:
        points_batch = points_batch[None]  # (P, 3) -> (1, P, 3)
    if points_batch.dim() != 3:
        raise ValueError(f"Expected points to have dim = 2 or dim = 3: got shape {points.shape}")
    if not (pos is None or pos.dim() == 1 or pos.dim() == 2):
        raise ValueError(f"Expected pos to have dim = 1 or dim = 2: got shape {pos.shape}")
    if not (quat is None or quat.dim() == 1 or quat.dim() == 2):
        raise ValueError(f"Expected quat to have dim = 1 or dim = 2: got shape {quat.shape}")
    # -- rotation
    if quat is not None:
        # convert to batched rotation matrix
        rot_mat = matrix_from_quat(quat)
        if rot_mat.dim() == 2:
            rot_mat = rot_mat[None]  # (3, 3) -> (1, 3, 3)
        # convert points to matching batch size (N, P, 3) -> (N, 3, P)
        # and apply rotation
        points_batch = torch.matmul(rot_mat, points_batch.transpose_(1, 2))
        # (N, 3, P) -> (N, P, 3)
        points_batch = points_batch.transpose_(1, 2)
    # -- translation
    if pos is not None:
        # convert to batched translation vector
        if pos.dim() == 1:
            pos = pos[None, None, :]  # (3,) -> (1, 1, 3)
        else:
            pos = pos[:, None, :]  # (N, 3) -> (N, 1, 3)
        # apply translation
        points_batch += pos
    # -- return points in same shape as input
    if not is_batched:
        points_batch = points_batch.squeeze(0)  # (1, P, 3) -> (P, 3)

    return points_batch


"""
Projection operations.
"""


@torch.jit.script
def unproject_depth(depth: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor:
    r"""Unproject depth image into a pointcloud.

    This function converts depth images into points given the calibration matrix of the camera.

    .. math::
        p_{3D} = K^{-1} \times [u, v, 1]^T \times d

    where :math:`p_{3D}` is the 3D point, :math:`d` is the depth value, :math:`u` and :math:`v` are
    the pixel coordinates and :math:`K` is the intrinsic matrix.

    If `depth` is a batch of depth images and `intrinsics` is a single intrinsic matrix, the same
    calibration matrix is applied to all depth images in the batch.

    The function assumes that the width and height are both greater than 1. This makes the function
    deal with many possible shapes of depth images and intrinsics matrices.

    Args:
        depth: The depth measurement. Shape is (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        intrinsics: A tensor providing camera's calibration matrix. Shape is (3, 3) or (N, 3, 3).

    Returns:
        The 3D coordinates of points. Shape is (P, 3) or (N, P, 3).

    Raises:
        ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3).
    """
    depth_batch = depth.clone()
    intrinsics_batch = intrinsics.clone()
    # check if inputs are batched
    is_batched = depth_batch.dim() == 4 or (depth_batch.dim() == 3 and depth_batch.shape[-1] != 1)
    # make sure inputs are batched
    if depth_batch.dim() == 3 and depth_batch.shape[-1] == 1:
        depth_batch = depth_batch.squeeze(dim=2)  # (H, W, 1) -> (H, W)
    if depth_batch.dim() == 2:
        depth_batch = depth_batch[None]  # (H, W) -> (1, H, W)
    if depth_batch.dim() == 4 and depth_batch.shape[-1] == 1:
        depth_batch = depth_batch.squeeze(dim=3)  # (N, H, W, 1) -> (N, H, W)
    if intrinsics_batch.dim() == 2:
        intrinsics_batch = intrinsics_batch[None]  # (3, 3) -> (1, 3, 3)
    # check shape of inputs
    if depth_batch.dim() != 3:
        raise ValueError(f"Expected depth images to have dim = 2 or 3 or 4: got shape {depth.shape}")
    if intrinsics_batch.dim() != 3:
        raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}")
    # get image height and width
    im_height, im_width = depth_batch.shape[1:]
    # create image points in homogeneous coordinates (3, H x W)
    indices_u = torch.arange(im_width, device=depth.device, dtype=depth.dtype)
    indices_v = torch.arange(im_height, device=depth.device, dtype=depth.dtype)
    img_indices = torch.stack(torch.meshgrid([indices_u, indices_v], indexing="ij"), dim=0).reshape(2, -1)
    pixels = torch.nn.functional.pad(img_indices, (0, 0, 0, 1), mode="constant", value=1.0)
    pixels = pixels.unsqueeze(0)  # (3, H x W) -> (1, 3, H x W)
    # unproject points into 3D space
    points = torch.matmul(torch.inverse(intrinsics_batch), pixels)  # (N, 3, H x W)
    points = points / points[:, -1, :].unsqueeze(1)  # normalize by last coordinate
    # flatten depth image (N, H, W) -> (N, H x W)
    depth_batch = depth_batch.transpose_(1, 2).reshape(depth_batch.shape[0], -1).unsqueeze(2)
    depth_batch = depth_batch.expand(-1, -1, 3)
    # scale points by depth
    points_xyz = points.transpose_(1, 2) * depth_batch  # (N, H x W, 3)
    # return points in same shape as input
    if not is_batched:
        points_xyz = points_xyz.squeeze(0)

    return points_xyz


@torch.jit.script
def project_points(points: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor:
    r"""Projects 3D points into 2D image plane.

    This project 3D points into a 2D image plane. The transformation is defined by the intrinsic
    matrix of the camera.

    .. math::
        \begin{align}
            p &= K \times p_{3D} \\
            p_{2D} &= \begin{pmatrix} u \\ v \\ d \end{pmatrix}
                    = \begin{pmatrix} p[0] / p[2] \\ p[1] / p[2] \\ Z \end{pmatrix}
        \end{align}

    where :math:`p_{2D} = (u, v, d)` is the projected 3D point, :math:`p_{3D} = (X, Y, Z)` is the
    3D point and :math:`K \in \mathbb{R}^{3 \times 3}` is the intrinsic matrix.

    If `points` is a batch of 3D points and `intrinsics` is a single intrinsic matrix, the same
    calibration matrix is applied to all points in the batch.

    Args:
        points: The 3D coordinates of points. Shape is (P, 3) or (N, P, 3).
        intrinsics: Camera's calibration matrix. Shape is (3, 3) or (N, 3, 3).

    Returns:
        Projected 3D coordinates of points. Shape is (P, 3) or (N, P, 3).
    """
    points_batch = points.clone()
    intrinsics_batch = intrinsics.clone()
    # check if inputs are batched
    is_batched = points_batch.dim() == 3
    # make sure inputs are batched
    if points_batch.dim() == 2:
        points_batch = points_batch[None]  # (P, 3) -> (1, P, 3)
    if intrinsics_batch.dim() == 2:
        intrinsics_batch = intrinsics_batch[None]  # (3, 3) -> (1, 3, 3)
    # check shape of inputs
    if points_batch.dim() != 3:
        raise ValueError(f"Expected points to have dim = 3: got shape {points.shape}.")
    if intrinsics_batch.dim() != 3:
        raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}.")
    # project points into 2D image plane
    points_2d = torch.matmul(intrinsics_batch, points_batch.transpose(1, 2))
    points_2d = points_2d / points_2d[:, -1, :].unsqueeze(1)  # normalize by last coordinate
    points_2d = points_2d.transpose_(1, 2)  # (N, 3, P) -> (N, P, 3)
    # replace last coordinate with depth
    points_2d[:, :, -1] = points_batch[:, :, -1]
    # return points in same shape as input
    if not is_batched:
        points_2d = points_2d.squeeze(0)  # (1, P, 3) -> (P, 3)

    return points_2d


"""
Sampling
"""


@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
    """Returns identity rotation transform.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Identity quaternion in (w, x, y, z). Shape is (num, 4).
    """
    quat = torch.zeros((num, 4), dtype=torch.float, device=device)
    quat[..., 0] = 1.0

    return quat


@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation in 3D as quaternion.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Sampled quaternion in (w, x, y, z). Shape is (num, 4).

    Reference:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
    """
    # sample random orientation from normal distribution
    quat = torch.randn((num, 4), dtype=torch.float, device=device)
    # normalize the quaternion
    return torch.nn.functional.normalize(quat, p=2.0, dim=-1, eps=1e-12)


@torch.jit.script
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation around z-axis.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Sampled quaternion in (w, x, y, z). Shape is (num, 4).
    """
    roll = torch.zeros(num, dtype=torch.float, device=device)
    pitch = torch.zeros(num, dtype=torch.float, device=device)
    yaw = 2 * torch.pi * torch.rand(num, dtype=torch.float, device=device)

    return quat_from_euler_xyz(roll, pitch, yaw)


def sample_triangle(lower: float, upper: float, size: int | tuple[int, ...], device: str) -> torch.Tensor:
    """Randomly samples tensor from a triangular distribution.

    Args:
        lower: The lower range of the sampled tensor.
        upper: The upper range of the sampled tensor.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is based on :attr:`size`.
    """
    # convert to tuple
    if isinstance(size, int):
        size = (size,)
    # create random tensor in the range [-1, 1]
    r = 2 * torch.rand(*size, device=device) - 1
    # convert to triangular distribution
    r = torch.where(r < 0.0, -torch.sqrt(-r), torch.sqrt(r))
    # rescale back to [0, 1]
    r = (r + 1.0) / 2.0
    # rescale to range [lower, upper]
    return (upper - lower) * r + lower


def sample_uniform(
    lower: torch.Tensor | float, upper: torch.Tensor | float, size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    """Sample uniformly within a range.

    Args:
        lower: Lower bound of uniform range.
        upper: Upper bound of uniform range.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is based on :attr:`size`.
    """
    # convert to tuple
    if isinstance(size, int):
        size = (size,)
    # return tensor
    return torch.rand(*size, device=device) * (upper - lower) + lower


def sample_cylinder(
    radius: float, h_range: tuple[float, float], size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    """Sample 3D points uniformly on a cylinder's surface.

    The cylinder is centered at the origin and aligned with the z-axis. The height of the cylinder is
    sampled uniformly from the range :obj:`h_range`, while the radius is fixed to :obj:`radius`.

    The sampled points are returned as a tensor of shape :obj:`(*size, 3)`, i.e. the last dimension
    contains the x, y, and z coordinates of the sampled points.

    Args:
        radius: The radius of the cylinder.
        h_range: The minimum and maximum height of the cylinder.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is :obj:`(*size, 3)`.
    """
    # sample angles
    angles = (torch.rand(size, device=device) * 2 - 1) * torch.pi
    h_min, h_max = h_range
    # add shape
    if isinstance(size, int):
        size = (size, 3)
    else:
        size += (3,)
    # allocate a tensor
    xyz = torch.zeros(size, device=device)
    xyz[..., 0] = radius * torch.cos(angles)
    xyz[..., 1] = radius * torch.sin(angles)
    xyz[..., 2].uniform_(h_min, h_max)
    # return positions
    return xyz
44,978
Python
35.598047
118
0.606296
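A short round-trip sketch for the camera-geometry helpers in the math utilities above. It assumes the functions are importable from ``omni.isaac.orbit.utils.math``; the intrinsic matrix values and the synthetic depth image are arbitrary example data.

.. code-block:: python

    import torch

    from omni.isaac.orbit.utils.math import project_points, unproject_depth

    # example pinhole intrinsics (fx, fy, cx, cy chosen for a 320 x 240 image)
    intrinsics = torch.tensor([[240.0, 0.0, 160.0], [0.0, 240.0, 120.0], [0.0, 0.0, 1.0]])
    # synthetic depth image of shape (H, W) with a constant depth of 2 m
    depth = torch.full((240, 320), 2.0)

    # lift pixels to 3D points in the camera frame -> (H * W, 3)
    points = unproject_depth(depth, intrinsics)
    # project them back as (u, v, d) -> (H * W, 3)
    pixels = project_points(points, intrinsics)
    # the depth column survives the round trip
    assert torch.allclose(pixels[:, 2], torch.full_like(pixels[:, 2], 2.0))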
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/dict.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for utilities for working with dictionaries."""

from __future__ import annotations

import collections.abc
import hashlib
import json
from collections.abc import Iterable, Mapping
from typing import Any

from .array import TENSOR_TYPE_CONVERSIONS, TENSOR_TYPES
from .string import callable_to_string, string_to_callable

"""
Dictionary <-> Class operations.
"""


def class_to_dict(obj: object) -> dict[str, Any]:
    """Convert an object into dictionary recursively.

    Note:
        Ignores all names starting with "__" (i.e. built-in methods).

    Args:
        obj: An instance of a class to convert.

    Raises:
        ValueError: When input argument is not an object.

    Returns:
        Converted dictionary mapping.
    """
    # check that input data is class instance
    if not hasattr(obj, "__class__"):
        raise ValueError(f"Expected a class instance. Received: {type(obj)}.")
    # convert object to dictionary
    if isinstance(obj, dict):
        obj_dict = obj
    else:
        obj_dict = obj.__dict__
    # convert to dictionary
    data = dict()
    for key, value in obj_dict.items():
        # disregard builtin attributes
        if key.startswith("__"):
            continue
        # check if attribute is callable -- function
        if callable(value):
            data[key] = callable_to_string(value)
        # check if attribute is a dictionary
        elif hasattr(value, "__dict__") or isinstance(value, dict):
            data[key] = class_to_dict(value)
        else:
            data[key] = value
    return data


def update_class_from_dict(obj, data: dict[str, Any], _ns: str = "") -> None:
    """Reads a dictionary and sets object variables recursively.

    This function performs in-place update of the class member attributes.

    Args:
        obj: An instance of a class to update.
        data: Input dictionary to update from.
        _ns: Namespace of the current object. This is useful for nested configuration classes
            or dictionaries. Defaults to "".

    Raises:
        TypeError: When input is not a dictionary.
        ValueError: When dictionary has a value that does not match default config type.
        KeyError: When dictionary has a key that does not exist in the default config type.
    """
    for key, value in data.items():
        # key_ns is the full namespace of the key
        key_ns = _ns + "/" + key
        # check if key is present in the object
        if hasattr(obj, key):
            obj_mem = getattr(obj, key)
            if isinstance(obj_mem, Mapping):
                # Note: We don't handle two-level nested dictionaries. Just use configclass if this is needed.
                # iterate over the dictionary to look for callable values
                for k, v in obj_mem.items():
                    if callable(v):
                        value[k] = string_to_callable(value[k])
                setattr(obj, key, value)
            elif isinstance(value, Mapping):
                # recursively call if it is a dictionary
                update_class_from_dict(obj_mem, value, _ns=key_ns)
            elif isinstance(value, Iterable) and not isinstance(value, str):
                # check length of value to be safe
                # note: the None check must come first, otherwise len(None) raises a TypeError before the guard applies
                if obj_mem is not None and len(obj_mem) != len(value):
                    raise ValueError(
                        f"[Config]: Incorrect length under namespace: {key_ns}."
                        f" Expected: {len(obj_mem)}, Received: {len(value)}."
                    )
                # set value
                setattr(obj, key, value)
            elif callable(obj_mem):
                # update function name
                value = string_to_callable(value)
                setattr(obj, key, value)
            elif isinstance(value, type(obj_mem)):
                # check that they are type-safe
                setattr(obj, key, value)
            else:
                raise ValueError(
                    f"[Config]: Incorrect type under namespace: {key_ns}."
                    f" Expected: {type(obj_mem)}, Received: {type(value)}."
                )
        else:
            raise KeyError(f"[Config]: Key not found under namespace: {key_ns}.")


"""
Dictionary <-> Hashable operations.
""" def dict_to_md5_hash(data: object) -> str: """Convert a dictionary into a hashable key using MD5 hash. Args: data: Input dictionary or configuration object to convert. Returns: A string object of double length containing only hexadecimal digits. """ # convert to dictionary if isinstance(data, dict): encoded_buffer = json.dumps(data, sort_keys=True).encode() else: encoded_buffer = json.dumps(class_to_dict(data), sort_keys=True).encode() # compute hash using MD5 data_hash = hashlib.md5() data_hash.update(encoded_buffer) # return the hash key return data_hash.hexdigest() """ Dictionary operations. """ def convert_dict_to_backend( data: dict, backend: str = "numpy", array_types: Iterable[str] = ("numpy", "torch", "warp") ) -> dict: """Convert all arrays or tensors in a dictionary to a given backend. This function iterates over the dictionary, converts all arrays or tensors with the given types to the desired backend, and stores them in a new dictionary. It also works with nested dictionaries. Currently supported backends are "numpy", "torch", and "warp". Note: This function only converts arrays or tensors. Other types of data are left unchanged. Mutable types (e.g. lists) are referenced by the new dictionary, so they are not copied. Args: data: An input dict containing array or tensor data as values. backend: The backend ("numpy", "torch", "warp") to which arrays in this dict should be converted. Defaults to "numpy". array_types: A list containing the types of arrays that should be converted to the desired backend. Defaults to ("numpy", "torch", "warp"). Raises: ValueError: If the specified ``backend`` or ``array_types`` are unknown, i.e. not in the list of supported backends ("numpy", "torch", "warp"). Returns: The updated dict with the data converted to the desired backend. """ # THINK: Should we also support converting to a specific device, e.g. "cuda:0"? # Check the backend is valid. if backend not in TENSOR_TYPE_CONVERSIONS: raise ValueError(f"Unknown backend '{backend}'. Supported backends are 'numpy', 'torch', and 'warp'.") # Define the conversion functions for each backend. tensor_type_conversions = TENSOR_TYPE_CONVERSIONS[backend] # Parse the array types and convert them to the corresponding types: "numpy" -> np.ndarray, etc. parsed_types = list() for t in array_types: # Check type is valid. if t not in TENSOR_TYPES: raise ValueError(f"Unknown array type: '{t}'. Supported array types are 'numpy', 'torch', and 'warp'.") # Exclude types that match the backend, since we do not need to convert these. if t == backend: continue # Convert the string types to the corresponding types. parsed_types.append(TENSOR_TYPES[t]) # Convert the data to the desired backend. output_dict = dict() for key, value in data.items(): # Obtain the data type of the current value. data_type = type(value) # -- arrays if data_type in parsed_types: # check if we have a known conversion. if data_type not in tensor_type_conversions: raise ValueError(f"No registered conversion for data type: {data_type} to {backend}!") # convert the data to the desired backend. output_dict[key] = tensor_type_conversions[data_type](value) # -- nested dictionaries elif isinstance(data[key], dict): output_dict[key] = convert_dict_to_backend(value) # -- everything else else: output_dict[key] = value return output_dict def update_dict(orig_dict: dict, new_dict: collections.abc.Mapping) -> dict: """Updates existing dictionary with values from a new dictionary. This function mimics the dict.update() function. 
However, it works for nested dictionaries as well. Reference: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth Args: orig_dict: The original dictionary to insert items to. new_dict: The new dictionary to insert items from. Returns: The updated dictionary. """ for keyname, value in new_dict.items(): if isinstance(value, collections.abc.Mapping): orig_dict[keyname] = update_dict(orig_dict.get(keyname, {}), value) else: orig_dict[keyname] = value return orig_dict def print_dict(val, nesting: int = -4, start: bool = True): """Outputs a nested dictionary.""" if isinstance(val, dict): if not start: print("") nesting += 4 for k in val: print(nesting * " ", end="") print(k, end=": ") print_dict(val[k], nesting, start=False) else: # deal with functions in print statements if callable(val): print(callable_to_string(val)) else: print(val)
9,558
Python
35.624521
115
0.614773
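A short usage sketch for the dictionary helpers above. It assumes torch tensors are registered in ``TENSOR_TYPES`` and ``TENSOR_TYPE_CONVERSIONS`` (as the imports from the ``array`` module suggest); the configuration dictionaries are made-up examples.

.. code-block:: python

    import numpy as np
    import torch

    from omni.isaac.orbit.utils.dict import convert_dict_to_backend, update_dict

    # nested update keeps sibling keys intact
    base = {"sim": {"dt": 0.01, "gravity": (0.0, 0.0, -9.81)}}
    patch = {"sim": {"dt": 0.005}}
    merged = update_dict(base, patch)
    # merged["sim"] is now {"dt": 0.005, "gravity": (0.0, 0.0, -9.81)}

    # convert all torch tensors, including nested ones, to numpy arrays
    data = {"obs": torch.zeros(3), "info": {"act": torch.ones(2)}}
    data_np = convert_dict_to_backend(data, backend="numpy")
    assert isinstance(data_np["info"]["act"], np.ndarray)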
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/configclass.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations """Sub-module that provides a wrapper around the Python 3.7 onwards ``dataclasses`` module.""" import inspect from collections.abc import Callable from copy import deepcopy from dataclasses import MISSING, Field, dataclass, field, replace from typing import Any, ClassVar from .dict import class_to_dict, update_class_from_dict _CONFIGCLASS_METHODS = ["to_dict", "from_dict", "replace", "copy"] """List of class methods added at runtime to dataclass.""" """ Wrapper around dataclass. """ def __dataclass_transform__(): """Add annotations decorator for PyLance.""" return lambda a: a @__dataclass_transform__() def configclass(cls, **kwargs): """Wrapper around `dataclass` functionality to add extra checks and utilities. As of Python 3.7, the standard dataclasses have two main issues which makes them non-generic for configuration use-cases. These include: 1. Requiring a type annotation for all its members. 2. Requiring explicit usage of :meth:`field(default_factory=...)` to reinitialize mutable variables. This function provides a decorator that wraps around Python's `dataclass`_ utility to deal with the above two issues. It also provides additional helper functions for dictionary <-> class conversion and easily copying class instances. Usage: .. code-block:: python from dataclasses import MISSING from omni.isaac.orbit.utils.configclass import configclass @configclass class ViewerCfg: eye: list = [7.5, 7.5, 7.5] # field missing on purpose lookat: list = field(default_factory=[0.0, 0.0, 0.0]) @configclass class EnvCfg: num_envs: int = MISSING episode_length: int = 2000 viewer: ViewerCfg = ViewerCfg() # create configuration instance env_cfg = EnvCfg(num_envs=24) # print information as a dictionary print(env_cfg.to_dict()) # create a copy of the configuration env_cfg_copy = env_cfg.copy() # replace arbitrary fields using keyword arguments env_cfg_copy = env_cfg_copy.replace(num_envs=32) Args: cls: The class to wrap around. **kwargs: Additional arguments to pass to :func:`dataclass`. Returns: The wrapped class. .. _dataclass: https://docs.python.org/3/library/dataclasses.html """ # add type annotations _add_annotation_types(cls) # add field factory _process_mutable_types(cls) # copy mutable members # note: we check if user defined __post_init__ function exists and augment it with our own if hasattr(cls, "__post_init__"): setattr(cls, "__post_init__", _combined_function(cls.__post_init__, _custom_post_init)) else: setattr(cls, "__post_init__", _custom_post_init) # add helper functions for dictionary conversion setattr(cls, "to_dict", _class_to_dict) setattr(cls, "from_dict", _update_class_from_dict) setattr(cls, "replace", _replace_class_with_kwargs) setattr(cls, "copy", _copy_class) # wrap around dataclass cls = dataclass(cls, **kwargs) # return wrapped class return cls """ Dictionary <-> Class operations. These are redefined here to add new docstrings. """ def _class_to_dict(obj: object) -> dict[str, Any]: """Convert an object into dictionary recursively. Returns: Converted dictionary mapping. """ return class_to_dict(obj) def _update_class_from_dict(obj, data: dict[str, Any]) -> None: """Reads a dictionary and sets object variables recursively. This function performs in-place update of the class member attributes. Args: data: Input (nested) dictionary to update from. Raises: TypeError: When input is not a dictionary. 
ValueError: When dictionary has a value that does not match default config type. KeyError: When dictionary has a key that does not exist in the default config type. """ return update_class_from_dict(obj, data, _ns="") def _replace_class_with_kwargs(obj: object, **kwargs) -> object: """Return a new object replacing specified fields with new values. This is especially useful for frozen classes. Example usage: .. code-block:: python @configclass(frozen=True) class C: x: int y: int c = C(1, 2) c1 = c.replace(x=3) assert c1.x == 3 and c1.y == 2 Args: obj: The object to replace. **kwargs: The fields to replace and their new values. Returns: The new object. """ return replace(obj, **kwargs) def _copy_class(obj: object) -> object: """Return a new object with the same fields as the original.""" return replace(obj) """ Private helper functions. """ def _add_annotation_types(cls): """Add annotations to all elements in the dataclass. By definition in Python, a field is defined as a class variable that has a type annotation. In case type annotations are not provided, dataclass ignores those members when :func:`__dict__()` is called. This function adds these annotations to the class variable to prevent any issues in case the user forgets to specify the type annotation. This makes the following a feasible operation: @dataclass class State: pos = (0.0, 0.0, 0.0) ^^ If the function is NOT used, the following type-error is returned: TypeError: 'pos' is a field but has no type annotation """ # get type hints hints = {} # iterate over class inheritance # we add annotations from base classes first for base in reversed(cls.__mro__): # check if base is object if base is object: continue # get base class annotations ann = base.__dict__.get("__annotations__", {}) # directly add all annotations from base class hints.update(ann) # iterate over base class members # Note: Do not change this to dir(base) since it orders the members alphabetically. # This is not desirable since the order of the members is important in some cases. for key in base.__dict__: # get class member value = getattr(base, key) # skip members if _skippable_class_member(key, value, hints): continue # add type annotations for members that don't have explicit type annotations # for these, we deduce the type from the default value if not isinstance(value, type): if key not in hints: # check if var type is not MISSING # we cannot deduce type from MISSING! if value is MISSING: raise TypeError( f"Missing type annotation for '{key}' in class '{cls.__name__}'." " Please add a type annotation or set a default value." ) # add type annotation hints[key] = type(value) elif key != value.__name__: # note: we don't want to add type annotations for nested configclass. Thus, we check if # the name of the type matches the name of the variable. # since Python 3.10, type hints are stored as strings hints[key] = f"type[{value.__name__}]" # Note: Do not change this line. `cls.__dict__.get("__annotations__", {})` is different from # `cls.__annotations__` because of inheritance. cls.__annotations__ = cls.__dict__.get("__annotations__", {}) cls.__annotations__ = hints def _process_mutable_types(cls): """Initialize all mutable elements through :obj:`dataclasses.Field` to avoid unnecessary complaints. By default, dataclass requires usage of :obj:`field(default_factory=...)` to reinitialize mutable objects every time a new class instance is created. 
    If a member has a mutable type and it is created without specifying the `field(default_factory=...)`,
    then Python throws an error requiring the usage of `default_factory`.

    Additionally, Python only explicitly checks for field specification when the type is a list, set or dict.
    This misses the use-case where the type is a class itself. Thus, the code silently carries a bug with it
    which can lead to undesirable effects. This function deals with this issue.

    This makes the following a feasible operation:

    @dataclass
    class State:
       pos: list = [0.0, 0.0, 0.0]
          ^^
       If the function is NOT used, the following value-error is returned:
          ValueError: mutable default <class 'list'> for field pos is not allowed: use default_factory
    """
    # note: Need to set this up in the same order as annotations. Otherwise, it
    #   complains about missing positional arguments.
    ann = cls.__dict__.get("__annotations__", {})

    # iterate over all class members and store them in a dictionary
    class_members = {}
    for base in reversed(cls.__mro__):
        # check if base is object
        if base is object:
            continue
        # iterate over base class members
        for key in base.__dict__:
            # get class member
            f = getattr(base, key)
            # skip members
            if _skippable_class_member(key, f):
                continue
            # store class member if it is not a type or if it is already present in annotations
            if not isinstance(f, type) or key in ann:
                class_members[key] = f
        # iterate over base class data fields
        # in previous call, things that became a dataclass field were removed from class members
        # so we need to add them back here as a dataclass field directly
        for key, f in base.__dict__.get("__dataclass_fields__", {}).items():
            # store class member
            if not isinstance(f, type):
                class_members[key] = f

    # check that all annotations are present in class members
    # note: mainly for debugging purposes
    if len(class_members) != len(ann):
        raise ValueError(
            f"In class '{cls.__name__}', number of annotations ({len(ann)}) does not match number of class members"
            f" ({len(class_members)}). Please check that all class members have type annotations and/or a default"
            " value. If you don't want to specify a default value, please use the literal `dataclasses.MISSING`."
        )

    # iterate over annotations and add field factory for mutable types
    for key in ann:
        # find matching field in class
        value = class_members.get(key, MISSING)
        # check if key belongs to ClassVar
        # in that case, we cannot use default_factory!
        origin = getattr(ann[key], "__origin__", None)
        if origin is ClassVar:
            continue
        # check if f is MISSING
        # note: commented out for now since it causes issue with inheritance
        #   of dataclasses when parents have some positional and some keyword arguments.
        #   Ref: https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses
        #   TODO: check if this is fixed in Python 3.10
        # if f is MISSING:
        #     continue
        if isinstance(value, Field):
            setattr(cls, key, value)
        elif not isinstance(value, type):
            # create field factory for mutable types
            value = field(default_factory=_return_f(value))
            setattr(cls, key, value)


def _custom_post_init(obj):
    """Deepcopy all elements to avoid shared memory issues for mutable objects in dataclasses initialization.

    This function is called explicitly instead of as a part of :func:`_process_mutable_types()` to prevent mapping
    proxy type i.e. a read only proxy for mapping objects. The error is thrown when using hierarchical data-classes
    for configuration.
""" for key in dir(obj): # skip dunder members if key.startswith("__"): continue # get data member value = getattr(obj, key) # duplicate data members if not callable(value): setattr(obj, key, deepcopy(value)) def _combined_function(f1: Callable, f2: Callable) -> Callable: """Combine two functions into one. Args: f1: The first function. f2: The second function. Returns: The combined function. """ def _combined(*args, **kwargs): # call both functions f1(*args, **kwargs) f2(*args, **kwargs) return _combined """ Helper functions """ def _skippable_class_member(key: str, value: Any, hints: dict | None = None) -> bool: """Check if the class member should be skipped in configclass processing. The following members are skipped: * Dunder members: ``__name__``, ``__module__``, ``__qualname__``, ``__annotations__``, ``__dict__``. * Manually-added special class functions: From :obj:`_CONFIGCLASS_METHODS`. * Members that are already present in the type annotations. * Functions bounded to class object or class. Args: key: The class member name. value: The class member value. hints: The type hints for the class. Defaults to None, in which case, the members existence in type hints are not checked. Returns: True if the class member should be skipped, False otherwise. """ # skip dunder members if key.startswith("__"): return True # skip manually-added special class functions if key in _CONFIGCLASS_METHODS: return True # check if key is already present if hints is not None and key in hints: return True # skip functions bounded to class if callable(value): signature = inspect.signature(value) if "self" in signature.parameters or "cls" in signature.parameters: return True # Otherwise, don't skip return False def _return_f(f: Any) -> Callable[[], Any]: """Returns default factory function for creating mutable/immutable variables. This function should be used to create default factory functions for variables. Example: .. code-block:: python value = field(default_factory=_return_f(value)) setattr(cls, key, value) """ def _wrap(): if isinstance(f, Field): if f.default_factory is MISSING: return deepcopy(f.default) else: return f.default_factory else: return f return _wrap
14,857
Python
34.125295
132
0.631016
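The mutable-default handling described above is easiest to see in a tiny example. ``CameraCfg`` is a hypothetical config; the point is that each instance receives its own deep copy of the list default, courtesy of ``_process_mutable_types`` and ``_custom_post_init``.

.. code-block:: python

    from omni.isaac.orbit.utils.configclass import configclass

    @configclass
    class CameraCfg:
        # a mutable default without field(default_factory=...) -- handled by the decorator
        resolution: list = [640, 480]

    a = CameraCfg()
    b = CameraCfg()
    a.resolution[0] = 1280
    # instances do not share the default list
    assert b.resolution == [640, 480]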
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/assets.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module that defines the host-server where assets and resources are stored.

By default, we use the Isaac Sim Nucleus Server for hosting assets and resources. This makes
distribution of the assets easier and makes the repository smaller in size code-wise.

For more information on Omniverse Nucleus:
https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus
"""

from __future__ import annotations

import io
import os
import tempfile
from typing import Literal

import carb
import omni.client
import omni.isaac.core.utils.nucleus as nucleus_utils

# check nucleus connection
if nucleus_utils.get_assets_root_path() is None:
    msg = (
        "Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n"
        "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus"
    )
    carb.log_error(msg)
    raise RuntimeError(msg)


NVIDIA_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/NVIDIA"
"""Path to the root directory on the NVIDIA Nucleus Server."""

ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac"
"""Path to the `Isaac` directory on the NVIDIA Nucleus Server."""

ISAAC_ORBIT_NUCLEUS_DIR = f"{ISAAC_NUCLEUS_DIR}/Samples/Orbit"
"""Path to the `Isaac/Samples/Orbit` directory on the NVIDIA Nucleus Server."""


def check_file_path(path: str) -> Literal[0, 1, 2]:
    """Checks if a file exists on the Nucleus Server or locally.

    Args:
        path: The path to the file.

    Returns:
        The status of the file. Possible values are:

        * :obj:`0` if the file does not exist
        * :obj:`1` if the file exists locally
        * :obj:`2` if the file exists on the Nucleus Server
    """
    if os.path.isfile(path):
        return 1
    elif omni.client.stat(path)[0] == omni.client.Result.OK:
        return 2
    else:
        return 0


def retrieve_file_path(path: str, download_dir: str | None = None, force_download: bool = True) -> str:
    """Retrieves the path to a file on the Nucleus Server or locally.

    If the file exists locally, then the absolute path to the file is returned.
    If the file exists on the Nucleus Server, then the file is downloaded to the local machine
    and the absolute path to the file is returned.

    Args:
        path: The path to the file.
        download_dir: The directory where the file should be downloaded. Defaults to None, in which
            case the file is downloaded to the system's temporary directory.
        force_download: Whether to force download the file from the Nucleus Server. This will overwrite
            the local file if it exists. Defaults to True.

    Returns:
        The path to the file on the local machine.

    Raises:
        FileNotFoundError: When the file is not found locally or on Nucleus Server.
        RuntimeError: When the file cannot be copied from the Nucleus Server to the local machine. This
            can happen when the file already exists locally and :attr:`force_download` is set to False.
""" # check file status file_status = check_file_path(path) if file_status == 1: return os.path.abspath(path) elif file_status == 2: # resolve download directory if download_dir is None: download_dir = tempfile.gettempdir() else: download_dir = os.path.abspath(download_dir) # create download directory if it does not exist if not os.path.exists(download_dir): os.makedirs(download_dir) # download file in temp directory using os file_name = os.path.basename(omni.client.break_url(path).path) target_path = os.path.join(download_dir, file_name) # check if file already exists locally if not os.path.isfile(target_path) or force_download: # copy file to local machine result = omni.client.copy(path, target_path) if result != omni.client.Result.OK and force_download: raise RuntimeError(f"Unable to copy file: '{path}'. Is the Nucleus Server running?") return os.path.abspath(target_path) else: raise FileNotFoundError(f"Unable to find the file: {path}") def read_file(path: str) -> io.BytesIO: """Reads a file from the Nucleus Server or locally. Args: path: The path to the file. Raises: FileNotFoundError: When the file not found locally or on Nucleus Server. Returns: The content of the file. """ # check file status file_status = check_file_path(path) if file_status == 1: with open(path, "rb") as f: return io.BytesIO(f.read()) elif file_status == 2: file_content = omni.client.read_file(path)[2] return io.BytesIO(memoryview(file_content).tobytes()) else: raise FileNotFoundError(f"Unable to find the file: {path}")
5,040
Python
35.79562
117
0.672222
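A hedged usage sketch for the Nucleus helpers above. The USD asset path is hypothetical; the status codes follow the ``check_file_path`` docstring.

.. code-block:: python

    from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR, check_file_path, retrieve_file_path

    usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka.usd"  # hypothetical asset path
    status = check_file_path(usd_path)
    if status == 2:
        # file lives on the Nucleus server -> download a local copy
        local_path = retrieve_file_path(usd_path, download_dir="/tmp/orbit_assets")
    elif status == 1:
        # file already exists locally
        local_path = usd_path
    else:
        raise FileNotFoundError(usd_path)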
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/io/yaml.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Utilities for file I/O with yaml."""

# needed for the `dict | object` annotation on Python < 3.10
from __future__ import annotations

import os
import yaml

from omni.isaac.orbit.utils import class_to_dict


def load_yaml(filename: str) -> dict:
    """Loads an input YAML file safely.

    Args:
        filename: The path to the YAML file.

    Raises:
        FileNotFoundError: When the specified file does not exist.

    Returns:
        The data read from the input file.
    """
    if not os.path.exists(filename):
        raise FileNotFoundError(f"File not found: {filename}")
    with open(filename) as f:
        data = yaml.full_load(f)
    return data


def dump_yaml(filename: str, data: dict | object, sort_keys: bool = False):
    """Saves data into a YAML file safely.

    Note:
        The function creates any missing directory along the file's path.

    Args:
        filename: The path to save the file at.
        data: The data to save, either a dictionary or a class object.
        sort_keys: Whether to sort the keys in the output file. Defaults to False.
    """
    # check ending
    if not filename.endswith("yaml"):
        filename += ".yaml"
    # create directory
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename), exist_ok=True)
    # convert data into dictionary
    if not isinstance(data, dict):
        data = class_to_dict(data)
    # save data
    with open(filename, "w") as f:
        yaml.dump(data, f, default_flow_style=False, sort_keys=sort_keys)
1,572
Python
27.089285
82
0.653308
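A minimal round trip for the YAML helpers above, assuming they are re-exported from ``omni.isaac.orbit.utils.io``; adjust the import to the ``yaml`` submodule otherwise.

.. code-block:: python

    from omni.isaac.orbit.utils.io import dump_yaml, load_yaml

    params = {"num_envs": 64, "sim": {"dt": 0.005}}
    # creates missing directories and appends ".yaml" if the suffix is absent
    dump_yaml("/tmp/orbit_io/params.yaml", params)
    assert load_yaml("/tmp/orbit_io/params.yaml") == params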
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/io/pkl.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Utilities for file I/O with pickle.""" import os import pickle from typing import Any def load_pickle(filename: str) -> Any: """Loads an input PKL file safely. Args: filename: The path to pickled file. Raises: FileNotFoundError: When the specified file does not exist. Returns: The data read from the input file. """ if not os.path.exists(filename): raise FileNotFoundError(f"File not found: {filename}") with open(filename, "rb") as f: data = pickle.load(f) return data def dump_pickle(filename: str, data: Any): """Saves data into a pickle file safely. Note: The function creates any missing directory along the file's path. Args: filename: The path to save the file at. data: The data to save. """ # check ending if not filename.endswith("pkl"): filename += ".pkl" # create directory if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename), exist_ok=True) # save data with open(filename, "wb") as f: pickle.dump(data, f)
1,252
Python
23.568627
73
0.638978
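And the analogous round trip for the pickle helpers, under the same import assumption as the YAML example.

.. code-block:: python

    from omni.isaac.orbit.utils.io import dump_pickle, load_pickle

    ckpt = {"step": 1000, "obs_mean": [0.0, 0.1, -0.2]}
    dump_pickle("/tmp/orbit_io/checkpoint.pkl", ckpt)
    assert load_pickle("/tmp/orbit_io/checkpoint.pkl") == ckpt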
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/warp/kernels.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Custom kernels for warp."""

import warp as wp


@wp.kernel
def raycast_mesh_kernel(
    mesh: wp.uint64,
    ray_starts: wp.array(dtype=wp.vec3),
    ray_directions: wp.array(dtype=wp.vec3),
    ray_hits: wp.array(dtype=wp.vec3),
    ray_distance: wp.array(dtype=wp.float32),
    ray_normal: wp.array(dtype=wp.vec3),
    ray_face_id: wp.array(dtype=wp.int32),
    max_dist: float = 1e6,
    return_distance: int = False,
    return_normal: int = False,
    return_face_id: int = False,
):
    """Performs ray-casting against a mesh.

    This function performs ray-casting against the given mesh using the provided ray start positions
    and directions. The resulting ray hit positions are stored in the :obj:`ray_hits` array.

    Note that the `ray_starts`, `ray_directions`, and `ray_hits` arrays should have compatible shapes
    and data types to ensure proper execution. Additionally, they all must be in the same frame.

    The function utilizes the `mesh_query_ray` method from the `wp` module to perform the actual ray-casting
    operation. The maximum ray-cast distance is set to `1e6` units.

    Args:
        mesh: The input mesh. The ray-casting is performed against this mesh on the device specified by the
            `mesh`'s `device` attribute.
        ray_starts: The input ray start positions. Shape is (N, 3).
        ray_directions: The input ray directions. Shape is (N, 3).
        ray_hits: The output ray hit positions. Shape is (N, 3).
        ray_distance: The output ray hit distances. Shape is (N,), if `return_distance` is True. Otherwise,
            this array is not used.
        ray_normal: The output ray hit normals. Shape is (N, 3), if `return_normal` is True. Otherwise,
            this array is not used.
        ray_face_id: The output ray hit face ids. Shape is (N,), if `return_face_id` is True. Otherwise,
            this array is not used.
        max_dist: The maximum ray-cast distance. Defaults to 1e6.
        return_distance: Whether to return the ray hit distances. Defaults to False.
        return_normal: Whether to return the ray hit normals. Defaults to False.
        return_face_id: Whether to return the ray hit face ids. Defaults to False.
    """
    # get the thread id
    tid = wp.tid()

    t = float(0.0)  # hit distance along ray
    u = float(0.0)  # hit face barycentric u
    v = float(0.0)  # hit face barycentric v
    sign = float(0.0)  # hit face sign
    n = wp.vec3()  # hit face normal
    f = int(0)  # hit face index

    # ray cast against the mesh and store the hit position
    hit_success = wp.mesh_query_ray(mesh, ray_starts[tid], ray_directions[tid], max_dist, t, u, v, sign, n, f)
    # if the ray hit, store the hit data
    if hit_success:
        ray_hits[tid] = ray_starts[tid] + t * ray_directions[tid]
        if return_distance == 1:
            ray_distance[tid] = t
        if return_normal == 1:
            ray_normal[tid] = n
        if return_face_id == 1:
            ray_face_id[tid] = f
3,123
Python
41.216216
110
0.651297
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/warp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing operations based on warp.""" from .ops import convert_to_warp_mesh, raycast_mesh
230
Python
24.666664
56
0.747826
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/warp/ops.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrapping around warp kernels for compatibility with torch tensors."""

from __future__ import annotations

import numpy as np
import torch

import warp as wp

from . import kernels


def raycast_mesh(
    ray_starts: torch.Tensor,
    ray_directions: torch.Tensor,
    mesh: wp.Mesh,
    max_dist: float = 1e6,
    return_distance: bool = False,
    return_normal: bool = False,
    return_face_id: bool = False,
) -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None, torch.Tensor | None]:
    """Performs ray-casting against a mesh.

    Note that the `ray_starts` and `ray_directions` arrays should have compatible shapes and data types to
    ensure proper execution. Additionally, they both must be in the same frame.

    Args:
        ray_starts: The starting position of the rays. Shape (N, 3).
        ray_directions: The ray directions for each ray. Shape (N, 3).
        mesh: The warp mesh to ray-cast against.
        max_dist: The maximum distance to ray-cast. Defaults to 1e6.
        return_distance: Whether to return the distance of the ray until it hits the mesh. Defaults to False.
        return_normal: Whether to return the normal of the mesh face the ray hits. Defaults to False.
        return_face_id: Whether to return the face id of the mesh face the ray hits. Defaults to False.

    Returns:
        The ray hit position. Shape (N, 3).
            The returned tensor contains :obj:`float('inf')` for missed hits.
        The ray hit distance. Shape (N,). Will only return if :attr:`return_distance` is True, else returns None.
            The returned tensor contains :obj:`float('inf')` for missed hits.
        The ray hit normal. Shape (N, 3). Will only return if :attr:`return_normal` is True, else returns None.
            The returned tensor contains :obj:`float('inf')` for missed hits.
        The ray hit face id. Shape (N,). Will only return if :attr:`return_face_id` is True, else returns None.
            The returned tensor contains :obj:`int(-1)` for missed hits.
""" # extract device and shape information shape = ray_starts.shape device = ray_starts.device # device of the mesh torch_device = wp.device_to_torch(mesh.device) # reshape the tensors ray_starts = ray_starts.to(torch_device).view(-1, 3).contiguous() ray_directions = ray_directions.to(torch_device).view(-1, 3).contiguous() num_rays = ray_starts.shape[0] # create output tensor for the ray hits ray_hits = torch.full((num_rays, 3), float("inf"), device=torch_device).contiguous() # map the memory to warp arrays ray_starts_wp = wp.from_torch(ray_starts, dtype=wp.vec3) ray_directions_wp = wp.from_torch(ray_directions, dtype=wp.vec3) ray_hits_wp = wp.from_torch(ray_hits, dtype=wp.vec3) if return_distance: ray_distance = torch.full((num_rays,), float("inf"), device=torch_device).contiguous() ray_distance_wp = wp.from_torch(ray_distance, dtype=wp.float32) else: ray_distance = None ray_distance_wp = wp.empty((1,), dtype=wp.float32, device=torch_device) if return_normal: ray_normal = torch.full((num_rays, 3), float("inf"), device=torch_device).contiguous() ray_normal_wp = wp.from_torch(ray_normal, dtype=wp.vec3) else: ray_normal = None ray_normal_wp = wp.empty((1,), dtype=wp.vec3, device=torch_device) if return_face_id: ray_face_id = torch.ones((num_rays,), dtype=torch.int32, device=torch_device).contiguous() * (-1) ray_face_id_wp = wp.from_torch(ray_face_id, dtype=wp.int32) else: ray_face_id = None ray_face_id_wp = wp.empty((1,), dtype=wp.int32, device=torch_device) # launch the warp kernel wp.launch( kernel=kernels.raycast_mesh_kernel, dim=num_rays, inputs=[ mesh.id, ray_starts_wp, ray_directions_wp, ray_hits_wp, ray_distance_wp, ray_normal_wp, ray_face_id_wp, float(max_dist), int(return_distance), int(return_normal), int(return_face_id), ], device=mesh.device, ) # NOTE: Synchronize is not needed anymore, but we keep it for now. Check with @dhoeller. wp.synchronize() if return_distance: ray_distance = ray_distance.to(device).view(shape[0], shape[1]) if return_normal: ray_normal = ray_normal.to(device).view(shape) if return_face_id: ray_face_id = ray_face_id.to(device).view(shape[0], shape[1]) return ray_hits.to(device).view(shape), ray_distance, ray_normal, ray_face_id def convert_to_warp_mesh(points: np.ndarray, indices: np.ndarray, device: str) -> wp.Mesh: """Create a warp mesh object with a mesh defined from vertices and triangles. Args: points: The vertices of the mesh. Shape is (N, 3), where N is the number of vertices. indices: The triangles of the mesh as references to vertices for each triangle. Shape is (M, 3), where M is the number of triangles / faces. device: The device to use for the mesh. Returns: The warp mesh object. """ return wp.Mesh( points=wp.array(points.astype(np.float32), dtype=wp.vec3, device=device), indices=wp.array(indices.astype(np.int32).flatten(), dtype=wp.int32, device=device), )
5,554
Python
38.678571
109
0.64152
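A self-contained sketch of the ray-casting pipeline above: build a warp mesh from a single triangle and cast one batched ray at it. It assumes a CUDA device is available (warp also supports ``"cpu"``); the geometry and ray values are made up.

.. code-block:: python

    import numpy as np
    import torch
    import warp as wp

    from omni.isaac.orbit.utils.warp import convert_to_warp_mesh, raycast_mesh

    wp.init()
    device = "cuda:0"  # assumption: a GPU device; use "cpu" otherwise

    # a single triangle in the x-y plane at z = 0
    points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    indices = np.array([[0, 1, 2]])
    mesh = convert_to_warp_mesh(points, indices, device=device)

    # one batch with one ray pointing straight down onto the triangle
    ray_starts = torch.tensor([[[0.25, 0.25, 1.0]]], device=device)  # (1, 1, 3)
    ray_dirs = torch.tensor([[[0.0, 0.0, -1.0]]], device=device)  # (1, 1, 3)
    hits, dist, _, _ = raycast_mesh(ray_starts, ray_dirs, mesh, return_distance=True)
    # hits[0, 0] is approximately (0.25, 0.25, 0.0) and dist[0, 0] is approximately 1.0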
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/noise/noise_model.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from . import noise_cfg


def constant_bias_noise(data: torch.Tensor, cfg: noise_cfg.ConstantBiasNoiseCfg) -> torch.Tensor:
    """Adds a constant bias to the data."""
    return data + cfg.bias


def additive_uniform_noise(data: torch.Tensor, cfg: noise_cfg.AdditiveUniformNoiseCfg) -> torch.Tensor:
    """Adds noise sampled from a uniform distribution."""
    return data + torch.rand_like(data) * (cfg.n_max - cfg.n_min) + cfg.n_min


def additive_gaussian_noise(data: torch.Tensor, cfg: noise_cfg.AdditiveGaussianNoiseCfg) -> torch.Tensor:
    """Adds noise sampled from a Gaussian distribution."""
    return data + cfg.mean + cfg.std * torch.randn_like(data)
870
Python
30.107142
97
0.717241
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/noise/noise_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from collections.abc import Callable
from dataclasses import MISSING

from omni.isaac.orbit.utils import configclass

from . import noise_model


@configclass
class NoiseCfg:
    """Base configuration for a noise term."""

    func: Callable[[torch.Tensor, NoiseCfg], torch.Tensor] = MISSING
    """The function to be called for applying the noise.

    Note:
        The shape of the input and output tensors must be the same.
    """


@configclass
class AdditiveUniformNoiseCfg(NoiseCfg):
    """Configuration for an additive uniform noise term."""

    func = noise_model.additive_uniform_noise

    n_min: float = -1.0
    """The minimum value of the noise. Defaults to -1.0."""

    n_max: float = 1.0
    """The maximum value of the noise. Defaults to 1.0."""


@configclass
class AdditiveGaussianNoiseCfg(NoiseCfg):
    """Configuration for an additive Gaussian noise term."""

    func = noise_model.additive_gaussian_noise

    mean: float = 0.0
    """The mean of the noise. Defaults to 0.0."""

    std: float = 1.0
    """The standard deviation of the noise. Defaults to 1.0."""


@configclass
class ConstantBiasNoiseCfg(NoiseCfg):
    """Configuration for a constant bias noise term."""

    func = noise_model.constant_bias_noise

    bias: float = 0.0
    """The bias to add. Defaults to 0.0."""
1,480
Python
23.278688
68
0.693243
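The ``NoiseCfg`` pattern above extends naturally to user-defined noise terms. A hypothetical multiplicative-noise sketch follows; ``scaling_noise`` and ``ScalingNoiseCfg`` are not part of the module and only illustrate the function-plus-config convention.

.. code-block:: python

    import torch

    from omni.isaac.orbit.utils import configclass
    from omni.isaac.orbit.utils.noise import NoiseCfg

    def scaling_noise(data: torch.Tensor, cfg: "ScalingNoiseCfg") -> torch.Tensor:
        # multiplicative noise: scale each element by a factor in [1 - amplitude, 1 + amplitude]
        scale = 1.0 + cfg.amplitude * (2.0 * torch.rand_like(data) - 1.0)
        return data * scale

    @configclass
    class ScalingNoiseCfg(NoiseCfg):
        func = scaling_noise
        amplitude: float = 0.05

    cfg = ScalingNoiseCfg(amplitude=0.1)
    noisy = cfg.func(torch.ones(4), cfg)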
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/noise/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module containing different noise models implementations. The noise models are implemented as functions that take in a tensor and a configuration and return a tensor with the noise applied. These functions are then used in the :class:`NoiseCfg` configuration class. Usage: .. code-block:: python import torch from omni.isaac.orbit.utils.noise import AdditiveGaussianNoiseCfg # create a random tensor my_tensor = torch.rand(128, 128, device="cuda") # create a noise configuration cfg = AdditiveGaussianNoiseCfg(mean=0.0, std=1.0) # apply the noise my_noisified_tensor = cfg.func(my_tensor, cfg) """ from .noise_cfg import NoiseCfg # noqa: F401 from .noise_cfg import AdditiveGaussianNoiseCfg, AdditiveUniformNoiseCfg, ConstantBiasNoiseCfg
909
Python
29.333332
107
0.753575
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/asset_base_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from dataclasses import MISSING from typing import Literal from omni.isaac.orbit.sim import SpawnerCfg from omni.isaac.orbit.utils import configclass from .asset_base import AssetBase @configclass class AssetBaseCfg: """The base configuration class for an asset's parameters. Please see the :class:`AssetBase` class for more information on the asset class. """ @configclass class InitialStateCfg: """Initial state of the asset. This defines the default initial state of the asset when it is spawned into the simulation, as well as the default state when the simulation is reset. After parsing the initial state, the asset class stores this information in the :attr:`data` attribute of the asset class. This can then be accessed by the user to modify the state of the asset during the simulation, for example, at resets. """ # root position pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Position of the root in simulation world frame. Defaults to (0.0, 0.0, 0.0).""" rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) """Quaternion rotation (w, x, y, z) of the root in simulation world frame. Defaults to (1.0, 0.0, 0.0, 0.0). """ class_type: type[AssetBase] = MISSING """The associated asset class. The class should inherit from :class:`omni.isaac.orbit.assets.asset_base.AssetBase`. """ prim_path: str = MISSING """Prim path (or expression) to the asset. .. note:: The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which will be replaced with the environment namespace. Example: ``{ENV_REGEX_NS}/Robot`` will be replaced with ``/World/envs/env_.*/Robot``. """ spawn: SpawnerCfg | None = None """Spawn configuration for the asset. Defaults to None. If None, then no prims are spawned by the asset class. Instead, it is assumed that the asset is already present in the scene. """ init_state: InitialStateCfg = InitialStateCfg() """Initial state of the rigid object. Defaults to identity pose.""" collision_group: Literal[0, -1] = 0 """Collision group of the asset. Defaults to ``0``. * ``-1``: global collision group (collides with all assets in the scene). * ``0``: local collision group (collides with other assets in the same environment). """ debug_vis: bool = False """Whether to enable debug visualization for the asset. Defaults to ``False``."""
2,721
Python
33.455696
108
0.668872
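A hedged instantiation sketch for ``AssetBaseCfg``. It assumes ``GroundPlaneCfg`` is available from ``omni.isaac.orbit.sim`` (a common spawner in this codebase); physics-enabled assets would use a concrete subclass such as ``RigidObjectCfg`` instead.

.. code-block:: python

    import omni.isaac.orbit.sim as sim_utils
    from omni.isaac.orbit.assets import AssetBaseCfg

    # a static (non-physics) ground-plane asset, placed slightly below the origin
    ground_cfg = AssetBaseCfg(
        prim_path="/World/ground",
        spawn=sim_utils.GroundPlaneCfg(),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.0)),
    )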
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-package for different assets, such as rigid objects and articulations. An asset is a physical object that can be spawned in the simulation. The class handles both the spawning of the asset into the USD stage as well as initialization of necessary physics handles to interact with the asset. Upon construction of the asset instance, the prim corresponding to the asset is spawned into the USD stage if the spawn configuration is not None. The spawn configuration is defined in the :attr:`AssetBaseCfg.spawn` attribute. In case the configured :attr:`AssetBaseCfg.prim_path` is an expression, then the prim is spawned at all the matching paths. Otherwise, a single prim is spawned at the configured path. For more information on the spawn configuration, see the :mod:`omni.isaac.orbit.sim.spawners` module. The asset class also registers callbacks for the stage play/stop events. These are used to construct the physics handles for the asset as the physics engine is only available when the stage is playing. Additionally, the class registers a callback for debug visualization of the asset. This can be enabled by setting the :attr:`AssetBaseCfg.debug_vis` attribute to True. The asset class follows the following naming convention for its methods: * **set_xxx()**: These are used to only set the buffers into the :attr:`data` instance. However, they do not write the data into the simulator. The writing of data only happens when the :meth:`write_data_to_sim` method is called. * **write_xxx_to_sim()**: These are used to set the buffers into the :attr:`data` instance and write the corresponding data into the simulator as well. * **update(dt)**: These are used to update the buffers in the :attr:`data` instance. This should be called after a simulation step is performed. The main reason to separate the ``set`` and ``write`` operations is to provide flexibility to the user when they need to perform a post-processing operation of the buffers before applying them into the simulator. A common example for this is dealing with explicit actuator models where the specified joint targets are not directly applied to the simulator but are instead used to compute the corresponding actuator torques. """ from .articulation import Articulation, ArticulationCfg, ArticulationData from .asset_base import AssetBase from .asset_base_cfg import AssetBaseCfg from .rigid_object import RigidObject, RigidObjectCfg, RigidObjectData
2,567
Python
56.066665
101
0.791196
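The set/write/update convention described above, sketched as one control step. This is illustrative only: ``robot`` stands for an already-constructed ``Articulation``, ``sim`` for a running ``SimulationContext``, and ``joint_pos_des`` for a target tensor computed elsewhere.

.. code-block:: python

    # sketch of one control step (hypothetical names, see above)
    robot.set_joint_position_target(joint_pos_des)  # set: fills internal buffers only
    robot.write_data_to_sim()  # write: pushes buffers (or computed actuator torques) to the simulator
    sim.step()
    robot.update(dt=sim.get_physics_dt())  # update: refreshes robot.data from the simulation state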
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/asset_base.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations import inspect import re import weakref from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any import omni.kit.app import omni.timeline import omni.isaac.orbit.sim as sim_utils if TYPE_CHECKING: from .asset_base_cfg import AssetBaseCfg class AssetBase(ABC): """The base interface class for assets. An asset corresponds to any physics-enabled object that can be spawned in the simulation. These include rigid objects, articulated objects, deformable objects etc. The core functionality of an asset is to provide a set of buffers that can be used to interact with the simulator. The buffers are updated by the asset class and can be written into the simulator using the their respective ``write`` methods. This allows a convenient way to perform post-processing operations on the buffers before writing them into the simulator and obtaining the corresponding simulation results. The class handles both the spawning of the asset into the USD stage as well as initialization of necessary physics handles to interact with the asset. Upon construction of the asset instance, the prim corresponding to the asset is spawned into the USD stage if the spawn configuration is not None. The spawn configuration is defined in the :attr:`AssetBaseCfg.spawn` attribute. In case the configured :attr:`AssetBaseCfg.prim_path` is an expression, then the prim is spawned at all the matching paths. Otherwise, a single prim is spawned at the configured path. For more information on the spawn configuration, see the :mod:`omni.isaac.orbit.sim.spawners` module. Unlike Isaac Sim interface, where one usually needs to call the :meth:`omni.isaac.core.prims.XFormPrimView.initialize` method to initialize the PhysX handles, the asset class automatically initializes and invalidates the PhysX handles when the stage is played/stopped. This is done by registering callbacks for the stage play/stop events. Additionally, the class registers a callback for debug visualization of the asset if a debug visualization is implemented in the asset class. This can be enabled by setting the :attr:`AssetBaseCfg.debug_vis` attribute to True. The debug visualization is implemented through the :meth:`_set_debug_vis_impl` and :meth:`_debug_vis_callback` methods. """ def __init__(self, cfg: AssetBaseCfg): """Initialize the asset base. Args: cfg: The configuration class for the asset. Raises: RuntimeError: If no prims found at input prim path or prim path expression. """ # store inputs self.cfg = cfg # flag for whether the asset is initialized self._is_initialized = False # check if base asset path is valid # note: currently the spawner does not work if there is a regex pattern in the leaf # For example, if the prim path is "/World/Robot_[1,2]" since the spawner will not # know which prim to spawn. This is a limitation of the spawner and not the asset. 
asset_path = self.cfg.prim_path.split("/")[-1] asset_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", asset_path) is None # spawn the asset if self.cfg.spawn is not None and not asset_path_is_regex: self.cfg.spawn.func( self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.init_state.pos, orientation=self.cfg.init_state.rot, ) # check that spawn was successful matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) if len(matching_prims) == 0: raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") # note: Use weakref on all callbacks to ensure that this object can be deleted when its destructor is called. # add callbacks for stage play/stop # The order is set to 10 which is arbitrary but should be lower priority than the default order of 0 timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( int(omni.timeline.TimelineEventType.PLAY), lambda event, obj=weakref.proxy(self): obj._initialize_callback(event), order=10, ) self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( int(omni.timeline.TimelineEventType.STOP), lambda event, obj=weakref.proxy(self): obj._invalidate_initialize_callback(event), order=10, ) # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) self._debug_vis_handle = None # set initial state of debug visualization self.set_debug_vis(self.cfg.debug_vis) def __del__(self): """Unsubscribe from the callbacks.""" # clear physics events handles if self._initialize_handle: self._initialize_handle.unsubscribe() self._initialize_handle = None if self._invalidate_initialize_handle: self._invalidate_initialize_handle.unsubscribe() self._invalidate_initialize_handle = None # clear debug visualization if self._debug_vis_handle: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None """ Properties """ @property @abstractmethod def num_instances(self) -> int: """Number of instances of the asset. This is equal to the number of asset instances per environment multiplied by the number of environments. """ return NotImplementedError @property def device(self) -> str: """Memory device for computation.""" return self._device @property @abstractmethod def data(self) -> Any: """Data related to the asset.""" return NotImplementedError @property def has_debug_vis_implementation(self) -> bool: """Whether the asset has a debug visualization implemented.""" # check if function raises NotImplementedError source_code = inspect.getsource(self._set_debug_vis_impl) return "NotImplementedError" not in source_code """ Operations. """ def set_debug_vis(self, debug_vis: bool) -> bool: """Sets whether to visualize the asset data. Args: debug_vis: Whether to visualize the asset data. Returns: Whether the debug visualization was successfully set. False if the asset does not support debug visualization. 
""" # check if debug visualization is supported if not self.has_debug_vis_implementation: return False # toggle debug visualization objects self._set_debug_vis_impl(debug_vis) # toggle debug visualization handles if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: app_interface = omni.kit.app.get_app_interface() self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) ) else: # remove the subscriber if it exists if self._debug_vis_handle is not None: self._debug_vis_handle.unsubscribe() self._debug_vis_handle = None # return success return True @abstractmethod def reset(self, env_ids: Sequence[int] | None = None): """Resets all internal buffers of selected environments. Args: env_ids: The indices of the object to reset. Defaults to None (all instances). """ raise NotImplementedError @abstractmethod def write_data_to_sim(self): """Writes data to the simulator.""" raise NotImplementedError @abstractmethod def update(self, dt: float): """Update the internal buffers. The time step ``dt`` is used to compute numerical derivatives of quantities such as joint accelerations which are not provided by the simulator. Args: dt: The amount of time passed from last ``update`` call. """ raise NotImplementedError """ Implementation specific. """ @abstractmethod def _initialize_impl(self): """Initializes the PhysX handles and internal buffers.""" raise NotImplementedError def _set_debug_vis_impl(self, debug_vis: bool): """Set debug visualization into visualization objects. This function is responsible for creating the visualization objects if they don't exist and input ``debug_vis`` is True. If the visualization objects exist, the function should set their visibility into the stage. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") def _debug_vis_callback(self, event): """Callback for debug visualization. This function calls the visualization objects and sets the data to visualize into them. """ raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") """ Internal simulation callbacks. """ def _initialize_callback(self, event): """Initializes the scene elements. Note: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ if not self._is_initialized: # obtain simulation related information sim = sim_utils.SimulationContext.instance() if sim is None: raise RuntimeError("SimulationContext is not initialized! Please initialize SimulationContext first.") self._backend = sim.backend self._device = sim.device # initialize the asset self._initialize_impl() # set flag self._is_initialized = True def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" self._is_initialized = False
10,684
Python
39.782443
118
0.659023
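To make the abstract surface of ``AssetBase`` concrete, here is a minimal subclass skeleton. It only shows which members a real asset must implement; an actual asset also needs a valid configuration and a running simulation before it can be used.

.. code-block:: python

    from __future__ import annotations

    from collections.abc import Sequence
    from typing import Any

    from omni.isaac.orbit.assets import AssetBase

    class DummyAsset(AssetBase):
        """Minimal skeleton of a concrete asset (illustrative only)."""

        @property
        def num_instances(self) -> int:
            return 1

        @property
        def data(self) -> Any:
            # buffers created in _initialize_impl and refreshed in update()
            return self._data

        def reset(self, env_ids: Sequence[int] | None = None):
            pass

        def write_data_to_sim(self):
            pass

        def update(self, dt: float):
            pass

        def _initialize_impl(self):
            # acquire PhysX views/handles here; called when the timeline starts playing
            self._data = {}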
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/rigid_object/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Sub-module for rigid object assets.""" from .rigid_object import RigidObject from .rigid_object_cfg import RigidObjectCfg from .rigid_object_data import RigidObjectData
296
Python
25.999998
56
0.777027
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/rigid_object/rigid_object_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations from omni.isaac.orbit.utils import configclass from ..asset_base_cfg import AssetBaseCfg from .rigid_object import RigidObject @configclass class RigidObjectCfg(AssetBaseCfg): """Configuration parameters for a rigid object.""" @configclass class InitialStateCfg(AssetBaseCfg.InitialStateCfg): """Initial state of the rigid body.""" lin_vel: tuple[float, float, float] = (0.0, 0.0, 0.0) """Linear velocity of the root in simulation world frame. Defaults to (0.0, 0.0, 0.0).""" ang_vel: tuple[float, float, float] = (0.0, 0.0, 0.0) """Angular velocity of the root in simulation world frame. Defaults to (0.0, 0.0, 0.0).""" ## # Initialize configurations. ## class_type: type = RigidObject init_state: InitialStateCfg = InitialStateCfg() """Initial state of the rigid object. Defaults to identity pose with zero velocity."""
1,065
Python
29.457142
98
0.683568
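A minimal sketch of how this configuration might be filled in for a simple dynamic object. The prim path and the spawner fields come from the ``omni.isaac.orbit.sim`` spawner configs and are assumptions about the installed version, not part of the file above.

.. code-block:: python

    import omni.isaac.orbit.sim as sim_utils
    from omni.isaac.orbit.assets import RigidObjectCfg

    # a dynamic cuboid dropped slightly above the ground, with a small initial push
    cuboid_cfg = RigidObjectCfg(
        prim_path="/World/envs/env_.*/Cuboid",
        spawn=sim_utils.CuboidCfg(
            size=(0.2, 0.2, 0.2),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
            collision_props=sim_utils.CollisionPropertiesCfg(),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(
            pos=(0.0, 0.0, 0.5),
            lin_vel=(0.5, 0.0, 0.0),
        ),
    )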
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/rigid_object/rigid_object_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
from dataclasses import dataclass


@dataclass
class RigidObjectData:
    """Data container for a rigid object."""

    ##
    # Properties.
    ##

    body_names: list[str] = None
    """Body names in the order parsed by the simulation view."""

    ##
    # Default states.
    ##

    default_root_state: torch.Tensor = None
    """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13)."""

    ##
    # Frame states.
    ##

    root_state_w: torch.Tensor = None
    """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13)."""

    root_vel_b: torch.Tensor = None
    """Root velocity ``[lin_vel, ang_vel]`` in base frame. Shape is (num_instances, 6)."""

    projected_gravity_b: torch.Tensor = None
    """Projection of the gravity direction on base frame. Shape is (num_instances, 3)."""

    heading_w: torch.Tensor = None
    """Yaw heading of the base frame (in radians). Shape is (num_instances,).

    Note:
        This quantity is computed by assuming that the forward-direction of the base
        frame is along x-direction, i.e. :math:`(1, 0, 0)`.
    """

    body_state_w: torch.Tensor = None
    """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame.
    Shape is (num_instances, num_bodies, 13)."""

    body_acc_w: torch.Tensor = None
    """Acceleration of all bodies. Shape is (num_instances, num_bodies, 6).

    Note:
        This quantity is computed based on the rigid body state from the last step.
    """

    """
    Properties
    """

    @property
    def root_pos_w(self) -> torch.Tensor:
        """Root position in simulation world frame. Shape is (num_instances, 3)."""
        return self.root_state_w[:, :3]

    @property
    def root_quat_w(self) -> torch.Tensor:
        """Root orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4)."""
        return self.root_state_w[:, 3:7]

    @property
    def root_vel_w(self) -> torch.Tensor:
        """Root velocity in simulation world frame. Shape is (num_instances, 6)."""
        return self.root_state_w[:, 7:13]

    @property
    def root_lin_vel_w(self) -> torch.Tensor:
        """Root linear velocity in simulation world frame. Shape is (num_instances, 3)."""
        return self.root_state_w[:, 7:10]

    @property
    def root_ang_vel_w(self) -> torch.Tensor:
        """Root angular velocity in simulation world frame. Shape is (num_instances, 3)."""
        return self.root_state_w[:, 10:13]

    @property
    def root_lin_vel_b(self) -> torch.Tensor:
        """Root linear velocity in base frame. Shape is (num_instances, 3)."""
        return self.root_vel_b[:, 0:3]

    @property
    def root_ang_vel_b(self) -> torch.Tensor:
        """Root angular velocity in base frame. Shape is (num_instances, 3)."""
        return self.root_vel_b[:, 3:6]

    @property
    def body_pos_w(self) -> torch.Tensor:
        """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3)."""
        return self.body_state_w[..., :3]

    @property
    def body_quat_w(self) -> torch.Tensor:
        """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4)."""
        return self.body_state_w[..., 3:7]

    @property
    def body_vel_w(self) -> torch.Tensor:
        """Velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 6)."""
        return self.body_state_w[..., 7:13]

    @property
    def body_lin_vel_w(self) -> torch.Tensor:
        """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3)."""
        return self.body_state_w[..., 7:10]

    @property
    def body_ang_vel_w(self) -> torch.Tensor:
        """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3)."""
        return self.body_state_w[..., 10:13]

    @property
    def body_lin_acc_w(self) -> torch.Tensor:
        """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3)."""
        return self.body_acc_w[..., 0:3]

    @property
    def body_ang_acc_w(self) -> torch.Tensor:
        """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3)."""
        return self.body_acc_w[..., 3:6]
4,594
Python
33.037037
120
0.622987
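Since the container is a plain dataclass, the derived properties are just views into the state tensors. A small self-contained check, with fabricated values, illustrates the ``[pos, quat, lin_vel, ang_vel]`` layout:

.. code-block:: python

    import torch

    from omni.isaac.orbit.assets.rigid_object import RigidObjectData

    data = RigidObjectData()
    # fabricate a single-instance state: pos(3), quat wxyz(4), lin_vel(3), ang_vel(3)
    data.root_state_w = torch.tensor(
        [[0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.5]]
    )

    assert data.root_pos_w.shape == (1, 3)  # slice [:, :3]
    assert data.root_quat_w[0, 0] == 1.0  # identity quaternion, (w, x, y, z) ordering
    assert torch.equal(data.root_lin_vel_w, data.root_vel_w[:, 0:3])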
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/rigid_object/rigid_object.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import torch
import warnings
from collections.abc import Sequence
from typing import TYPE_CHECKING

import carb
import omni.physics.tensors.impl.api as physx
from pxr import UsdPhysics

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
import omni.isaac.orbit.utils.string as string_utils

from ..asset_base import AssetBase
from .rigid_object_data import RigidObjectData

if TYPE_CHECKING:
    from .rigid_object_cfg import RigidObjectCfg


class RigidObject(AssetBase):
    """A rigid object asset class.

    Rigid objects are assets composed of rigid bodies. They can be used to represent dynamic objects
    such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution.

    For an asset to be considered a rigid object, the root prim of the asset must have the
    `USD RigidBodyAPI`_ applied to it. This API is used to define the simulation properties of the
    rigid body. On playing the simulation, the physics engine will automatically register the rigid
    body and create a corresponding rigid body handle. This handle can be accessed using the
    :attr:`root_physx_view` attribute.

    .. note::

        For users familiar with Isaac Sim, the PhysX view class API is not exactly the same as the
        Isaac Sim view class API. Similar to Orbit, Isaac Sim wraps around the PhysX view API. However,
        as of now (2023.1 release), we see a large difference in initializing the view classes in
        Isaac Sim. This is because the view classes in Isaac Sim perform additional USD-related
        operations which are slow and also not required.

    .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html
    """

    cfg: RigidObjectCfg
    """Configuration instance for the rigid object."""

    def __init__(self, cfg: RigidObjectCfg):
        """Initialize the rigid object.

        Args:
            cfg: A configuration instance.
        """
        super().__init__(cfg)
        # container for data access
        self._data = RigidObjectData()

    """
    Properties
    """

    @property
    def data(self) -> RigidObjectData:
        return self._data

    @property
    def num_instances(self) -> int:
        return self.root_physx_view.count

    @property
    def num_bodies(self) -> int:
        """Number of bodies in the asset."""
        return 1

    @property
    def body_names(self) -> list[str]:
        """Ordered names of bodies in the rigid object."""
        prim_paths = self.root_physx_view.prim_paths[: self.num_bodies]
        return [path.split("/")[-1] for path in prim_paths]

    @property
    def root_physx_view(self) -> physx.RigidBodyView:
        """Rigid body view for the asset (PhysX).

        Note:
            Use this view with caution. It requires handling of tensors in a specific way.
        """
        return self._root_physx_view

    @property
    def body_physx_view(self) -> physx.RigidBodyView:
        """Rigid body view for the asset (PhysX).

        .. deprecated:: v0.3.0
            The attribute 'body_physx_view' will be removed in v0.4.0. Please use :attr:`root_physx_view` instead.
        """
        dep_msg = "The attribute 'body_physx_view' will be removed in v0.4.0. Please use 'root_physx_view' instead."
        warnings.warn(dep_msg, DeprecationWarning)
        carb.log_error(dep_msg)
        return self.root_physx_view

    """
    Operations.
    """

    def reset(self, env_ids: Sequence[int] | None = None):
        # resolve all indices
        if env_ids is None:
            env_ids = slice(None)
        # reset external wrench
        self._external_force_b[env_ids] = 0.0
        self._external_torque_b[env_ids] = 0.0
        # reset last body vel
        self._last_body_vel_w[env_ids] = 0.0

    def write_data_to_sim(self):
        """Write external wrench to the simulation.

        Note:
            We write external wrench to the simulation here since this function is called before the simulation step.
            This ensures that the external wrench is applied at every simulation step.
        """
        # write external wrench
        if self.has_external_wrench:
            self.root_physx_view.apply_forces_and_torques_at_position(
                force_data=self._external_force_b.view(-1, 3),
                torque_data=self._external_torque_b.view(-1, 3),
                position_data=None,
                indices=self._ALL_BODY_INDICES,
                is_global=False,
            )

    def update(self, dt: float):
        # -- root-state (note: we roll the quaternion to match the convention used in Isaac Sim -- wxyz)
        self._data.root_state_w[:, :7] = self.root_physx_view.get_transforms()
        self._data.root_state_w[:, 3:7] = math_utils.convert_quat(self._data.root_state_w[:, 3:7], to="wxyz")
        self._data.root_state_w[:, 7:] = self.root_physx_view.get_velocities()
        # -- body-state (note: for rigid objects, we only have one body so we just copy the root state)
        self._data.body_state_w[:] = self._data.root_state_w.view(-1, self.num_bodies, 13)
        # -- update common data
        self._update_common_data(dt)

    def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]:
        """Find bodies in the asset based on the name keys.

        Please check the :func:`omni.isaac.orbit.utils.string.resolve_matching_names` function for more
        information on the name matching.

        Args:
            name_keys: A regular expression or a list of regular expressions to match the body names.
            preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False.

        Returns:
            A tuple of lists containing the body indices and names.
        """
        return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order)

    """
    Operations - Write to simulation.
    """

    def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None):
        """Set the root state over selected environment indices into the simulation.

        The root state comprises the Cartesian position, quaternion orientation in (w, x, y, z), and linear
        and angular velocity. All the quantities are in the simulation frame.

        Args:
            root_state: Root state in simulation frame. Shape is (len(env_ids), 13).
            env_ids: Environment indices. If None, then all indices are used.
        """
        # set into simulation
        self.write_root_pose_to_sim(root_state[:, :7], env_ids=env_ids)
        self.write_root_velocity_to_sim(root_state[:, 7:], env_ids=env_ids)

    def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None):
        """Set the root pose over selected environment indices into the simulation.

        The root pose comprises the Cartesian position and quaternion orientation in (w, x, y, z).

        Args:
            root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7).
            env_ids: Environment indices. If None, then all indices are used.
        """
        # resolve all indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        # note: we need to do this here since tensors are not set into simulation until step.
        # set into internal buffers
        self._data.root_state_w[env_ids, :7] = root_pose.clone()
        # convert root quaternion from wxyz to xyzw
        root_poses_xyzw = self._data.root_state_w[:, :7].clone()
        root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw")
        # set into simulation
        self.root_physx_view.set_transforms(root_poses_xyzw, indices=physx_env_ids)

    def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
        """Set the root velocity over selected environment indices into the simulation.

        Args:
            root_velocity: Root velocities in simulation frame. Shape is (len(env_ids), 6).
            env_ids: Environment indices. If None, then all indices are used.
        """
        # resolve all indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        # note: we need to do this here since tensors are not set into simulation until step.
        # set into internal buffers
        self._data.root_state_w[env_ids, 7:] = root_velocity.clone()
        # set into simulation
        self.root_physx_view.set_velocities(self._data.root_state_w[:, 7:], indices=physx_env_ids)

    """
    Operations - Setters.
    """

    def set_external_force_and_torque(
        self,
        forces: torch.Tensor,
        torques: torch.Tensor,
        body_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Set external force and torque to apply on the asset's bodies in their local frame.

        For many applications, we want to keep the applied external force on rigid bodies constant over
        a period of time (for instance, during the policy control). This function allows us to store the
        external force and torque into buffers which are then applied to the simulation at every step.

        .. caution::
            If the function is called with empty forces and torques, then this function disables the application
            of external wrench to the simulation.

            .. code-block:: python

                # example of disabling external wrench
                asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3))

        .. note::
            This function does not apply the external wrench to the simulation. It only fills the buffers with
            the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function
            right before the simulation step.

        Args:
            forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
            torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
            body_ids: Body indices to apply external wrench to. Defaults to None (all bodies).
            env_ids: Environment indices to apply external wrench to. Defaults to None (all instances).
        """
        if forces.any() or torques.any():
            self.has_external_wrench = True
            # resolve all indices
            # -- env_ids
            if env_ids is None:
                env_ids = self._ALL_INDICES
            elif not isinstance(env_ids, torch.Tensor):
                env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device)
            # -- body_ids
            if body_ids is None:
                body_ids = torch.arange(self.num_bodies, dtype=torch.long, device=self.device)
            elif isinstance(body_ids, slice):
                body_ids = torch.arange(self.num_bodies, dtype=torch.long, device=self.device)[body_ids]
            elif not isinstance(body_ids, torch.Tensor):
                body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device)
            # note: we need to do this complicated indexing since torch doesn't support multi-indexing
            # create global body indices from env_ids and env_body_ids
            # (env_id * total_bodies_per_env) + body_id
            indices = body_ids.repeat(len(env_ids), 1) + env_ids.unsqueeze(1) * self.num_bodies
            indices = indices.view(-1)
            # set into internal buffers
            # note: these are applied in the write_to_sim function
            self._external_force_b.flatten(0, 1)[indices] = forces.flatten(0, 1)
            self._external_torque_b.flatten(0, 1)[indices] = torques.flatten(0, 1)
        else:
            self.has_external_wrench = False

    """
    Internal helper.
    """

    def _initialize_impl(self):
        # create simulation view
        self._physics_sim_view = physx.create_simulation_view(self._backend)
        self._physics_sim_view.set_subspace_roots("/")
        # obtain the first prim in the regex expression (all others are assumed to be a copy of this)
        template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path)
        if template_prim is None:
            raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.")
        template_prim_path = template_prim.GetPath().pathString
        # find rigid root prims
        root_prims = sim_utils.get_all_matching_child_prims(
            template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI)
        )
        if len(root_prims) != 1:
            raise RuntimeError(
                f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'."
                f" Found multiple '{root_prims}' under '{template_prim_path}'."
            )
        # resolve root prim back into regex expression
        root_prim_path = root_prims[0].GetPath().pathString
        root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :]
        # -- object view
        self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*"))
        # log information about the rigid body
        carb.log_info(f"Rigid body initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.")
        carb.log_info(f"Number of instances: {self.num_instances}")
        carb.log_info(f"Number of bodies: {self.num_bodies}")
        carb.log_info(f"Body names: {self.body_names}")
        # create buffers
        self._create_buffers()
        # process configuration
        self._process_cfg()

    def _create_buffers(self):
        """Create buffers for storing data."""
        # constants
        self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device)
        self._ALL_BODY_INDICES = torch.arange(
            self.root_physx_view.count * self.num_bodies, dtype=torch.long, device=self.device
        )
        self.GRAVITY_VEC_W = torch.tensor((0.0, 0.0, -1.0), device=self.device).repeat(self.num_instances, 1)
        self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self.num_instances, 1)
        # external forces and torques
        self.has_external_wrench = False
        self._external_force_b = torch.zeros((self.num_instances, self.num_bodies, 3), device=self.device)
        self._external_torque_b = torch.zeros_like(self._external_force_b)
        # asset data
        # -- properties
        self._data.body_names = self.body_names
        # -- root states
        self._data.root_state_w = torch.zeros(self.num_instances, 13, device=self.device)
        self._data.root_state_w[:, 3] = 1.0  # set default quaternion to (1, 0, 0, 0)
        self._data.default_root_state = torch.zeros_like(self._data.root_state_w)
        self._data.default_root_state[:, 3] = 1.0  # set default quaternion to (1, 0, 0, 0)
        # -- body states
        self._data.body_state_w = torch.zeros(self.num_instances, self.num_bodies, 13, device=self.device)
        self._data.body_state_w[:, :, 3] = 1.0  # set default quaternion to (1, 0, 0, 0)
        # -- post-computed
        self._data.root_vel_b = torch.zeros(self.num_instances, 6, device=self.device)
        self._data.projected_gravity_b = torch.zeros(self.num_instances, 3, device=self.device)
        self._data.heading_w = torch.zeros(self.num_instances, device=self.device)
        self._data.body_acc_w = torch.zeros(self.num_instances, self.num_bodies, 6, device=self.device)
        # history buffers for quantities
        # -- used to compute body accelerations numerically
        self._last_body_vel_w = torch.zeros(self.num_instances, self.num_bodies, 6, device=self.device)

    def _process_cfg(self):
        """Post processing of configuration parameters."""
        # default state
        # -- root state
        # note: we cast to tuple to avoid torch/numpy type mismatch.
        default_root_state = (
            tuple(self.cfg.init_state.pos)
            + tuple(self.cfg.init_state.rot)
            + tuple(self.cfg.init_state.lin_vel)
            + tuple(self.cfg.init_state.ang_vel)
        )
        default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device)
        self._data.default_root_state = default_root_state.repeat(self.num_instances, 1)

    def _update_common_data(self, dt: float):
        """Update common quantities related to rigid objects.

        Note:
            This has been separated from the update function to allow child classes to override the update
            function without having to worry about updating the common data.
        """
        # -- body acceleration
        self._data.body_acc_w[:] = (self._data.body_state_w[..., 7:] - self._last_body_vel_w) / dt
        self._last_body_vel_w[:] = self._data.body_state_w[..., 7:]
        # -- root state in body frame
        self._data.root_vel_b[:, 0:3] = math_utils.quat_rotate_inverse(
            self._data.root_quat_w, self._data.root_lin_vel_w
        )
        self._data.root_vel_b[:, 3:6] = math_utils.quat_rotate_inverse(
            self._data.root_quat_w, self._data.root_ang_vel_w
        )
        self._data.projected_gravity_b[:] = math_utils.quat_rotate_inverse(self._data.root_quat_w, self.GRAVITY_VEC_W)
        # -- heading direction of root
        forward_w = math_utils.quat_apply(self._data.root_quat_w, self.FORWARD_VEC_B)
        self._data.heading_w[:] = torch.atan2(forward_w[:, 1], forward_w[:, 0])

    """
    Internal simulation callbacks.
    """

    def _invalidate_initialize_callback(self, event):
        """Invalidates the scene elements."""
        # call parent
        super()._invalidate_initialize_callback(event)
        # set all existing views to None to invalidate them
        self._physics_sim_view = None
        self._root_physx_view = None
18,412
Python
44.01956
119
0.632305
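Putting the rigid-object API together: a typical loop buffers an external wrench, writes it before each step, and refreshes the data container afterwards. The sketch below assumes an already-initialized ``obj`` (a ``RigidObject``) and a playing ``sim`` (a ``SimulationContext``); the 10 N value is arbitrary.

.. code-block:: python

    import torch

    sim_dt = sim.get_physics_dt()

    # apply a constant upward force of 10 N on the single body of every instance
    forces = torch.zeros(obj.num_instances, obj.num_bodies, 3, device=obj.device)
    forces[..., 2] = 10.0
    torques = torch.zeros_like(forces)
    obj.set_external_force_and_torque(forces, torques)  # fills internal buffers only

    for _ in range(100):
        obj.write_data_to_sim()  # pushes the buffered wrench into PhysX
        sim.step()
        obj.update(sim_dt)  # refresh obj.data from the simulation
    print(obj.data.root_lin_vel_w)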
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/config/cassie.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for Agility robots.

The following configurations are available:

* :obj:`CASSIE_CFG`: Agility Cassie robot with simple PD controller for the legs

Reference: https://github.com/UMich-BipedLab/Cassie_Model/blob/master/urdf/cassie.urdf
"""

from __future__ import annotations

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

from ..articulation import ArticulationCfg

##
# Configuration
##

CASSIE_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/Agility/Cassie/cassie.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.9),
        joint_pos={
            "hip_abduction_left": 0.1,
            "hip_rotation_left": 0.0,
            "hip_flexion_left": 1.0,
            "thigh_joint_left": -1.8,
            "ankle_joint_left": 1.57,
            "toe_joint_left": -1.57,
            "hip_abduction_right": -0.1,
            "hip_rotation_right": 0.0,
            "hip_flexion_right": 1.0,
            "thigh_joint_right": -1.8,
            "ankle_joint_right": 1.57,
            "toe_joint_right": -1.57,
        },
        joint_vel={".*": 0.0},
    ),
    soft_joint_pos_limit_factor=0.9,
    actuators={
        "legs": ImplicitActuatorCfg(
            joint_names_expr=["hip_.*", "thigh_.*", "ankle_.*"],
            effort_limit=200.0,
            velocity_limit=10.0,
            stiffness={
                "hip_abduction.*": 100.0,
                "hip_rotation.*": 100.0,
                "hip_flexion.*": 200.0,
                "thigh_joint.*": 200.0,
                "ankle_joint.*": 200.0,
            },
            damping={
                "hip_abduction.*": 3.0,
                "hip_rotation.*": 3.0,
                "hip_flexion.*": 6.0,
                "thigh_joint.*": 6.0,
                "ankle_joint.*": 6.0,
            },
        ),
        "toes": ImplicitActuatorCfg(
            joint_names_expr=["toe_.*"],
            effort_limit=20.0,
            velocity_limit=10.0,
            stiffness={
                "toe_joint.*": 20.0,
            },
            damping={
                "toe_joint.*": 1.0,
            },
        ),
    },
)
2,980
Python
29.731958
110
0.537919
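A minimal sketch of consuming this configuration. The scene-specific prim path and the ``replace`` helper (assumed here to be provided by ``configclass``) are illustrative, not part of the file above.

.. code-block:: python

    from omni.isaac.orbit.assets import Articulation
    from omni.isaac.orbit.assets.config.cassie import CASSIE_CFG

    # clone the config with a scene-specific prim path and create the robot asset
    robot = Articulation(CASSIE_CFG.replace(prim_path="/World/envs/env_.*/Robot"))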
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/articulation/articulation_data.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import torch
from dataclasses import dataclass

from ..rigid_object import RigidObjectData


@dataclass
class ArticulationData(RigidObjectData):
    """Data container for an articulation."""

    ##
    # Properties.
    ##

    joint_names: list[str] = None
    """Joint names in the order parsed by the simulation view."""

    ##
    # Default states.
    ##

    default_joint_pos: torch.Tensor = None
    """Default joint positions of all joints. Shape is (num_instances, num_joints)."""

    default_joint_vel: torch.Tensor = None
    """Default joint velocities of all joints. Shape is (num_instances, num_joints)."""

    ##
    # Joint states <- From simulation.
    ##

    joint_pos: torch.Tensor = None
    """Joint positions of all joints. Shape is (num_instances, num_joints)."""

    joint_vel: torch.Tensor = None
    """Joint velocities of all joints. Shape is (num_instances, num_joints)."""

    joint_acc: torch.Tensor = None
    """Joint acceleration of all joints. Shape is (num_instances, num_joints)."""

    ##
    # Joint commands -- Set into simulation.
    ##

    joint_pos_target: torch.Tensor = None
    """Joint position targets commanded by the user. Shape is (num_instances, num_joints).

    For an implicit actuator model, the targets are directly set into the simulation.
    For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
    which are then set into the simulation.
    """

    joint_vel_target: torch.Tensor = None
    """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints).

    For an implicit actuator model, the targets are directly set into the simulation.
    For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
    which are then set into the simulation.
    """

    joint_effort_target: torch.Tensor = None
    """Joint effort targets commanded by the user. Shape is (num_instances, num_joints).

    For an implicit actuator model, the targets are directly set into the simulation.
    For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
    which are then set into the simulation.
    """

    joint_stiffness: torch.Tensor = None
    """Joint stiffness provided to simulation. Shape is (num_instances, num_joints)."""

    joint_damping: torch.Tensor = None
    """Joint damping provided to simulation. Shape is (num_instances, num_joints)."""

    joint_armature: torch.Tensor = None
    """Joint armature provided to simulation. Shape is (num_instances, num_joints)."""

    joint_friction: torch.Tensor = None
    """Joint friction provided to simulation. Shape is (num_instances, num_joints)."""

    ##
    # Joint commands -- Explicit actuators.
    ##

    computed_torque: torch.Tensor = None
    """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints).

    This quantity is the raw torque output from the actuator model, before any clipping is applied.
    It is exposed for users who want to inspect the computations inside the actuator model.
    For instance, to penalize the learning agent for a difference between the computed and applied torques.

    Note: The torques are zero for implicit actuator models.
    """

    applied_torque: torch.Tensor = None
    """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints).

    These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the
    actuator model.

    Note: The torques are zero for implicit actuator models.
    """

    ##
    # Other Data.
    ##

    soft_joint_pos_limits: torch.Tensor = None
    """Joint positions limits for all joints. Shape is (num_instances, num_joints, 2)."""

    soft_joint_vel_limits: torch.Tensor = None
    """Joint velocity limits for all joints. Shape is (num_instances, num_joints)."""

    gear_ratio: torch.Tensor = None
    """Gear ratio for relating motor torques to applied joint torques. Shape is (num_instances, num_joints)."""
4,271
Python
34.305785
115
0.694919
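The explicit-actuator fields above are convenient for reward shaping. A short sketch, assuming an initialized ``Articulation`` named ``robot``:

.. code-block:: python

    import torch

    data = robot.data  # an ArticulationData instance

    # tracking error between commanded and measured joint positions, per environment
    pos_error = torch.norm(data.joint_pos_target - data.joint_pos, dim=-1)

    # penalty on torque clipped away by the actuator model (explicit actuators only;
    # both tensors are zero for implicit actuators, as noted in the docstrings above)
    torque_clip_penalty = torch.sum(torch.abs(data.computed_torque - data.applied_torque), dim=-1)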
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/articulation/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-module for rigid articulated assets."""

from .articulation import Articulation
from .articulation_cfg import ArticulationCfg
from .articulation_data import ArticulationData
304
Python
26.72727
56
0.792763
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/articulation/articulation.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# Flag for pyright to ignore type errors in this file.
# pyright: reportPrivateUsage=false

from __future__ import annotations

import torch
import warnings
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING

import carb
import omni.physics.tensors.impl.api as physx
from omni.isaac.core.utils.types import ArticulationActions
from pxr import UsdPhysics

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
import omni.isaac.orbit.utils.string as string_utils
from omni.isaac.orbit.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator

from ..rigid_object import RigidObject
from .articulation_data import ArticulationData

if TYPE_CHECKING:
    from .articulation_cfg import ArticulationCfg


class Articulation(RigidObject):
    """An articulation asset class.

    An articulation is a collection of rigid bodies connected by joints. The joints can be either
    fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc.
    However, the articulation class has currently been tested with revolute and prismatic joints.
    The class supports both floating-base and fixed-base articulations. The type of articulation
    is determined based on the root joint of the articulation. If the root joint is fixed, then
    the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base
    system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute.

    For an asset to be considered an articulation, the root prim of the asset must have the
    `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using
    the reduced coordinate formulation. On playing the simulation, the physics engine parses the
    articulation root prim and creates the corresponding articulation in the physics engine. The
    articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute.

    The articulation class is a subclass of the :class:`RigidObject` class. Therefore, it inherits
    all the functionality of the rigid object class. In case of an articulation, the
    :attr:`root_physx_view` attribute corresponds to the articulation root view and can be used
    to access the articulation related data.

    The articulation class also provides the functionality to augment the simulation of an articulated
    system with custom actuator models. These models can either be explicit or implicit, as detailed in
    the :mod:`omni.isaac.orbit.actuators` module. The actuator models are specified using the
    :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the
    corresponding actuator models, when the simulation is played.

    During the simulation step, the articulation class first applies the actuator models to compute
    the joint commands based on the user-specified targets. These joint commands are then applied
    into the simulation. The joint commands can be either position, velocity, or effort commands.
    As an example, the following snippet shows how this can be used for position commands:

    .. code-block:: python

        # an example instance of the articulation class
        my_articulation = Articulation(cfg)

        # set joint position targets
        my_articulation.set_joint_position_target(position)
        # propagate the actuator models and apply the computed commands into the simulation
        my_articulation.write_data_to_sim()

        # step the simulation using the simulation context
        sim_context.step()

        # update the articulation state, where dt is the simulation time step
        my_articulation.update(dt)

    .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html
    """

    cfg: ArticulationCfg
    """Configuration instance for the articulations."""

    def __init__(self, cfg: ArticulationCfg):
        """Initialize the articulation.

        Args:
            cfg: A configuration instance.
        """
        super().__init__(cfg)
        # container for data access
        self._data = ArticulationData()
        # data for storing actuator group
        self.actuators: dict[str, ActuatorBase] = dict.fromkeys(self.cfg.actuators.keys())

    """
    Properties
    """

    @property
    def data(self) -> ArticulationData:
        return self._data

    @property
    def is_fixed_base(self) -> bool:
        """Whether the articulation is a fixed-base or floating-base system."""
        return self.root_physx_view.shared_metatype.fixed_base

    @property
    def num_joints(self) -> int:
        """Number of joints in articulation."""
        return self.root_physx_view.shared_metatype.dof_count

    @property
    def num_fixed_tendons(self) -> int:
        """Number of fixed tendons in articulation."""
        return self.root_physx_view.max_fixed_tendons

    @property
    def num_bodies(self) -> int:
        """Number of bodies in articulation."""
        return self.root_physx_view.shared_metatype.link_count

    @property
    def joint_names(self) -> list[str]:
        """Ordered names of joints in articulation."""
        return self.root_physx_view.shared_metatype.dof_names

    @property
    def body_names(self) -> list[str]:
        """Ordered names of bodies in articulation."""
        return self.root_physx_view.shared_metatype.link_names

    @property
    def root_physx_view(self) -> physx.ArticulationView:
        """Articulation view for the asset (PhysX).

        Note:
            Use this view with caution. It requires handling of tensors in a specific way.
        """
        return self._root_physx_view

    @property
    def body_physx_view(self) -> physx.RigidBodyView:
        """Rigid body view for the asset (PhysX).

        .. deprecated:: v0.3.0
            In previous versions, this attribute returned the rigid body view over all the links of the articulation.
            However, this led to confusion with the link ordering as they were not ordered in the same way as the
            articulation view. Therefore, this attribute will be removed in v0.4.0. Please use the
            :attr:`root_physx_view` attribute instead.
        """
        dep_msg = "The attribute 'body_physx_view' will be removed in v0.4.0. Please use 'root_physx_view' instead."
        warnings.warn(dep_msg, DeprecationWarning)
        carb.log_error(dep_msg)
        return self._body_physx_view

    """
    Operations.
    """

    def reset(self, env_ids: Sequence[int] | None = None):
        super().reset(env_ids)
        # use ellipses object to skip initial indices.
        if env_ids is None:
            env_ids = slice(None)
        # reset actuators
        for actuator in self.actuators.values():
            actuator.reset(env_ids)

    def write_data_to_sim(self):
        """Write external wrenches and joint commands to the simulation.

        If any explicit actuators are present, then the actuator models are used to compute the
        joint commands. Otherwise, the joint commands are directly set into the simulation.
        """
        # write external wrench
        if self.has_external_wrench:
            # apply external forces and torques
            self._body_physx_view.apply_forces_and_torques_at_position(
                force_data=self._external_force_body_view_b.view(-1, 3),
                torque_data=self._external_torque_body_view_b.view(-1, 3),
                position_data=None,
                indices=self._ALL_BODY_INDICES,
                is_global=False,
            )
        # apply actuator models
        self._apply_actuator_model()
        # write actions into simulation
        self.root_physx_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES)
        # position and velocity targets only for implicit actuators
        if self._has_implicit_actuators:
            self.root_physx_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES)
            self.root_physx_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES)

    def update(self, dt: float):
        # -- root state (note: we roll the quaternion to match the convention used in Isaac Sim -- wxyz)
        self._data.root_state_w[:, :7] = self.root_physx_view.get_root_transforms()
        self._data.root_state_w[:, 3:7] = math_utils.convert_quat(self._data.root_state_w[:, 3:7], to="wxyz")
        self._data.root_state_w[:, 7:] = self.root_physx_view.get_root_velocities()
        # -- body-state (note: we roll the quaternion to match the convention used in Isaac Sim -- wxyz)
        self._data.body_state_w[..., :7] = self.root_physx_view.get_link_transforms()
        self._data.body_state_w[..., 3:7] = math_utils.convert_quat(self._data.body_state_w[..., 3:7], to="wxyz")
        self._data.body_state_w[..., 7:] = self.root_physx_view.get_link_velocities()
        # -- joint states
        self._data.joint_pos[:] = self.root_physx_view.get_dof_positions()
        self._data.joint_vel[:] = self.root_physx_view.get_dof_velocities()
        self._data.joint_acc[:] = (self._data.joint_vel - self._previous_joint_vel) / dt
        # -- update common data
        # note: these are computed in the base class
        self._update_common_data(dt)
        # -- update history buffers
        self._previous_joint_vel[:] = self._data.joint_vel[:]

    def find_joints(
        self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False
    ) -> tuple[list[int], list[str]]:
        """Find joints in the articulation based on the name keys.

        Please see the :func:`omni.isaac.orbit.utils.string.resolve_matching_names` function for more
        information on the name matching.

        Args:
            name_keys: A regular expression or a list of regular expressions to match the joint names.
            joint_subset: A subset of joints to search for. Defaults to None, which means all joints
                in the articulation are searched.
            preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False.

        Returns:
            A tuple of lists containing the joint indices and names.
        """
        if joint_subset is None:
            joint_subset = self.joint_names
        # find joints
        return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order)

    """
    Operations - Setters.
    """

    def set_external_force_and_torque(
        self,
        forces: torch.Tensor,
        torques: torch.Tensor,
        body_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        # call parent to set the external forces and torques into buffers
        super().set_external_force_and_torque(forces, torques, body_ids, env_ids)
        # reordering of the external forces and torques to match the body view ordering
        if self.has_external_wrench:
            self._external_force_body_view_b = self._external_force_b[:, self._body_view_ordering]
            self._external_torque_body_view_b = self._external_torque_b[:, self._body_view_ordering]

    """
    Operations - Writers.
    """

    def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None):
        # resolve all indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        # note: we need to do this here since tensors are not set into simulation until step.
        # set into internal buffers
        self._data.root_state_w[env_ids, :7] = root_pose.clone()
        # convert root quaternion from wxyz to xyzw
        root_poses_xyzw = self._data.root_state_w[:, :7].clone()
        root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw")
        # set into simulation
        self.root_physx_view.set_root_transforms(root_poses_xyzw, indices=physx_env_ids)

    def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
        # resolve all indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        # note: we need to do this here since tensors are not set into simulation until step.
        # set into internal buffers
        self._data.root_state_w[env_ids, 7:] = root_velocity.clone()
        # set into simulation
        self.root_physx_view.set_root_velocities(self._data.root_state_w[:, 7:], indices=physx_env_ids)

    def write_joint_state_to_sim(
        self,
        position: torch.Tensor,
        velocity: torch.Tensor,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | slice | None = None,
    ):
        """Write joint positions and velocities to the simulation.

        Args:
            position: Joint positions. Shape is (len(env_ids), len(joint_ids)).
            velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
        """
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # set into internal buffers
        self._data.joint_pos[env_ids, joint_ids] = position
        self._data.joint_vel[env_ids, joint_ids] = velocity
        self._previous_joint_vel[env_ids, joint_ids] = velocity
        self._data.joint_acc[env_ids, joint_ids] = 0.0
        # set into simulation
        self.root_physx_view.set_dof_positions(self._data.joint_pos, indices=physx_env_ids)
        self.root_physx_view.set_dof_velocities(self._data.joint_vel, indices=physx_env_ids)

    def write_joint_stiffness_to_sim(
        self,
        stiffness: torch.Tensor | float,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Write joint stiffness into the simulation.

        Args:
            stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints).
            env_ids: The environment indices to set the stiffness for. Defaults to None (all environments).
        """
        # note: This function isn't setting the values for actuator models. (#128)
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # set into internal buffers
        self._data.joint_stiffness[env_ids, joint_ids] = stiffness
        # set into simulation
        self.root_physx_view.set_dof_stiffnesses(self._data.joint_stiffness.cpu(), indices=physx_env_ids.cpu())

    def write_joint_damping_to_sim(
        self,
        damping: torch.Tensor | float,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Write joint damping into the simulation.

        Args:
            damping: Joint damping. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the damping for. Defaults to None (all joints).
            env_ids: The environment indices to set the damping for. Defaults to None (all environments).
        """
        # note: This function isn't setting the values for actuator models. (#128)
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # set into internal buffers
        self._data.joint_damping[env_ids, joint_ids] = damping
        # set into simulation
        self.root_physx_view.set_dof_dampings(self._data.joint_damping.cpu(), indices=physx_env_ids.cpu())

    def write_joint_effort_limit_to_sim(
        self,
        limits: torch.Tensor | float,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Write joint effort limits into the simulation.

        Args:
            limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints).
            env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments).
        """
        # note: This function isn't setting the values for actuator models. (#128)
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # move tensor to cpu if needed
        if isinstance(limits, torch.Tensor):
            limits = limits.cpu()
        # set into internal buffers
        torque_limit_all = self.root_physx_view.get_dof_max_forces()
        torque_limit_all[env_ids, joint_ids] = limits
        # set into simulation
        self.root_physx_view.set_dof_max_forces(torque_limit_all.cpu(), indices=physx_env_ids.cpu())

    def write_joint_armature_to_sim(
        self,
        armature: torch.Tensor | float,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Write joint armature into the simulation.

        Args:
            armature: Joint armature. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the armature for. Defaults to None (all joints).
            env_ids: The environment indices to set the armature for. Defaults to None (all environments).
        """
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # set into internal buffers
        self._data.joint_armature[env_ids, joint_ids] = armature
        # set into simulation
        self.root_physx_view.set_dof_armatures(self._data.joint_armature.cpu(), indices=physx_env_ids.cpu())

    def write_joint_friction_to_sim(
        self,
        joint_friction: torch.Tensor | float,
        joint_ids: Sequence[int] | slice | None = None,
        env_ids: Sequence[int] | None = None,
    ):
        """Write joint friction into the simulation.

        Args:
            joint_friction: Joint friction. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the friction for. Defaults to None (all joints).
            env_ids: The environment indices to set the friction for. Defaults to None (all environments).
        """
        # resolve indices
        physx_env_ids = env_ids
        if env_ids is None:
            env_ids = slice(None)
            physx_env_ids = self._ALL_INDICES
        if joint_ids is None:
            joint_ids = slice(None)
        # set into internal buffers
        self._data.joint_friction[env_ids, joint_ids] = joint_friction
        # set into simulation
        self.root_physx_view.set_dof_friction_coefficients(self._data.joint_friction.cpu(), indices=physx_env_ids.cpu())

    """
    Operations - State.
    """

    def set_joint_position_target(
        self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None
    ):
        """Set joint position targets into internal buffers.

        .. note::
            This function does not apply the joint targets to the simulation. It only fills the buffers with
            the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function.

        Args:
            target: Joint position targets. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
        """
        # resolve indices
        if env_ids is None:
            env_ids = slice(None)
        if joint_ids is None:
            joint_ids = slice(None)
        # set targets
        self._data.joint_pos_target[env_ids, joint_ids] = target

    def set_joint_velocity_target(
        self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None
    ):
        """Set joint velocity targets into internal buffers.

        .. note::
            This function does not apply the joint targets to the simulation. It only fills the buffers with
            the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function.

        Args:
            target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
        """
        # resolve indices
        if env_ids is None:
            env_ids = slice(None)
        if joint_ids is None:
            joint_ids = slice(None)
        # set targets
        self._data.joint_vel_target[env_ids, joint_ids] = target

    def set_joint_effort_target(
        self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None
    ):
        """Set joint efforts into internal buffers.

        .. note::
            This function does not apply the joint targets to the simulation. It only fills the buffers with
            the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function.

        Args:
            target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)).
            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
        """
        # resolve indices
        if env_ids is None:
            env_ids = slice(None)
        if joint_ids is None:
            joint_ids = slice(None)
        # set targets
        self._data.joint_effort_target[env_ids, joint_ids] = target

    """
    Internal helper.
    """

    def _initialize_impl(self):
        # create simulation view
        self._physics_sim_view = physx.create_simulation_view(self._backend)
        self._physics_sim_view.set_subspace_roots("/")
        # obtain the first prim in the regex expression (all others are assumed to be a copy of this)
        template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path)
        if template_prim is None:
            raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.")
        template_prim_path = template_prim.GetPath().pathString
        # find articulation root prims
        root_prims = sim_utils.get_all_matching_child_prims(
            template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI)
        )
        if len(root_prims) != 1:
            raise RuntimeError(
                f"Failed to find a single articulation root when resolving '{self.cfg.prim_path}'."
                f" Found roots '{root_prims}' under '{template_prim_path}'."
            )
        # resolve articulation root prim back into regex expression
        root_prim_path = root_prims[0].GetPath().pathString
        root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :]
        # -- articulation
        self._root_physx_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*"))
        # -- link views
        # note: we use the root view to get the body names, but we use the body view to get the
        #   actual data. This is mainly needed to apply external forces to the bodies.
        physx_body_names = self.root_physx_view.shared_metatype.link_names
        body_names_regex = r"(" + "|".join(physx_body_names) + r")"
        body_names_regex = f"{self.cfg.prim_path}/{body_names_regex}"
        self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*"))
        # create ordering from articulation view to body view for body names
        # note: we need to do this since the body view is not ordered in the same way as the articulation view
        # -- root view
        root_view_body_names = self.body_names
        # -- body view
        prim_paths = self._body_physx_view.prim_paths[: self.num_bodies]
        body_view_body_names = [path.split("/")[-1] for path in prim_paths]
        # -- mapping from articulation view to body view
        self._body_view_ordering = [body_view_body_names.index(name) for name in root_view_body_names]
        self._body_view_ordering = torch.tensor(self._body_view_ordering, dtype=torch.long, device=self.device)
        # log information about the articulation
        carb.log_info(f"Articulation initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.")
        carb.log_info(f"Is fixed root: {self.is_fixed_base}")
        carb.log_info(f"Number of bodies: {self.num_bodies}")
        carb.log_info(f"Body names: {self.body_names}")
        carb.log_info(f"Number of joints: {self.num_joints}")
        carb.log_info(f"Joint names: {self.joint_names}")
        carb.log_info(f"Number of fixed tendons: {self.num_fixed_tendons}")
        # -- assert that parsing was successful
        if set(physx_body_names) != set(self.body_names):
            raise RuntimeError("Failed to parse all bodies properly in the articulation.")
        # create buffers
        self._create_buffers()
        # process configuration
        self._process_cfg()
        self._process_actuators_cfg()
        # validate configuration
        self._validate_cfg()
        # log joint information
        self._log_articulation_joint_info()

    def _create_buffers(self):
        # allocate buffers
        super()._create_buffers()
        # history buffers
        self._previous_joint_vel = torch.zeros(self.num_instances, self.num_joints, device=self.device)
        # asset data
        # -- properties
        self._data.joint_names = self.joint_names
        # -- joint states
        self._data.joint_pos = torch.zeros(self.num_instances, self.num_joints, device=self.device)
        self._data.joint_vel = torch.zeros_like(self._data.joint_pos)
        self._data.joint_acc = torch.zeros_like(self._data.joint_pos)
        self._data.default_joint_pos = torch.zeros_like(self._data.joint_pos)
        self._data.default_joint_vel = torch.zeros_like(self._data.joint_pos)
        # -- joint commands
        self._data.joint_pos_target = torch.zeros_like(self._data.joint_pos)
        self._data.joint_vel_target = torch.zeros_like(self._data.joint_pos)
        self._data.joint_effort_target = torch.zeros_like(self._data.joint_pos)
        self._data.joint_stiffness = torch.zeros_like(self._data.joint_pos)
        self._data.joint_damping = torch.zeros_like(self._data.joint_pos)
        self._data.joint_armature = torch.zeros_like(self._data.joint_pos)
        self._data.joint_friction = torch.zeros_like(self._data.joint_pos)
        # -- joint commands (explicit)
        self._data.computed_torque = torch.zeros_like(self._data.joint_pos)
        self._data.applied_torque = torch.zeros_like(self._data.joint_pos)
        # -- other data
        self._data.soft_joint_pos_limits = torch.zeros(self.num_instances, self.num_joints, 2, device=self.device)
        self._data.soft_joint_vel_limits = torch.zeros(self.num_instances, self.num_joints, device=self.device)
        self._data.gear_ratio = torch.ones(self.num_instances, self.num_joints, device=self.device)
        # soft joint position limits (recommended not to be too close to limits).
        joint_pos_limits = self.root_physx_view.get_dof_limits()
        joint_pos_mean = (joint_pos_limits[..., 0] + joint_pos_limits[..., 1]) / 2
        joint_pos_range = joint_pos_limits[..., 1] - joint_pos_limits[..., 0]
        soft_limit_factor = self.cfg.soft_joint_pos_limit_factor
        # add to data
        self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor
        self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor
        # create buffers to store processed actions from actuator models
        self._joint_pos_target_sim = torch.zeros_like(self._data.joint_pos_target)
        self._joint_vel_target_sim = torch.zeros_like(self._data.joint_pos_target)
        self._joint_effort_target_sim = torch.zeros_like(self._data.joint_pos_target)

    def _process_cfg(self):
        """Post processing of configuration parameters."""
        # default state
        super()._process_cfg()
        # -- joint state
        # joint pos
        indices_list, _, values_list = string_utils.resolve_matching_names_values(
            self.cfg.init_state.joint_pos, self.joint_names
        )
        self._data.default_joint_pos[:, indices_list] = torch.tensor(values_list, device=self.device)
        # joint vel
        indices_list, _, values_list = string_utils.resolve_matching_names_values(
            self.cfg.init_state.joint_vel, self.joint_names
        )
        self._data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device)

    """
    Internal helpers -- Actuators.
    """

    def _process_actuators_cfg(self):
        """Process and apply articulation joint properties."""
        # flag for implicit actuators
        # if this is false, we by-pass certain checks when doing actuator-related operations
        self._has_implicit_actuators = False
        # cache the values coming from the usd
        usd_stiffness = self.root_physx_view.get_dof_stiffnesses().clone()
        usd_damping = self.root_physx_view.get_dof_dampings().clone()
        usd_armature = self.root_physx_view.get_dof_armatures().clone()
        usd_friction = self.root_physx_view.get_dof_friction_coefficients().clone()
        usd_effort_limit = self.root_physx_view.get_dof_max_forces().clone()
        usd_velocity_limit = self.root_physx_view.get_dof_max_velocities().clone()
        # iterate over all actuator configurations
        for actuator_name, actuator_cfg in self.cfg.actuators.items():
            # type annotation for type checkers
            actuator_cfg: ActuatorBaseCfg
            # create actuator group
            joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr)
            # check if any joints are found
            if len(joint_names) == 0:
                raise ValueError(
                    f"No joints found for actuator group: {actuator_name} with joint name expression:"
                    f" {actuator_cfg.joint_names_expr}."
                )
            # create actuator collection
            # note: for efficiency avoid indexing when over all indices
            actuator: ActuatorBase = actuator_cfg.class_type(
                cfg=actuator_cfg,
                joint_names=joint_names,
                joint_ids=slice(None) if len(joint_names) == self.num_joints else joint_ids,
                num_envs=self.num_instances,
                device=self.device,
                stiffness=usd_stiffness[:, joint_ids],
                damping=usd_damping[:, joint_ids],
                armature=usd_armature[:, joint_ids],
                friction=usd_friction[:, joint_ids],
                effort_limit=usd_effort_limit[:, joint_ids],
                velocity_limit=usd_velocity_limit[:, joint_ids],
            )
            # log information on actuator groups
            carb.log_info(
                f"Actuator collection: {actuator_name} with model '{actuator_cfg.class_type.__name__}' and"
                f" joint names: {joint_names} [{joint_ids}]."
            )
            # store actuator group
            self.actuators[actuator_name] = actuator
            # set the passed gains and limits into the simulation
            if isinstance(actuator, ImplicitActuator):
                self._has_implicit_actuators = True
                # the gains and limits are set into the simulation since actuator model is implicit
                self.write_joint_stiffness_to_sim(actuator.stiffness, joint_ids=actuator.joint_indices)
                self.write_joint_damping_to_sim(actuator.damping, joint_ids=actuator.joint_indices)
                self.write_joint_effort_limit_to_sim(actuator.effort_limit, joint_ids=actuator.joint_indices)
                self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices)
                self.write_joint_friction_to_sim(actuator.friction, joint_ids=actuator.joint_indices)
            else:
                # the gains and limits are processed by the actuator model
                # we set gains to zero, and torque limit to a high value in simulation to avoid any interference
                self.write_joint_stiffness_to_sim(0.0, joint_ids=actuator.joint_indices)
                self.write_joint_damping_to_sim(0.0, joint_ids=actuator.joint_indices)
                self.write_joint_effort_limit_to_sim(1.0e9, joint_ids=actuator.joint_indices)
                self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices)
                self.write_joint_friction_to_sim(actuator.friction, joint_ids=actuator.joint_indices)
        # perform some sanity checks to ensure actuators are prepared correctly
        total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values())
        if total_act_joints != (self.num_joints - self.num_fixed_tendons):
            carb.log_warn(
                "Not all actuators are configured! Total number of actuated joints not equal to number of"
                f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}."
            )

    def _apply_actuator_model(self):
        """Processes joint commands for the articulation by forwarding them to the actuators.

        The actions are first processed using actuator models. Depending on the robot configuration,
        the actuator models compute the joint level simulation commands and set them into the PhysX buffers.
        """
        # process actions per group
        for actuator in self.actuators.values():
            # prepare input for actuator model based on cached data
            # TODO : A tensor dict would be nice to do the indexing of all tensors together
            control_action = ArticulationActions(
                joint_positions=self._data.joint_pos_target[:, actuator.joint_indices],
                joint_velocities=self._data.joint_vel_target[:, actuator.joint_indices],
                joint_efforts=self._data.joint_effort_target[:, actuator.joint_indices],
                joint_indices=actuator.joint_indices,
            )
            # compute joint command from the actuator model
            control_action = actuator.compute(
                control_action,
                joint_pos=self._data.joint_pos[:, actuator.joint_indices],
                joint_vel=self._data.joint_vel[:, actuator.joint_indices],
            )
            # update targets (these are set into the simulation)
            if control_action.joint_positions is not None:
                self._joint_pos_target_sim[:, actuator.joint_indices] = control_action.joint_positions
            if control_action.joint_velocities is not None:
                self._joint_vel_target_sim[:, actuator.joint_indices] = control_action.joint_velocities
            if control_action.joint_efforts is not None:
                self._joint_effort_target_sim[:, actuator.joint_indices] = control_action.joint_efforts
            # update state of the actuator model
            # -- torques
            self._data.computed_torque[:, actuator.joint_indices] = actuator.computed_effort
            self._data.applied_torque[:, actuator.joint_indices] = actuator.applied_effort
            # -- actuator data
            self._data.soft_joint_vel_limits[:, actuator.joint_indices] = actuator.velocity_limit
            # TODO: find a cleaner way to handle gear ratio. Only needed for variable gear ratio actuators.
            if hasattr(actuator, "gear_ratio"):
                self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio

    """
    Internal helpers -- Debugging.
    """

    def _validate_cfg(self):
        """Validate the configuration after processing.

        Note:
            This function should be called only after the configuration has been processed and the buffers have
            been created. Otherwise, some settings that are altered during processing may not be validated.
            For instance, the actuator models may change the joint max velocity limits.
        """
        # check that the default values are within the limits
        joint_pos_limits = self.root_physx_view.get_dof_limits()[0].to(self.device)
        out_of_range = self._data.default_joint_pos[0] < joint_pos_limits[:, 0]
        out_of_range |= self._data.default_joint_pos[0] > joint_pos_limits[:, 1]
        violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1)
        # throw error if any of the default joint positions are out of the limits
        if len(violated_indices) > 0:
            # prepare message for violated joints
            msg = "The following joints have default positions out of the limits: \n"
            for idx in violated_indices:
                joint_name = self.data.joint_names[idx]
                joint_limits = joint_pos_limits[idx]
                joint_pos = self.data.default_joint_pos[0, idx]
                # add to message
                msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limits[0]:.3f}, {joint_limits[1]:.3f}]\n"
            raise ValueError(msg)
        # check that the default joint velocities are within the limits
        joint_max_vel = self.root_physx_view.get_dof_max_velocities()[0].to(self.device)
        out_of_range = torch.abs(self._data.default_joint_vel[0]) > joint_max_vel
        violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1)
        if len(violated_indices) > 0:
            # prepare message for violated joints
            msg = "The following joints have default velocities out of the limits: \n"
            for idx in violated_indices:
                joint_name = self.data.joint_names[idx]
                joint_limits = [-joint_max_vel[idx], joint_max_vel[idx]]
                joint_vel = self.data.default_joint_vel[0, idx]
                # add to message
                msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limits[0]:.3f}, {joint_limits[1]:.3f}]\n"
            raise ValueError(msg)

    def _log_articulation_joint_info(self):
        """Log information about the articulation's simulated joints."""
        # read out all joint parameters from simulation
        # -- gains
        stiffnesses = self.root_physx_view.get_dof_stiffnesses()[0].tolist()
        dampings = self.root_physx_view.get_dof_dampings()[0].tolist()
        # -- properties
        armatures = self.root_physx_view.get_dof_armatures()[0].tolist()
        frictions = self.root_physx_view.get_dof_friction_coefficients()[0].tolist()
        # -- limits
        position_limits = self.root_physx_view.get_dof_limits()[0].tolist()
        velocity_limits = self.root_physx_view.get_dof_max_velocities()[0].tolist()
        effort_limits = self.root_physx_view.get_dof_max_forces()[0].tolist()
        # create table for term information
        table = PrettyTable(float_format=".3f")
        table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})"
        table.field_names = [
            "Index",
            "Name",
            "Stiffness",
            "Damping",
            "Armature",
            "Friction",
            "Position Limits",
            "Velocity Limits",
            "Effort Limits",
        ]
        # set alignment of table columns
        table.align["Name"] = "l"
        # add info on each term
        for index, name in enumerate(self.joint_names):
            table.add_row([
                index,
                name,
                stiffnesses[index],
                dampings[index],
                armatures[index],
                frictions[index],
                position_limits[index],
                velocity_limits[index],
                effort_limits[index],
            ])
        # convert table to string
        carb.log_info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + table.get_string())
        # read out all tendon parameters from simulation
        if self.num_fixed_tendons > 0:
            # -- gains
            ft_stiffnesses = self.root_physx_view.get_fixed_tendon_stiffnesses()[0].tolist()
            ft_dampings = self.root_physx_view.get_fixed_tendon_dampings()[0].tolist()
            # -- limits
            ft_limit_stiffnesses = self.root_physx_view.get_fixed_tendon_limit_stiffnesses()[0].tolist()
            ft_limits = self.root_physx_view.get_fixed_tendon_limits()[0].tolist()
            ft_rest_lengths = self.root_physx_view.get_fixed_tendon_rest_lengths()[0].tolist()
            ft_offsets = self.root_physx_view.get_fixed_tendon_offsets()[0].tolist()
            # create table for term information
            tendon_table = PrettyTable(float_format=".3f")
            tendon_table.title = f"Simulation Tendon Information (Prim path: {self.cfg.prim_path})"
            tendon_table.field_names = [
                "Index",
                "Stiffness",
                "Damping",
                "Limit Stiffness",
                "Limit",
                "Rest Length",
                "Offset",
            ]
            # add info on each term
            for index in range(self.num_fixed_tendons):
                tendon_table.add_row([
                    index,
                    ft_stiffnesses[index],
                    ft_dampings[index],
                    ft_limit_stiffnesses[index],
                    ft_limits[index],
                    ft_rest_lengths[index],
                    ft_offsets[index],
                ])
            # convert table to string
            carb.log_info(f"Simulation parameters for tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string())
43,405
Python
47.015487
120
0.627808
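Two code paths in the file above are worth calling out: implicit actuators push their PD gains into PhysX, while explicit models zero the simulated gains and compute efforts themselves. Below is a minimal sketch of the PD-with-saturation law that an explicit model evaluates, using only PyTorch and illustrative gain values (the numbers are hypothetical, not from the library); it runs without a simulator.

import torch

# hypothetical sizes and gains for illustration only
num_envs, num_joints = 4, 3
stiffness = torch.full((num_envs, num_joints), 25.0)
damping = torch.full((num_envs, num_joints), 0.5)
effort_limit = 33.5

joint_pos = torch.zeros(num_envs, num_joints)
joint_vel = torch.zeros(num_envs, num_joints)
joint_pos_target = torch.rand(num_envs, num_joints)

# PD control law with effort saturation -- the role played by explicit
# (e.g. DC-motor style) actuator models while the sim-side gains stay zero
computed_effort = stiffness * (joint_pos_target - joint_pos) - damping * joint_vel
applied_effort = computed_effort.clamp(-effort_limit, effort_limit)
print(applied_effort.shape)  # torch.Size([4, 3])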
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/assets/articulation/articulation_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from dataclasses import MISSING

from omni.isaac.orbit.actuators import ActuatorBaseCfg
from omni.isaac.orbit.utils import configclass

from ..rigid_object import RigidObjectCfg
from .articulation import Articulation


@configclass
class ArticulationCfg(RigidObjectCfg):
    """Configuration parameters for an articulation."""

    class_type: type = Articulation

    @configclass
    class InitialStateCfg(RigidObjectCfg.InitialStateCfg):
        """Initial state of the articulation."""

        # root position
        joint_pos: dict[str, float] = {".*": 0.0}
        """Joint positions of the joints. Defaults to 0.0 for all joints."""
        joint_vel: dict[str, float] = {".*": 0.0}
        """Joint velocities of the joints. Defaults to 0.0 for all joints."""

    ##
    # Initialize configurations.
    ##

    init_state: InitialStateCfg = InitialStateCfg()
    """Initial state of the articulated object. Defaults to identity pose with zero velocity and zero joint state."""

    soft_joint_pos_limit_factor: float = 1.0
    """Fraction specifying the range of DOF position limits (parsed from the asset) to use. Defaults to 1.0."""

    actuators: dict[str, ActuatorBaseCfg] = MISSING
    """Actuators for the robot with corresponding joint names."""
1,427
Python
31.454545
117
0.703574
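A minimal sketch of filling in this configuration follows; the USD path and gain values are placeholders chosen for illustration, not taken from the original assets.

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg

# hypothetical robot: a single actuator group driving every joint
MY_ROBOT_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(usd_path="/path/to/my_robot.usd"),  # placeholder path
    init_state=ArticulationCfg.InitialStateCfg(joint_pos={".*": 0.0}),
    actuators={
        "all_joints": ImplicitActuatorCfg(
            joint_names_expr=[".*"],
            effort_limit=100.0,
            velocity_limit=10.0,
            stiffness=80.0,
            damping=4.0,
        ),
    },
)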
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/markers/visualization_markers.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""A class to coordinate groups of visual markers (such as spheres, frames or arrows)
using the `UsdGeom.PointInstancer`_ class.

The class :class:`VisualizationMarkers` is used to create a group of visual markers and
visualize them in the viewport. The markers are represented as :class:`UsdGeom.PointInstancer` prims
in the USD stage. The markers are created as prototypes in the :class:`UsdGeom.PointInstancer` prim
and are instanced in the :class:`UsdGeom.PointInstancer` prim. The markers can be visualized by
passing the indices of the marker prototypes and their translations, orientations and scales.
The marker prototypes can be configured with the :class:`VisualizationMarkersCfg` class.

.. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html
"""

from __future__ import annotations

import numpy as np
import torch
from dataclasses import MISSING

import omni.isaac.core.utils.stage as stage_utils
import omni.kit.commands
import omni.physx.scripts.utils as physx_utils
from pxr import Gf, PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, Vt

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sim.spawners import SpawnerCfg
from omni.isaac.orbit.utils.configclass import configclass
from omni.isaac.orbit.utils.math import convert_quat


@configclass
class VisualizationMarkersCfg:
    """A class to configure a :class:`VisualizationMarkers`."""

    prim_path: str = MISSING
    """The prim path where the :class:`UsdGeom.PointInstancer` will be created."""

    markers: dict[str, SpawnerCfg] = MISSING
    """The dictionary of marker configurations.

    The key is the name of the marker, and the value is the configuration of the marker.
    The key is used to identify the marker in the class.
    """


class VisualizationMarkers:
    """A class to coordinate groups of visual markers (loaded from USD).

    This class allows visualization of different UI markers in the scene, such as points and frames.
    The class wraps around the `UsdGeom.PointInstancer`_ for efficient handling of objects
    in the stage via instancing the created marker prototype prims.

    A marker prototype prim is a reusable template prim used for defining variations of objects
    in the scene. For example, a sphere prim can be used as a marker prototype prim to create
    multiple sphere prims in the scene at different locations. Thus, prototype prims are useful
    for creating multiple instances of the same prim in the scene.

    The class parses the configuration to create the different marker prototypes in the stage.
    Each marker prototype prim is created as a child of the :class:`UsdGeom.PointInstancer` prim.
    The prim path for the marker prim is resolved using the key of the marker in the
    :attr:`VisualizationMarkersCfg.markers` dictionary. The marker prototypes are created using
    the :meth:`omni.isaac.core.utils.create_prim` function, and then instanced using the
    :class:`UsdGeom.PointInstancer` prim to allow creating multiple instances of the marker prims.

    Switching between different marker prototypes is possible by calling the :meth:`visualize` method
    with the prototype indices corresponding to the marker prototype. The prototype indices are based
    on the order in the :attr:`VisualizationMarkersCfg.markers` dictionary. For example, if the
    dictionary has two markers, "marker1" and "marker2", then their prototype indices are 0 and 1
    respectively. The prototype indices can be passed as a list or array of integers.

    Usage:
        The following snippet shows how to create 24 sphere markers with a radius of 1.0 at random
        translations within the range [-1.0, 1.0]. The first 12 markers will be colored red and the
        rest will be colored green.

        .. code-block:: python

            import omni.isaac.orbit.sim as sim_utils
            from omni.isaac.orbit.markers import VisualizationMarkersCfg, VisualizationMarkers

            # Create the markers configuration
            # This creates two marker prototypes, "marker1" and "marker2" which are spheres with a radius of 1.0.
            # The color of "marker1" is red and the color of "marker2" is green.
            cfg = VisualizationMarkersCfg(
                prim_path="/World/Visuals/testMarkers",
                markers={
                    "marker1": sim_utils.SphereCfg(
                        radius=1.0,
                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
                    ),
                    "marker2": sim_utils.SphereCfg(
                        radius=1.0,
                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
                    ),
                }
            )
            # Create the markers instance
            # This will create a UsdGeom.PointInstancer prim at the given path along with the marker prototypes.
            marker = VisualizationMarkers(cfg)

            # Set position of the marker
            # -- randomly sample translations between -1.0 and 1.0
            marker_translations = np.random.uniform(-1.0, 1.0, (24, 3))
            # -- this will create 24 markers at the given translations
            # note: the markers will all be `marker1` since the marker indices are not given
            marker.visualize(translations=marker_translations)

            # alter the markers based on their prototypes indices
            # first 12 markers will be marker1 and the rest will be marker2
            # 0 -> marker1, 1 -> marker2
            marker_indices = [0] * 12 + [1] * 12
            # this will change the marker prototypes at the given indices
            # note: the translations of the markers will not be changed from the previous call
            # since the translations are not given.
            marker.visualize(marker_indices=marker_indices)

            # alter the markers based on their prototypes indices and translations
            marker.visualize(marker_indices=marker_indices, translations=marker_translations)

    .. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html
    """

    def __init__(self, cfg: VisualizationMarkersCfg):
        """Initialize the class.

        When the class is initialized, the :class:`UsdGeom.PointInstancer` is created into the stage
        and the marker prims are registered into it.

        .. note::
            If a prim already exists at the given path, the function will find the next free path
            and create the :class:`UsdGeom.PointInstancer` prim there.

        Args:
            cfg: The configuration for the markers.

        Raises:
            ValueError: When no markers are provided in the :obj:`cfg`.
        """
        # get next free path for the prim
        prim_path = stage_utils.get_next_free_path(cfg.prim_path)
        # create a new prim
        stage = stage_utils.get_current_stage()
        self._instancer_manager = UsdGeom.PointInstancer.Define(stage, prim_path)
        # store inputs
        self.prim_path = prim_path
        self.cfg = cfg
        # check if any markers is provided
        if len(self.cfg.markers) == 0:
            raise ValueError(f"The `cfg.markers` cannot be empty. Received: {self.cfg.markers}")

        # create a child prim for the marker
        self._add_markers_prototypes(self.cfg.markers)
        # Note: We need to do this the first time to initialize the instancer.
        # Otherwise, the instancer will not be "created" and the function `GetInstanceIndices()` will fail.
        self._instancer_manager.GetProtoIndicesAttr().Set(list(range(self.num_prototypes)))
        self._instancer_manager.GetPositionsAttr().Set([Gf.Vec3f(0.0)] * self.num_prototypes)
        self._count = self.num_prototypes

    def __str__(self) -> str:
        """Return: A string representation of the class."""
        msg = f"VisualizationMarkers(prim_path={self.prim_path})"
        msg += f"\n\tCount: {self.count}"
        msg += f"\n\tNumber of prototypes: {self.num_prototypes}"
        msg += "\n\tMarkers Prototypes:"
        for index, (name, marker) in enumerate(self.cfg.markers.items()):
            msg += f"\n\t\t[Index: {index}]: {name}: {marker.to_dict()}"
        return msg

    """
    Properties.
    """

    @property
    def num_prototypes(self) -> int:
        """The number of marker prototypes available."""
        return len(self.cfg.markers)

    @property
    def count(self) -> int:
        """The total number of marker instances."""
        # TODO: Update this when the USD API is available (Isaac Sim 2023.1)
        # return self._instancer_manager.GetInstanceCount()
        return self._count

    """
    Operations.
    """

    def set_visibility(self, visible: bool):
        """Sets the visibility of the markers.

        The method does this through the USD API.

        Args:
            visible: flag to set the visibility.
        """
        imageable = UsdGeom.Imageable(self._instancer_manager)
        if visible:
            imageable.MakeVisible()
        else:
            imageable.MakeInvisible()

    def is_visible(self) -> bool:
        """Checks the visibility of the markers.

        Returns:
            True if the markers are visible, False otherwise.
        """
        return self._instancer_manager.GetVisibilityAttr().Get() != UsdGeom.Tokens.invisible

    def visualize(
        self,
        translations: np.ndarray | torch.Tensor | None = None,
        orientations: np.ndarray | torch.Tensor | None = None,
        scales: np.ndarray | torch.Tensor | None = None,
        marker_indices: list[int] | np.ndarray | torch.Tensor | None = None,
    ):
        """Update markers in the viewport.

        .. note::
            If the prim ``PointInstancer`` is hidden in the stage, the function will simply return
            without updating the markers. This helps in avoiding unnecessary computation when the
            markers are not visible.

        Whenever updating the markers, the input arrays must have the same number of elements
        in the first dimension. If the number of elements is different, the `UsdGeom.PointInstancer`
        will raise an error complaining about the mismatch.

        Additionally, the function supports dynamic update of the markers. This means that the
        number of markers can change between calls. For example, if you have 24 points that you
        want to visualize, you can pass 24 translations, orientations, and scales. If you want to
        visualize only 12 points, you can pass 12 translations, orientations, and scales. The
        function will automatically update the number of markers in the scene.

        The function will also update the marker prototypes based on their prototype indices. For
        instance, if you have two marker prototypes, and you pass the following marker indices:
        [0, 1, 0, 1], the function will update the first and third markers with the first prototype,
        and the second and fourth markers with the second prototype. This is useful when you want to
        visualize different markers in the same scene. The list of marker indices must have the same
        number of elements as the translations, orientations, or scales. If the number of elements is
        different, the function will raise an error.

        .. caution::
            This function will update all the markers instanced from the prototypes. That means
            if you have 24 markers, you will need to pass 24 translations, orientations, and scales.

            If you want to update only a subset of the markers, you will need to handle the indices
            yourself and pass the complete arrays to this function.

        Args:
            translations: Translations w.r.t. parent prim frame. Shape is (M, 3).
                Defaults to None, which means left unchanged.
            orientations: Quaternion orientations (w, x, y, z) w.r.t. parent prim frame. Shape is (M, 4).
                Defaults to None, which means left unchanged.
            scales: Scale applied before any rotation is applied. Shape is (M, 3).
                Defaults to None, which means left unchanged.
            marker_indices: Decides which marker prototype to visualize. Shape is (M).
                Defaults to None, which means left unchanged provided that the total number of markers
                is the same as the previous call. If the number of markers is different, the function
                will update the number of markers in the scene.

        Raises:
            ValueError: When input arrays do not follow the expected shapes.
            ValueError: When the function is called with all None arguments.
        """
        # check if it is visible (if not then let's not waste time)
        if not self.is_visible():
            return
        # check if we have any markers to visualize
        num_markers = 0
        # resolve inputs
        # -- position
        if translations is not None:
            if isinstance(translations, torch.Tensor):
                translations = translations.detach().cpu().numpy()
            # check that shape is correct
            if len(translations.shape) != 2 or translations.shape[1] != 3:
                raise ValueError(f"Expected `translations` to have shape (M, 3). Received: {translations.shape}.")
            # apply translations
            self._instancer_manager.GetPositionsAttr().Set(Vt.Vec3fArray.FromNumpy(translations))
            # update number of markers
            num_markers = translations.shape[0]
        # -- orientation
        if orientations is not None:
            if isinstance(orientations, torch.Tensor):
                orientations = orientations.detach().cpu().numpy()
            # check that shape is correct
            if len(orientations.shape) != 2 or orientations.shape[1] != 4:
                raise ValueError(f"Expected `orientations` to have shape (M, 4). Received: {orientations.shape}.")
            # roll orientations from (w, x, y, z) to (x, y, z, w)
            # internally USD expects (x, y, z, w)
            orientations = convert_quat(orientations, to="xyzw")
            # apply orientations
            self._instancer_manager.GetOrientationsAttr().Set(Vt.QuathArray.FromNumpy(orientations))
            # update number of markers
            num_markers = orientations.shape[0]
        # -- scales
        if scales is not None:
            if isinstance(scales, torch.Tensor):
                scales = scales.detach().cpu().numpy()
            # check that shape is correct
            if len(scales.shape) != 2 or scales.shape[1] != 3:
                raise ValueError(f"Expected `scales` to have shape (M, 3). Received: {scales.shape}.")
            # apply scales
            self._instancer_manager.GetScalesAttr().Set(Vt.Vec3fArray.FromNumpy(scales))
            # update number of markers
            num_markers = scales.shape[0]
        # -- status
        if marker_indices is not None or num_markers != self._count:
            # apply marker indices
            if marker_indices is not None:
                if isinstance(marker_indices, torch.Tensor):
                    marker_indices = marker_indices.detach().cpu().numpy()
                elif isinstance(marker_indices, list):
                    marker_indices = np.array(marker_indices)
                # check that shape is correct
                if len(marker_indices.shape) != 1:
                    raise ValueError(f"Expected `marker_indices` to have shape (M,). Received: {marker_indices.shape}.")
                # apply proto indices
                self._instancer_manager.GetProtoIndicesAttr().Set(Vt.IntArray.FromNumpy(marker_indices))
                # update number of markers
                num_markers = marker_indices.shape[0]
            else:
                # check that number of markers is not zero
                if num_markers == 0:
                    raise ValueError("Number of markers cannot be zero! Hint: The function was called with no inputs?")
                # set all markers to be the first prototype
                self._instancer_manager.GetProtoIndicesAttr().Set([0] * num_markers)
        # set number of markers
        self._count = num_markers

    """
    Helper functions.
    """

    def _add_markers_prototypes(self, markers_cfg: dict[str, sim_utils.SpawnerCfg]):
        """Adds markers prototypes to the scene and sets the markers instancer to use them."""
        # add markers based on config
        for name, cfg in markers_cfg.items():
            # resolve prim path
            marker_prim_path = f"{self.prim_path}/{name}"
            # create a child prim for the marker
            prim = cfg.func(prim_path=marker_prim_path, cfg=cfg)
            # make the asset uninstanceable (in case it is)
            # point instancer defines its own prototypes so if an asset is already instanced, this doesn't work.
            self._process_prototype_prim(prim)
            # remove any physics on the markers because they are only for visualization!
            physx_utils.removeRigidBodySubtree(prim)
            # add child reference to point instancer
            self._instancer_manager.GetPrototypesRel().AddTarget(marker_prim_path)
        # check that we loaded all the prototypes
        prototypes = self._instancer_manager.GetPrototypesRel().GetTargets()
        if len(prototypes) != len(markers_cfg):
            raise RuntimeError(
                f"Failed to load all the prototypes. Expected: {len(markers_cfg)}. Received: {len(prototypes)}."
            )

    def _process_prototype_prim(self, prim: Usd.Prim):
        """Process a prim and its descendants to make them suitable for defining prototypes.

        Point instancer defines its own prototypes so if an asset is already instanced, this doesn't work.
        This function checks if the prim at the specified prim path and its descendants are instanced.
        If so, it makes the respective prim uninstanceable by disabling instancing on the prim.

        Additionally, it makes the prim invisible to secondary rays. This is useful when we do not want
        to see the marker prims on camera images.

        Args:
            prim: The prim to process.
        """
        # check if prim is valid
        if not prim.IsValid():
            raise ValueError(f"Prim at path '{prim.GetPrimPath()}' is not valid.")
        # iterate over all prims under prim-path
        all_prims = [prim]
        while len(all_prims) > 0:
            # get current prim
            child_prim = all_prims.pop(0)
            # check if it is physics body -> if so, remove it
            if child_prim.HasAPI(UsdPhysics.ArticulationRootAPI):
                child_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI)
                child_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI)
            if child_prim.HasAPI(UsdPhysics.RigidBodyAPI):
                child_prim.RemoveAPI(UsdPhysics.RigidBodyAPI)
                child_prim.RemoveAPI(PhysxSchema.PhysxRigidBodyAPI)
            if child_prim.IsA(UsdPhysics.Joint):
                child_prim.GetAttribute("physics:jointEnabled").Set(False)
            # check if prim is instanced -> if so, make it uninstanceable
            if child_prim.IsInstance():
                child_prim.SetInstanceable(False)
            # check if prim is a mesh -> if so, make it invisible to secondary rays
            if child_prim.IsA(UsdGeom.Gprim):
                # invisible to secondary rays such as depth images
                omni.kit.commands.execute(
                    "ChangePropertyCommand",
                    prop_path=Sdf.Path(f"{child_prim.GetPrimPath().pathString}.primvars:invisibleToSecondaryRays"),
                    value=True,
                    prev=None,
                    type_to_create_if_not_exist=Sdf.ValueTypeNames.Bool,
                )
            # add children to list
            all_prims += child_prim.GetChildren()
20,362
Python
48.787286
120
0.646646
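A short usage sketch of the API documented above, exercising dynamic marker counts and prototype switching. The prim path, radii, and colors are illustrative, and a running Isaac Sim stage is assumed.

import numpy as np

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg

cfg = VisualizationMarkersCfg(
    prim_path="/World/Visuals/goals",  # illustrative path
    markers={
        "far": sim_utils.SphereCfg(
            radius=0.05, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0))
        ),
        "near": sim_utils.SphereCfg(
            radius=0.05, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0))
        ),
    },
)
markers = VisualizationMarkers(cfg)

# first call: 8 markers, all defaulting to prototype 0 ("far")
markers.visualize(translations=np.random.uniform(-1.0, 1.0, (8, 3)))
# later call: shrink to 4 markers and switch half of them to prototype 1 ("near")
markers.visualize(translations=np.zeros((4, 3)), marker_indices=[0, 0, 1, 1])
# hide everything without destroying the instancer
markers.set_visibility(False)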
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/markers/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Sub-package for marker utilities to simplify creation of UI elements in the GUI.

Currently, the sub-package provides the following classes:

* :class:`VisualizationMarkers` for creating a group of markers using
  `UsdGeom.PointInstancer <https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html>`_.

.. note::

    For some simple use-cases, it may be sufficient to use the debug drawing utilities from Isaac Sim.
    The debug drawing API is available in the `omni.isaac.debug_drawing`_ module. It allows drawing of
    points and splines efficiently on the UI.

    .. _omni.isaac.debug_drawing: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_debug_drawing.html

"""

from __future__ import annotations

from .config import *  # noqa: F401, F403
from .visualization_markers import VisualizationMarkers, VisualizationMarkersCfg
1,003
Python
34.857142
127
0.761715
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/markers/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers.visualization_markers import VisualizationMarkersCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Sensors.
##

RAY_CASTER_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "hit": sim_utils.SphereCfg(
            radius=0.02,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        ),
    },
)
"""Configuration for the ray-caster marker."""


CONTACT_SENSOR_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "contact": sim_utils.SphereCfg(
            radius=0.02,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        ),
        "no_contact": sim_utils.SphereCfg(
            radius=0.02,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
            visible=False,
        ),
    },
)
"""Configuration for the contact sensor marker."""


##
# Frames.
##

FRAME_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "frame": sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
            scale=(0.5, 0.5, 0.5),
        )
    }
)
"""Configuration for the frame marker."""


RED_ARROW_X_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "arrow": sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
            scale=(1.0, 0.1, 0.1),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        )
    }
)
"""Configuration for the red arrow marker (along x-direction)."""


BLUE_ARROW_X_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "arrow": sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
            scale=(1.0, 0.1, 0.1),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
        )
    }
)
"""Configuration for the blue arrow marker (along x-direction)."""


GREEN_ARROW_X_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "arrow": sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
            scale=(1.0, 0.1, 0.1),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
        )
    }
)
"""Configuration for the green arrow marker (along x-direction)."""


##
# Goals.
##

CUBOID_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "cuboid": sim_utils.CuboidCfg(
            size=(0.1, 0.1, 0.1),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        ),
    }
)
"""Configuration for the cuboid marker."""


POSITION_GOAL_MARKER_CFG = VisualizationMarkersCfg(
    markers={
        "target_far": sim_utils.SphereCfg(
            radius=0.01,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
        ),
        "target_near": sim_utils.SphereCfg(
            radius=0.01,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
        ),
        "target_invisible": sim_utils.SphereCfg(
            radius=0.01,
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
            visible=False,
        ),
    }
)
"""Configuration for the end-effector tracking marker."""
3,547
Python
27.384
87
0.610939
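These predefined configurations are typically copied and overridden before instantiation rather than mutated in place. A sketch of that pattern follows; the prim path and scale are illustrative.

from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG

# copy the shared default config, then override fields on the copy
frame_cfg = FRAME_MARKER_CFG.copy()
frame_cfg.prim_path = "/Visuals/ee_frame"  # illustrative path
frame_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
frame_marker = VisualizationMarkers(frame_cfg)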
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/unitree.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for Unitree robots.

The following configurations are available:

* :obj:`UNITREE_A1_CFG`: Unitree A1 robot with DC motor model for the legs
* :obj:`UNITREE_GO1_CFG`: Unitree Go1 robot with actuator net model for the legs
* :obj:`UNITREE_GO2_CFG`: Unitree Go2 robot with DC motor model for the legs

Reference: https://github.com/unitreerobotics/unitree_ros
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ActuatorNetMLPCfg, DCMotorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

##
# Configuration - Actuators.
##

GO1_ACTUATOR_CFG = ActuatorNetMLPCfg(
    joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
    network_file=f"{ISAAC_ORBIT_NUCLEUS_DIR}/ActuatorNets/Unitree/unitree_go1.pt",
    pos_scale=-1.0,
    vel_scale=1.0,
    torque_scale=1.0,
    input_order="pos_vel",
    input_idx=[0, 1, 2],
    effort_limit=23.7,  # taken from spec sheet
    velocity_limit=30.0,  # taken from spec sheet
    saturation_effort=23.7,  # same as effort limit
)
"""Configuration of Go1 actuators using MLP model.

Actuator specifications: https://shop.unitree.com/products/go1-motor

This model is taken from: https://github.com/Improbable-AI/walk-these-ways
"""


##
# Configuration
##

UNITREE_A1_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/Unitree/A1/a1.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.42),
        joint_pos={
            ".*L_hip_joint": 0.1,
            ".*R_hip_joint": -0.1,
            "F[L,R]_thigh_joint": 0.8,
            "R[L,R]_thigh_joint": 1.0,
            ".*_calf_joint": -1.5,
        },
        joint_vel={".*": 0.0},
    ),
    soft_joint_pos_limit_factor=0.9,
    actuators={
        "base_legs": DCMotorCfg(
            joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
            effort_limit=33.5,
            saturation_effort=33.5,
            velocity_limit=21.0,
            stiffness=25.0,
            damping=0.5,
            friction=0.0,
        ),
    },
)
"""Configuration of Unitree A1 using DC motor.

Note: Specifications taken from: https://www.trossenrobotics.com/a1-quadruped#specifications
"""


UNITREE_GO1_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/Unitree/Go1/go1.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.4),
        joint_pos={
            ".*L_hip_joint": 0.1,
            ".*R_hip_joint": -0.1,
            "F[L,R]_thigh_joint": 0.8,
            "R[L,R]_thigh_joint": 1.0,
            ".*_calf_joint": -1.5,
        },
        joint_vel={".*": 0.0},
    ),
    soft_joint_pos_limit_factor=0.9,
    actuators={
        "base_legs": GO1_ACTUATOR_CFG,
    },
)
"""Configuration of Unitree Go1 using MLP-based actuator model."""


UNITREE_GO2_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/Unitree/Go2/go2.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.4),
        joint_pos={
            ".*L_hip_joint": 0.1,
            ".*R_hip_joint": -0.1,
            "F[L,R]_thigh_joint": 0.8,
            "R[L,R]_thigh_joint": 1.0,
            ".*_calf_joint": -1.5,
        },
        joint_vel={".*": 0.0},
    ),
    soft_joint_pos_limit_factor=0.9,
    actuators={
        "base_legs": DCMotorCfg(
            joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
            effort_limit=23.5,
            saturation_effort=23.5,
            velocity_limit=30.0,
            stiffness=25.0,
            damping=0.5,
            friction=0.0,
        ),
    },
)
"""Configuration of Unitree Go2 using DC-Motor actuator model."""
5,691
Python
31.340909
111
0.601652
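A variant configuration can be derived without mutating the shared default, mirroring the copy-and-override pattern used by FRANKA_PANDA_HIGH_PD_CFG later in this package. The gain values below are illustrative, not vendor specifications.

from omni.isaac.orbit_assets import UNITREE_A1_CFG

# derive a stiffer variant; the original module-level config stays untouched
UNITREE_A1_STIFF_CFG = UNITREE_A1_CFG.copy()
UNITREE_A1_STIFF_CFG.actuators["base_legs"].stiffness = 40.0  # illustrative value
UNITREE_A1_STIFF_CFG.actuators["base_legs"].damping = 1.0  # illustrative value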
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/shadow_hand.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the dexterous hand from Shadow Robot.

The following configurations are available:

* :obj:`SHADOW_HAND_CFG`: Shadow Hand with implicit actuator model.

Reference:

* https://www.shadowrobot.com/dexterous-hand-series/

"""

from __future__ import annotations

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Configuration
##

SHADOW_HAND_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ShadowHand/shadow_hand_instanceable.usd",
        activate_contact_sensors=False,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=True,
            retain_accelerations=True,
            max_depenetration_velocity=1000.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True,
            solver_position_iteration_count=8,
            solver_velocity_iteration_count=0,
            sleep_threshold=0.005,
            stabilization_threshold=0.0005,
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
        joint_drive_props=sim_utils.JointDrivePropertiesCfg(drive_type="force"),
        fixed_tendons_props=sim_utils.FixedTendonPropertiesCfg(limit_stiffness=30.0, damping=0.1),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.5),
        rot=(0.0, 0.0, -0.7071, 0.7071),
        joint_pos={".*": 0.0},
    ),
    actuators={
        "fingers": ImplicitActuatorCfg(
            joint_names_expr=["robot0_WR.*", "robot0_(FF|MF|RF|LF|TH)J(3|2|1)", "robot0_(LF|TH)J4", "robot0_THJ0"],
            effort_limit={
                "robot0_WRJ1": 4.785,
                "robot0_WRJ0": 2.175,
                "robot0_(FF|MF|RF|LF)J1": 0.7245,
                "robot0_FFJ(3|2)": 0.9,
                "robot0_MFJ(3|2)": 0.9,
                "robot0_RFJ(3|2)": 0.9,
                "robot0_LFJ(4|3|2)": 0.9,
                "robot0_THJ4": 2.3722,
                "robot0_THJ3": 1.45,
                "robot0_THJ(2|1)": 0.99,
                "robot0_THJ0": 0.81,
            },
            stiffness={
                "robot0_WRJ.*": 5.0,
                "robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 1.0,
                "robot0_(LF|TH)J4": 1.0,
                "robot0_THJ0": 1.0,
            },
            damping={
                "robot0_WRJ.*": 0.5,
                "robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 0.1,
                "robot0_(LF|TH)J4": 0.1,
                "robot0_THJ0": 0.1,
            },
        ),
    },
    soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Shadow Hand robot."""
2,953
Python
32.954023
115
0.573315
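The dictionary-valued gains and limits above are keyed by joint-name regular expressions. The following self-contained sketch approximates how such patterns fan out to concrete joints; it is an illustration of the idea, not the library's exact resolution logic.

import re

# toy resolution of regex-keyed actuator parameters to concrete joint names
effort_limit = {"robot0_WRJ1": 4.785, "robot0_(FF|MF|RF|LF)J1": 0.7245}
joints = ["robot0_WRJ1", "robot0_FFJ1", "robot0_MFJ1"]

resolved = {}
for joint in joints:
    for pattern, value in effort_limit.items():
        # a pattern must match the whole joint name to apply
        if re.fullmatch(pattern, joint):
            resolved[joint] = value
            break
print(resolved)  # {'robot0_WRJ1': 4.785, 'robot0_FFJ1': 0.7245, 'robot0_MFJ1': 0.7245}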
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/sawyer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Rethink Robotics arms.

The following configuration parameters are available:

* :obj:`SAWYER_CFG`: The Sawyer arm without any tool attached.

Reference: https://github.com/RethinkRobotics/sawyer_robot
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Configuration
##

SAWYER_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/RethinkRobotics/sawyer_instanceable.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "head_pan": 0.0,
            "right_j0": 0.0,
            "right_j1": -0.785,
            "right_j2": 0.0,
            "right_j3": 1.05,
            "right_j4": 0.0,
            "right_j5": 1.3,
            "right_j6": 0.0,
        },
    ),
    actuators={
        "head": ImplicitActuatorCfg(
            joint_names_expr=["head_pan"],
            velocity_limit=100.0,
            effort_limit=8.0,
            stiffness=800.0,
            damping=40.0,
        ),
        "arm": ImplicitActuatorCfg(
            joint_names_expr=["right_j[0-6]"],
            velocity_limit=100.0,
            effort_limit={
                "right_j[0-1]": 80.0,
                "right_j[2-3]": 40.0,
                "right_j[4-6]": 9.0,
            },
            stiffness=100.0,
            damping=4.0,
        ),
    },
)
"""Configuration of Rethink Robotics Sawyer arm."""
2,083
Python
27.944444
110
0.590494
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Package containing asset and sensor configurations."""

import os
import toml

# Conveniences to other module directories via relative paths
ORBIT_ASSETS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""

ORBIT_ASSETS_DATA_DIR = os.path.join(ORBIT_ASSETS_EXT_DIR, "data")
"""Path to the extension data directory."""

ORBIT_ASSETS_METADATA = toml.load(os.path.join(ORBIT_ASSETS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""

# Configure the module-level variables
__version__ = ORBIT_ASSETS_METADATA["package"]["version"]

##
# Configuration for different assets.
##

from .allegro import *
from .anymal import *
from .cartpole import *
from .franka import *
from .kinova import *
from .ridgeback_franka import *
from .sawyer import *
from .shadow_hand import *
from .unitree import *
from .universal_robots import *
1,243
Python
27.272727
97
0.736122
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/ridgeback_franka.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Ridgeback-Manipulation robots.

The following configurations are available:

* :obj:`RIDGEBACK_FRANKA_PANDA_CFG`: Clearpath Ridgeback base with Franka Emika arm

Reference: https://github.com/ridgeback/ridgeback_manipulation
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

RIDGEBACK_FRANKA_PANDA_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Clearpath/RidgebackFranka/ridgeback_franka.usd",
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            # base
            "dummy_base_prismatic_y_joint": 0.0,
            "dummy_base_prismatic_x_joint": 0.0,
            "dummy_base_revolute_z_joint": 0.0,
            # franka arm
            "panda_joint1": 0.0,
            "panda_joint2": -0.569,
            "panda_joint3": 0.0,
            "panda_joint4": -2.810,
            "panda_joint5": 0.0,
            "panda_joint6": 3.037,
            "panda_joint7": 0.741,
            # tool
            "panda_finger_joint.*": 0.035,
        },
        joint_vel={".*": 0.0},
    ),
    actuators={
        "base": ImplicitActuatorCfg(
            joint_names_expr=["dummy_base_.*"],
            velocity_limit=100.0,
            effort_limit=1000.0,
            stiffness=0.0,
            damping=1e5,
        ),
        "panda_shoulder": ImplicitActuatorCfg(
            joint_names_expr=["panda_joint[1-4]"],
            effort_limit=87.0,
            velocity_limit=100.0,
            stiffness=800.0,
            damping=40.0,
        ),
        "panda_forearm": ImplicitActuatorCfg(
            joint_names_expr=["panda_joint[5-7]"],
            effort_limit=12.0,
            velocity_limit=100.0,
            stiffness=800.0,
            damping=40.0,
        ),
        "panda_hand": ImplicitActuatorCfg(
            joint_names_expr=["panda_finger_joint.*"],
            effort_limit=200.0,
            velocity_limit=0.2,
            stiffness=1e5,
            damping=1e3,
        ),
    },
)
"""Configuration of Franka arm with Franka Hand on a Clearpath Ridgeback base using implicit actuator models.

The following control configuration is used:

* Base: velocity control with damping
* Arm: position control with damping (contains default position offsets)
* Hand: mimic control
"""
2,651
Python
30.571428
109
0.599774
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/universal_robots.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Universal Robots.

The following configuration parameters are available:

* :obj:`UR10_CFG`: The UR10 arm without a gripper.

Reference: https://github.com/ros-industrial/universal_robot
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

##
# Configuration
##

UR10_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/UniversalRobots/UR10/ur10_instanceable.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "shoulder_pan_joint": 0.0,
            "shoulder_lift_joint": -1.712,
            "elbow_joint": 1.712,
            "wrist_1_joint": 0.0,
            "wrist_2_joint": 0.0,
            "wrist_3_joint": 0.0,
        },
    ),
    actuators={
        "arm": ImplicitActuatorCfg(
            joint_names_expr=[".*"],
            velocity_limit=100.0,
            effort_limit=87.0,
            stiffness=800.0,
            damping=40.0,
        ),
    },
)
"""Configuration of UR-10 arm using implicit actuator models."""
1,542
Python
27.054545
96
0.637484
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/franka.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Franka Emika robots.

The following configurations are available:

* :obj:`FRANKA_PANDA_CFG`: Franka Emika Panda robot with Panda hand
* :obj:`FRANKA_PANDA_HIGH_PD_CFG`: Franka Emika Panda robot with Panda hand with stiffer PD control

Reference: https://github.com/frankaemika/franka_ros
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

##
# Configuration
##

FRANKA_PANDA_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd",
        activate_contact_sensors=False,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "panda_joint1": 0.0,
            "panda_joint2": -0.569,
            "panda_joint3": 0.0,
            "panda_joint4": -2.810,
            "panda_joint5": 0.0,
            "panda_joint6": 3.037,
            "panda_joint7": 0.741,
            "panda_finger_joint.*": 0.04,
        },
    ),
    actuators={
        "panda_shoulder": ImplicitActuatorCfg(
            joint_names_expr=["panda_joint[1-4]"],
            effort_limit=87.0,
            velocity_limit=2.175,
            stiffness=80.0,
            damping=4.0,
        ),
        "panda_forearm": ImplicitActuatorCfg(
            joint_names_expr=["panda_joint[5-7]"],
            effort_limit=12.0,
            velocity_limit=2.61,
            stiffness=80.0,
            damping=4.0,
        ),
        "panda_hand": ImplicitActuatorCfg(
            joint_names_expr=["panda_finger_joint.*"],
            effort_limit=200.0,
            velocity_limit=0.2,
            stiffness=2e3,
            damping=1e2,
        ),
    },
    soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Franka Emika Panda robot."""


FRANKA_PANDA_HIGH_PD_CFG = FRANKA_PANDA_CFG.copy()
FRANKA_PANDA_HIGH_PD_CFG.spawn.rigid_props.disable_gravity = True
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_shoulder"].stiffness = 400.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_shoulder"].damping = 80.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_forearm"].stiffness = 400.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_forearm"].damping = 80.0
"""Configuration of Franka Emika Panda robot with stiffer PD control.

This configuration is useful for task-space control using differential IK.
"""
3,036
Python
33.511363
110
0.64888
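A sketch of instantiating this configuration at a concrete prim path, assuming the configclass `replace` helper (not shown in this file) and a running simulation context:

from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit_assets import FRANKA_PANDA_CFG

# override the prim path on a copy of the shared config, then create the asset
robot = Articulation(FRANKA_PANDA_CFG.replace(prim_path="/World/Robot"))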
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/cartpole.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for a simple Cartpole robot."""

from __future__ import annotations

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

CARTPOLE_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            rigid_body_enabled=True,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=100.0,
            enable_gyroscopic_forces=True,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=False,
            solver_position_iteration_count=4,
            solver_velocity_iteration_count=0,
            sleep_threshold=0.005,
            stabilization_threshold=0.001,
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 2.0), joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}
    ),
    actuators={
        "cart_actuator": ImplicitActuatorCfg(
            joint_names_expr=["slider_to_cart"],
            effort_limit=400.0,
            velocity_limit=100.0,
            stiffness=0.0,
            damping=10.0,
        ),
        "pole_actuator": ImplicitActuatorCfg(
            joint_names_expr=["cart_to_pole"], effort_limit=400.0, velocity_limit=100.0, stiffness=0.0, damping=0.0
        ),
    },
)
1,711
Python
33.938775
115
0.641146
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/allegro.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Allegro Hand robots from Wonik Robotics.

The following configurations are available:

* :obj:`ALLEGRO_HAND_CFG`: Allegro Hand with implicit actuator model.

Reference:

* https://www.wonikrobotics.com/robot-hand

"""

from __future__ import annotations

import math

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Configuration
##

ALLEGRO_HAND_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/AllegroHand/allegro_hand_instanceable.usd",
        activate_contact_sensors=False,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=True,
            retain_accelerations=False,
            enable_gyroscopic_forces=False,
            angular_damping=0.01,
            max_linear_velocity=1000.0,
            max_angular_velocity=64 / math.pi * 180.0,
            max_depenetration_velocity=1000.0,
            max_contact_impulse=1e32,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True,
            solver_position_iteration_count=8,
            solver_velocity_iteration_count=0,
            sleep_threshold=0.005,
            stabilization_threshold=0.0005,
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.5),
        rot=(0.257551, 0.283045, 0.683330, -0.621782),
        joint_pos={"^(?!thumb_joint_0).*": 0.0, "thumb_joint_0": 0.28},
    ),
    actuators={
        "fingers": ImplicitActuatorCfg(
            joint_names_expr=[".*"],
            effort_limit=0.5,
            velocity_limit=100.0,
            stiffness=3.0,
            damping=0.1,
            friction=0.01,
        ),
    },
    soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Allegro Hand robot."""
2,220
Python
29.847222
98
0.647748
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/kinova.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the Kinova Robotics arms.

The following configuration parameters are available:

* :obj:`KINOVA_JACO2_N7S300_CFG`: The Kinova JACO2 (7-Dof) arm with a 3-finger gripper.
* :obj:`KINOVA_JACO2_N6S300_CFG`: The Kinova JACO2 (6-Dof) arm with a 3-finger gripper.
* :obj:`KINOVA_GEN3_N7_CFG`: The Kinova Gen3 (7-Dof) arm with no gripper.

Reference: https://github.com/Kinovarobotics/kinova-ros
"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ImplicitActuatorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Configuration
##

KINOVA_JACO2_N7S300_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Jaco2/J2N7S300/j2n7s300_instanceable.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "j2n7s300_joint_1": 0.0,
            "j2n7s300_joint_2": 2.76,
            "j2n7s300_joint_3": 0.0,
            "j2n7s300_joint_4": 2.0,
            "j2n7s300_joint_5": 2.0,
            "j2n7s300_joint_6": 0.0,
            "j2n7s300_joint_7": 0.0,
            "j2n7s300_joint_finger_[1-3]": 0.2,  # close: 1.2, open: 0.2
            "j2n7s300_joint_finger_tip_[1-3]": 0.2,  # close: 1.2, open: 0.2
        },
    ),
    actuators={
        "arm": ImplicitActuatorCfg(
            joint_names_expr=[".*_joint_[1-7]"],
            velocity_limit=100.0,
            effort_limit={
                ".*_joint_[1-2]": 80.0,
                ".*_joint_[3-4]": 40.0,
                ".*_joint_[5-7]": 20.0,
            },
            stiffness={
                ".*_joint_[1-4]": 40.0,
                ".*_joint_[5-7]": 15.0,
            },
            damping={
                ".*_joint_[1-4]": 1.0,
                ".*_joint_[5-7]": 0.5,
            },
        ),
        "gripper": ImplicitActuatorCfg(
            joint_names_expr=[".*_finger_[1-3]", ".*_finger_tip_[1-3]"],
            velocity_limit=100.0,
            effort_limit=2.0,
            stiffness=1.2,
            damping=0.01,
        ),
    },
)
"""Configuration of Kinova JACO2 (7-Dof) arm with 3-finger gripper."""


KINOVA_JACO2_N6S300_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Jaco2/J2N6S300/j2n6s300_instanceable.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "j2n6s300_joint_1": 0.0,
            "j2n6s300_joint_2": 2.76,
            "j2n6s300_joint_3": 2.76,
            "j2n6s300_joint_4": 2.5,
            "j2n6s300_joint_5": 2.0,
            "j2n6s300_joint_6": 0.0,
            "j2n6s300_joint_finger_[1-3]": 0.2,  # close: 1.2, open: 0.2
            "j2n6s300_joint_finger_tip_[1-3]": 0.2,  # close: 1.2, open: 0.2
        },
    ),
    actuators={
        "arm": ImplicitActuatorCfg(
            joint_names_expr=[".*_joint_[1-6]"],
            velocity_limit=100.0,
            effort_limit={
                ".*_joint_[1-2]": 80.0,
                ".*_joint_3": 40.0,
                ".*_joint_[4-6]": 20.0,
            },
            stiffness={
                ".*_joint_[1-3]": 40.0,
                ".*_joint_[4-6]": 15.0,
            },
            damping={
                ".*_joint_[1-3]": 1.0,
                ".*_joint_[4-6]": 0.5,
            },
        ),
        "gripper": ImplicitActuatorCfg(
            joint_names_expr=[".*_finger_[1-3]", ".*_finger_tip_[1-3]"],
            velocity_limit=100.0,
            effort_limit=2.0,
            stiffness=1.2,
            damping=0.01,
        ),
    },
)
"""Configuration of Kinova JACO2 (6-Dof) arm with 3-finger gripper."""


KINOVA_GEN3_N7_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Gen3/gen3n7_instanceable.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
        activate_contact_sensors=False,
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "joint_1": 0.0,
            "joint_2": 0.65,
            "joint_3": 0.0,
            "joint_4": 1.89,
            "joint_5": 0.0,
            "joint_6": 0.6,
            "joint_7": -1.57,
        },
    ),
    actuators={
        "arm": ImplicitActuatorCfg(
            joint_names_expr=["joint_[1-7]"],
            velocity_limit=100.0,
            effort_limit={
                "joint_[1-4]": 39.0,
                "joint_[5-7]": 9.0,
            },
            stiffness={
                "joint_[1-4]": 40.0,
                "joint_[5-7]": 15.0,
            },
            damping={
                "joint_[1-4]": 1.0,
                "joint_[5-7]": 0.5,
            },
        ),
    },
)
"""Configuration of Kinova Gen3 (7-Dof) arm with no gripper."""
5,949
Python
32.055555
110
0.525971
NVIDIA-Omniverse/orbit/source/extensions/omni.isaac.orbit_assets/omni/isaac/orbit_assets/anymal.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configuration for the ANYbotics robots.

The following configuration parameters are available:

* :obj:`ANYMAL_B_CFG`: The ANYmal-B robot with ANYdrives 3.0
* :obj:`ANYMAL_C_CFG`: The ANYmal-C robot with ANYdrives 3.0
* :obj:`ANYMAL_D_CFG`: The ANYmal-D robot with ANYdrives 3.0

Reference:

* https://github.com/ANYbotics/anymal_b_simple_description
* https://github.com/ANYbotics/anymal_c_simple_description
* https://github.com/ANYbotics/anymal_d_simple_description

"""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.actuators import ActuatorNetLSTMCfg, DCMotorCfg
from omni.isaac.orbit.assets.articulation import ArticulationCfg
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR

##
# Configuration - Actuators.
##

ANYDRIVE_3_SIMPLE_ACTUATOR_CFG = DCMotorCfg(
    joint_names_expr=[".*HAA", ".*HFE", ".*KFE"],
    saturation_effort=120.0,
    effort_limit=80.0,
    velocity_limit=7.5,
    stiffness={".*": 40.0},
    damping={".*": 5.0},
)
"""Configuration for ANYdrive 3.x with DC actuator model."""


ANYDRIVE_3_LSTM_ACTUATOR_CFG = ActuatorNetLSTMCfg(
    joint_names_expr=[".*HAA", ".*HFE", ".*KFE"],
    network_file=f"{ISAAC_ORBIT_NUCLEUS_DIR}/ActuatorNets/ANYbotics/anydrive_3_lstm_jit.pt",
    saturation_effort=120.0,
    effort_limit=80.0,
    velocity_limit=7.5,
)
"""Configuration for ANYdrive 3.0 (used on ANYmal-C) with LSTM actuator model."""


##
# Configuration - Articulation.
##

ANYMAL_B_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-B/anymal_b.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.6),
        joint_pos={
            ".*HAA": 0.0,  # all HAA
            ".*F_HFE": 0.4,  # both front HFE
            ".*H_HFE": -0.4,  # both hind HFE
            ".*F_KFE": -0.8,  # both front KFE
            ".*H_KFE": 0.8,  # both hind KFE
        },
    ),
    actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
    soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-B robot using actuator-net."""


ANYMAL_C_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd",
        # usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.6),
        joint_pos={
            ".*HAA": 0.0,  # all HAA
            ".*F_HFE": 0.4,  # both front HFE
            ".*H_HFE": -0.4,  # both hind HFE
            ".*F_KFE": -0.8,  # both front KFE
            ".*H_KFE": 0.8,  # both hind KFE
        },
    ),
    actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
    soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-C robot using actuator-net."""


ANYMAL_D_CFG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
        # usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d_minimal.usd",
        activate_contact_sensors=True,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            retain_accelerations=False,
            linear_damping=0.0,
            angular_damping=0.0,
            max_linear_velocity=1000.0,
            max_angular_velocity=1000.0,
            max_depenetration_velocity=1.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
        ),
        # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        pos=(0.0, 0.0, 0.6),
        joint_pos={
            ".*HAA": 0.0,  # all HAA
            ".*F_HFE": 0.4,  # both front HFE
            ".*H_HFE": -0.4,  # both hind HFE
            ".*F_KFE": -0.8,  # both front KFE
            ".*H_KFE": 0.8,  # both hind KFE
        },
    ),
    actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
    soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-D robot using actuator-net.

Note:
    Since we don't have a publicly available actuator network for ANYmal-D, we use the same network as ANYmal-C.
    This may impact the sim-to-real transfer performance.
"""
5,833
Python
34.791411
112
0.633636
NVIDIA-Omniverse/orbit/source/standalone/tools/convert_mesh.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Utility to convert an OBJ/STL/FBX file into USD format.

The OBJ file format is a simple data-format that represents 3D geometry alone — namely, the position of
each vertex, the UV position of each texture coordinate vertex, vertex normals, and the faces that make
each polygon defined as a list of vertices, and texture vertices.

An STL file describes a raw, unstructured triangulated surface by the unit normal and vertices (ordered
by the right-hand rule) of the triangles using a three-dimensional Cartesian coordinate system.

FBX files are a type of 3D model file created using the Autodesk FBX software. They can be designed and
modified in various modeling applications, such as Maya, 3ds Max, and Blender. Moreover, FBX files typically
contain mesh, material, texture, and skeletal animation data.
Link: https://www.autodesk.com/products/fbx/overview

This script uses the asset converter extension from Isaac Sim (``omni.kit.asset_converter``) to convert an
OBJ/STL/FBX asset into USD format. It is designed as a convenience script for command-line use.

positional arguments:
  input                       The path to the input mesh (.OBJ/.STL/.FBX) file.
  output                      The path to store the USD file.

optional arguments:
  -h, --help                  Show this help message and exit
  --make-instanceable         Make the asset instanceable for efficient cloning. (default: False)
  --collision-approximation   The method used for approximating collision mesh. Defaults to convexDecomposition.
                              Set to "none" to not add a collision mesh to the converted mesh. (default: convexDecomposition)
  --mass                      The mass (in kg) to assign to the converted asset. (default: None)

"""

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a mesh file into USD format.")
parser.add_argument("input", type=str, help="The path to the input mesh file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
    "--make-instanceable",
    action="store_true",
    default=False,
    help="Make the asset instanceable for efficient cloning.",
)
parser.add_argument(
    "--collision-approximation",
    type=str,
    default="convexDecomposition",
    choices=["convexDecomposition", "convexHull", "none"],
    help=(
        'The method used for approximating collision mesh. Set to "none" '
        "to not add a collision mesh to the converted mesh."
    ),
)
parser.add_argument(
    "--mass",
    type=float,
    default=None,
    help="The mass (in kg) to assign to the converted asset. If not provided, then no mass is added.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import contextlib
import os

import carb
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.app

from omni.isaac.orbit.sim.converters import MeshConverter, MeshConverterCfg
from omni.isaac.orbit.sim.schemas import schemas_cfg
from omni.isaac.orbit.utils.assets import check_file_path
from omni.isaac.orbit.utils.dict import print_dict


def main():
    # check valid file path
    mesh_path = args_cli.input
    if not os.path.isabs(mesh_path):
        mesh_path = os.path.abspath(mesh_path)
    if not check_file_path(mesh_path):
        raise ValueError(f"Invalid mesh file path: {mesh_path}")
    # create destination path
    dest_path = args_cli.output
    if not os.path.isabs(dest_path):
        dest_path = os.path.abspath(dest_path)

    print(dest_path)
    print(os.path.dirname(dest_path))
    print(os.path.basename(dest_path))

    # Mass properties
    if args_cli.mass is not None:
        mass_props = schemas_cfg.MassPropertiesCfg(mass=args_cli.mass)
        rigid_props = schemas_cfg.RigidBodyPropertiesCfg()
    else:
        mass_props = None
        rigid_props = None

    # Collision properties
    collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=args_cli.collision_approximation != "none")

    # Create Mesh converter config
    mesh_converter_cfg = MeshConverterCfg(
        mass_props=mass_props,
        rigid_props=rigid_props,
        collision_props=collision_props,
        asset_path=mesh_path,
        force_usd_conversion=True,
        usd_dir=os.path.dirname(dest_path),
        usd_file_name=os.path.basename(dest_path),
        make_instanceable=args_cli.make_instanceable,
        collision_approximation=args_cli.collision_approximation,
    )

    # Print info
    print("-" * 80)
    print("-" * 80)
    print(f"Input Mesh file: {mesh_path}")
    print("Mesh importer config:")
    print_dict(mesh_converter_cfg.to_dict(), nesting=0)
    print("-" * 80)
    print("-" * 80)

    # Create Mesh converter and import the file
    mesh_converter = MeshConverter(mesh_converter_cfg)
    # print output
    print("Mesh importer output:")
    print(f"Generated USD file: {mesh_converter.usd_path}")
    print("-" * 80)
    print("-" * 80)

    # Determine if there is a GUI to update:
    # acquire settings interface
    carb_settings_iface = carb.settings.get_settings()
    # read flag for whether a local GUI is enabled
    local_gui = carb_settings_iface.get("/app/window/enabled")
    # read flag for whether livestreaming GUI is enabled
    livestream_gui = carb_settings_iface.get("/app/livestream/enabled")

    # Simulate scene (if not headless)
    if local_gui or livestream_gui:
        # Open the stage with USD
        stage_utils.open_stage(mesh_converter.usd_path)
        # Reinitialize the simulation
        app = omni.kit.app.get_app_interface()
        # Run simulation
        with contextlib.suppress(KeyboardInterrupt):
            while app.is_running():
                # perform step
                app.update()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
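# Example invocation (sketch; the asset paths are illustrative):
#
#   ./orbit.sh -p source/standalone/tools/convert_mesh.py \
#       assets/chair.obj assets/chair.usd --make-instanceable --mass 2.5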
6,290
Python
33.95
129
0.692846
NVIDIA-Omniverse/orbit/source/standalone/tools/check_instanceable.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script uses the cloner API to check if an asset has been instanced properly.

Usage with different inputs (replace `<Asset-Path>` and `<Asset-Path-Instanced>` with the path to the
original asset and the instanced asset respectively):

```bash
./orbit.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless --physics
./orbit.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless --physics
./orbit.sh -p source/tools/check_instanceable.py <Asset-Path> -n 4096 --headless
./orbit.sh -p source/tools/check_instanceable.py <Asset-Path-Instanced> -n 4096 --headless
```

Output from the above commands:

```bash
>>> Cloning time (cloner.clone): 0.648198 seconds
>>> Setup time (sim.reset): 5.843589 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path-Instanced> : 6.491870 seconds

>>> Cloning time (cloner.clone): 0.693133 seconds
>>> Setup time (sim.reset): 50.860526 seconds
[#clones: 4096, physics: True] Asset: <Asset-Path> : 51.553743 seconds

>>> Cloning time (cloner.clone) : 0.687201 seconds
>>> Setup time (sim.reset) : 6.302215 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path-Instanced> : 6.989500 seconds

>>> Cloning time (cloner.clone) : 0.678150 seconds
>>> Setup time (sim.reset) : 52.854054 seconds
[#clones: 4096, physics: False] Asset: <Asset-Path> : 53.532287 seconds
```
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse
import contextlib
import os

# omni-isaac-orbit
from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser("Utility to empirically check if an asset is instanced properly.")
parser.add_argument("input", type=str, help="The path to the USD file.")
parser.add_argument("-n", "--num_clones", type=int, default=128, help="Number of clones to spawn.")
parser.add_argument("-s", "--spacing", type=float, default=1.5, help="Spacing between instances in a grid.")
parser.add_argument("-p", "--physics", action="store_true", default=False, help="Clone assets using physics cloner.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.carb import set_carb_setting

from omni.isaac.orbit.utils import Timer
from omni.isaac.orbit.utils.assets import check_file_path


def main():
    """Spawns the USD asset robot and clones it using Isaac Gym Cloner API."""
    # check valid file path
    if not check_file_path(args_cli.input):
        raise ValueError(f"Invalid file path: {args_cli.input}")
    # Load kit helper
    sim = SimulationContext(
        stage_units_in_meters=1.0, physics_dt=0.01, rendering_dt=0.01, backend="torch", device="cuda:0"
    )
    # enable flatcache which avoids passing data over to USD structure
    # this speeds up the read-write operation of GPU buffers
    if sim.get_physics_context().use_gpu_pipeline:
        sim.get_physics_context().enable_flatcache(True)
    # enable hydra scene-graph instancing
    # this is needed to visualize the scene when flatcache is enabled
    set_carb_setting(sim._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)

    # Create interface to clone the scene
    cloner = GridCloner(spacing=args_cli.spacing)
    cloner.define_base_env("/World/envs")
    prim_utils.define_prim("/World/envs/env_0")

    # Spawn things into stage
    prim_utils.create_prim("/World/Light", "DistantLight")
    # Everything under the namespace "/World/envs/env_0" will be cloned
    prim_utils.create_prim("/World/envs/env_0/Asset", "Xform", usd_path=os.path.abspath(args_cli.input))

    # Clone the scene
    num_clones = args_cli.num_clones

    # Create a timer to measure the cloning time
    with Timer(f"[#clones: {num_clones}, physics: {args_cli.physics}] Asset: {args_cli.input}"):
        # Clone the scene
        with Timer(">>> Cloning time (cloner.clone)"):
            cloner.define_base_env("/World/envs")
            envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_clones)
            _ = cloner.clone(
                source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=args_cli.physics
            )
        # Play the simulator
        with Timer(">>> Setup time (sim.reset)"):
            sim.reset()

    # Simulate scene (if not headless)
    if not args_cli.headless:
        with contextlib.suppress(KeyboardInterrupt):
            while sim.is_playing():
                # perform step
                sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
5,076
Python
36.607407
117
0.6974
NVIDIA-Omniverse/orbit/source/standalone/tools/blender_obj.py
#!/usr/bin/env python
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Convert a mesh file to `.obj` using blender.

This file processes a given dae mesh file and saves the resulting mesh file in obj format.

It needs to be called using the python packaged with blender, i.e.:

    blender --background --python blender_obj.py -- --in_file FILE --out_file FILE

For more information: https://docs.blender.org/api/current/index.html

The script was tested on Blender 3.2 on Ubuntu 20.04LTS.
"""

from __future__ import annotations

import bpy
import os
import sys


def parse_cli_args():
    """Parse the input command line arguments.

    Reference: https://developer.blender.org/diffusion/B/browse/master/release/scripts/templates_py/background_job.py
    """
    import argparse

    # get the args passed to blender after "--", all of which are ignored by
    # blender so scripts may receive their own arguments
    argv = sys.argv
    if "--" not in argv:
        argv = []  # as if no args are passed
    else:
        argv = argv[argv.index("--") + 1 :]  # get all args after "--"
    # When --help or no args are given, print this help
    usage_text = (
        f"Run blender in background mode with this script:\n\tblender --background --python {__file__} -- [options]"
    )
    parser = argparse.ArgumentParser(description=usage_text)
    # Add arguments
    parser.add_argument(
        "-i", "--in_file", metavar="FILE", type=str, required=True, help="Path to input mesh (.dae/.stl) file."
    )
    parser.add_argument("-o", "--out_file", metavar="FILE", type=str, required=True, help="Path to output OBJ file.")
    args = parser.parse_args(argv)
    # Check if any arguments provided
    if not argv or not args.in_file or not args.out_file:
        parser.print_help()
        return None
    # return arguments
    return args


def convert_to_obj(in_file: str, out_file: str, save_usd: bool = False):
    """Convert a mesh file to `.obj` using blender.

    Args:
        in_file: Input mesh file to process.
        out_file: Path to store output obj file.
        save_usd: Whether to additionally export the converted mesh as a USD file. Defaults to False.
    """
    # check valid input file
    if not os.path.exists(in_file):
        raise FileNotFoundError(in_file)
    # add ending of file format
    if not out_file.endswith(".obj"):
        out_file += ".obj"
    # create directory if it doesn't exist for destination file
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file), exist_ok=True)
    # reset scene to empty
    bpy.ops.wm.read_factory_settings(use_empty=True)
    # load object into scene
    if in_file.endswith(".dae"):
        bpy.ops.wm.collada_import(filepath=in_file)
    elif in_file.endswith(".stl") or in_file.endswith(".STL"):
        bpy.ops.import_mesh.stl(filepath=in_file)
    else:
        raise ValueError(f"Input file not in dae/stl format: {in_file}")
    # convert to obj format and store with z up
    # TODO: Read the convention from dae file instead of manually fixing it.
    # Reference: https://docs.blender.org/api/2.79/bpy.ops.export_scene.html
    bpy.ops.export_scene.obj(
        filepath=out_file, check_existing=False, axis_forward="Y", axis_up="Z", global_scale=1, path_mode="RELATIVE"
    )
    # save it as usd as well
    if save_usd:
        # swap only the file extension (a plain `str.replace("obj", "usd")` could also
        # hit the substring "obj" inside directory names)
        out_file = os.path.splitext(out_file)[0] + ".usd"
        bpy.ops.wm.usd_export(filepath=out_file, check_existing=False)


if __name__ == "__main__":
    # read arguments
    cli_args = parse_cli_args()
    # check CLI args
    if cli_args is None:
        sys.exit()
    # process via blender
    convert_to_obj(cli_args.in_file, cli_args.out_file)
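# Usage sketch: the converter can also be driven from another blender-python
# script; the paths below are illustrative.
#
#   convert_to_obj("meshes/base.dae", "obj/base.obj", save_usd=True)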
3,662
Python
33.233645
117
0.659203
NVIDIA-Omniverse/orbit/source/standalone/tools/process_meshes_to_obj.py
#!/usr/bin/env python
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Convert all mesh files to `.obj` in given folders."""

from __future__ import annotations

import argparse
import os
import shutil
import subprocess

# Constants
# Path to blender
BLENDER_EXE_PATH = shutil.which("blender")


def parse_cli_args():
    """Parse the input command line arguments.

    Reference: https://developer.blender.org/diffusion/B/browse/master/release/scripts/templates_py/background_job.py
    """
    # add argparse arguments
    parser = argparse.ArgumentParser("Utility to convert all mesh files to `.obj` in given folders.")
    parser.add_argument("input_dir", type=str, help="The input directory from which to load meshes.")
    parser.add_argument(
        "-o",
        "--output_dir",
        type=str,
        default=None,
        help="The output directory to save converted meshes into. Default is same as input directory.",
    )
    args_cli = parser.parse_args()
    # resolve output directory
    if args_cli.output_dir is None:
        args_cli.output_dir = args_cli.input_dir
    # return arguments
    return args_cli


def run_blender_convert2obj(in_file: str, out_file: str):
    """Calls the python script using `subprocess` to perform processing of mesh file.

    Args:
        in_file: Input mesh file.
        out_file: Output obj file.
    """
    # resolve for python file
    tools_dirname = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(tools_dirname, "blender_obj.py")
    # complete command
    command_exe = f"{BLENDER_EXE_PATH} --background --python {script_file} -- -i {in_file} -o {out_file}"
    # break command into list
    command_exe_list = command_exe.split(" ")
    # run command
    subprocess.run(command_exe_list)


def convert_meshes(source_folders: list[str], destination_folders: list[str]):
    """Processes all mesh files of supported format into OBJ file using blender.

    Args:
        source_folders: List of directories to search for meshes.
        destination_folders: List of directories to dump converted files.
    """
    # create folder for corresponding destination
    for folder in destination_folders:
        os.makedirs(folder, exist_ok=True)
    # iterate over each folder
    for in_folder, out_folder in zip(source_folders, destination_folders):
        # extract all dae/stl files in the directory
        mesh_filenames = [f for f in os.listdir(in_folder) if f.endswith("dae")]
        mesh_filenames += [f for f in os.listdir(in_folder) if f.endswith("stl")]
        mesh_filenames += [f for f in os.listdir(in_folder) if f.endswith("STL")]
        # print status
        print(f"Found {len(mesh_filenames)} files to process in directory: {in_folder}")
        # iterate over each mesh file
        for mesh_file in mesh_filenames:
            # extract mesh name
            mesh_name = os.path.splitext(mesh_file)[0]
            # complete path of input and output files
            in_file_path = os.path.join(in_folder, mesh_file)
            out_file_path = os.path.join(out_folder, mesh_name + ".obj")
            # perform blender processing
            print("Processing: ", in_file_path)
            run_blender_convert2obj(in_file_path, out_file_path)


if __name__ == "__main__":
    # Parse command line arguments
    args = parse_cli_args()
    # Run conversion
    convert_meshes([args.input_dir], [args.output_dir])
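# Example invocation (sketch; directories are illustrative and blender must be on PATH):
#
#   python process_meshes_to_obj.py meshes/raw -o meshes/obj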
3,502
Python
34.744898
117
0.659909
NVIDIA-Omniverse/orbit/source/standalone/tools/convert_urdf.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Utility to convert a URDF into USD format.

Unified Robot Description Format (URDF) is an XML file format used in ROS to describe all elements of
a robot. For more information, see: http://wiki.ros.org/urdf

This script uses the URDF importer extension from Isaac Sim (``omni.isaac.urdf_importer``) to convert a
URDF asset into USD format. It is designed as a convenience script for command-line use. For more
information on the URDF importer, see the documentation for the extension:
https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html


positional arguments:
  input                 The path to the input URDF file.
  output                The path to store the USD file.

optional arguments:
  -h, --help            Show this help message and exit
  --merge-joints        Consolidate links that are connected by fixed joints. (default: False)
  --fix-base            Fix the base to where it is imported. (default: False)
  --make-instanceable   Make the asset instanceable for efficient cloning. (default: False)

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Utility to convert a URDF into USD format.")
parser.add_argument("input", type=str, help="The path to the input URDF file.")
parser.add_argument("output", type=str, help="The path to store the USD file.")
parser.add_argument(
    "--merge-joints",
    action="store_true",
    default=False,
    help="Consolidate links that are connected by fixed joints.",
)
parser.add_argument("--fix-base", action="store_true", default=False, help="Fix the base to where it is imported.")
parser.add_argument(
    "--make-instanceable",
    action="store_true",
    default=False,
    help="Make the asset instanceable for efficient cloning.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import contextlib
import os

import carb
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.app

from omni.isaac.orbit.sim.converters import UrdfConverter, UrdfConverterCfg
from omni.isaac.orbit.utils.assets import check_file_path
from omni.isaac.orbit.utils.dict import print_dict


def main():
    # check valid file path
    urdf_path = args_cli.input
    if not os.path.isabs(urdf_path):
        urdf_path = os.path.abspath(urdf_path)
    if not check_file_path(urdf_path):
        raise ValueError(f"Invalid file path: {urdf_path}")
    # create destination path
    dest_path = args_cli.output
    if not os.path.isabs(dest_path):
        dest_path = os.path.abspath(dest_path)

    # Create Urdf converter config
    urdf_converter_cfg = UrdfConverterCfg(
        asset_path=urdf_path,
        usd_dir=os.path.dirname(dest_path),
        usd_file_name=os.path.basename(dest_path),
        fix_base=args_cli.fix_base,
        merge_fixed_joints=args_cli.merge_joints,
        force_usd_conversion=True,
        make_instanceable=args_cli.make_instanceable,
    )

    # Print info
    print("-" * 80)
    print("-" * 80)
    print(f"Input URDF file: {urdf_path}")
    print("URDF importer config:")
    print_dict(urdf_converter_cfg.to_dict(), nesting=0)
    print("-" * 80)
    print("-" * 80)

    # Create Urdf converter and import the file
    urdf_converter = UrdfConverter(urdf_converter_cfg)
    # print output
    print("URDF importer output:")
    print(f"Generated USD file: {urdf_converter.usd_path}")
    print("-" * 80)
    print("-" * 80)

    # Determine if there is a GUI to update:
    # acquire settings interface
    carb_settings_iface = carb.settings.get_settings()
    # read flag for whether a local GUI is enabled
    local_gui = carb_settings_iface.get("/app/window/enabled")
    # read flag for whether livestreaming GUI is enabled
    livestream_gui = carb_settings_iface.get("/app/livestream/enabled")

    # Simulate scene (if not headless)
    if local_gui or livestream_gui:
        # Open the stage with USD
        stage_utils.open_stage(urdf_converter.usd_path)
        # Reinitialize the simulation
        app = omni.kit.app.get_app_interface()
        # Run simulation
        with contextlib.suppress(KeyboardInterrupt):
            while app.is_running():
                # perform step
                app.update()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
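# Example invocation (sketch; the robot description paths are illustrative):
#
#   ./orbit.sh -p source/standalone/tools/convert_urdf.py \
#       robots/my_robot.urdf robots/my_robot.usd --merge-joints --fix-base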
4,784
Python
32
115
0.687709
NVIDIA-Omniverse/orbit/source/standalone/tutorials/01_assets/run_articulation.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to spawn a cart-pole and interact with it.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/01_assets/run_articulation.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with an articulation.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import CARTPOLE_CFG  # isort:skip


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a robot in it
    origins = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
    # Origin 1
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # Origin 2
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])

    # Articulation
    cartpole_cfg = CARTPOLE_CFG.copy()
    cartpole_cfg.prim_path = "/World/Origin.*/Robot"
    cartpole = Articulation(cfg=cartpole_cfg)

    # return the scene information
    scene_entities = {"cartpole": cartpole}
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability. In general, it is better to access the entities directly from
    #   the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial.
    robot = entities["cartpole"]
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = robot.data.default_root_state.clone()
            root_state[:, :3] += origins
            robot.write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
            joint_pos += torch.rand_like(joint_pos) * 0.1
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            robot.reset()
            print("[INFO]: Resetting robot state...")
        # Apply random action
        # -- generate random joint efforts
        efforts = torch.randn_like(robot.data.joint_pos) * 5.0
        # -- apply action to the robot
        robot.set_joint_effort_target(efforts)
        # -- write data to sim
        robot.write_data_to_sim()
        # Perform step
        sim.step()
        # Increment counter
        count += 1
        # Update buffers
        robot.update(sim_dt)


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
    # Design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
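# Variation sketch: the same loop can drive the cart with position targets
# instead of random efforts; the sinusoidal command below is illustrative.
#
#   targets = robot.data.default_joint_pos + 0.2 * torch.sin(torch.tensor(count * sim_dt))
#   robot.set_joint_position_target(targets)
#   robot.write_data_to_sim()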
4,689
Python
31.344827
111
0.655577
NVIDIA-Omniverse/orbit/source/standalone/tutorials/01_assets/run_rigid_object.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to create a rigid object and interact with it.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/01_assets/run_rigid_object.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with a rigid object.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sim import SimulationContext


def design_scene():
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin0" to "Origin3"
    # Each group will have a rigid object (a cone) in it
    origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)

    # Rigid Object
    cone_cfg = RigidObjectCfg(
        prim_path="/World/Origin.*/Cone",
        spawn=sim_utils.ConeCfg(
            radius=0.1,
            height=0.2,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(),
    )
    cone_object = RigidObject(cfg=cone_cfg)

    # return the scene information
    scene_entities = {"cone": cone_object}
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, RigidObject], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability. In general, it is better to access the entities directly from
    #   the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial.
    cone_object = entities["cone"]
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 250 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset root state
            root_state = cone_object.data.default_root_state.clone()
            # sample a random position on a cylinder around the origins
            root_state[:, :3] += origins
            root_state[:, :3] += math_utils.sample_cylinder(
                radius=0.1, h_range=(0.25, 0.5), size=cone_object.num_instances, device=cone_object.device
            )
            # write root state to simulation
            cone_object.write_root_state_to_sim(root_state)
            # reset buffers
            cone_object.reset()
            print("----------------------------------------")
            print("[INFO]: Resetting object state...")
        # apply sim data
        cone_object.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        cone_object.update(sim_dt)
        # print the root position
        if count % 50 == 0:
            print(f"Root position (in world): {cone_object.data.root_state_w[:, :3]}")


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg()
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[1.5, 0.0, 1.0], target=[0.0, 0.0, 0.0])
    # Design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
4,847
Python
31.32
111
0.632763
NVIDIA-Omniverse/orbit/source/standalone/tutorials/02_scene/create_scene.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils import configclass

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import CARTPOLE_CFG  # isort:skip


@configclass
class CartpoleSceneCfg(InteractiveSceneCfg):
    """Configuration for a cart-pole scene."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # articulation
    cartpole: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability.
    robot = scene["cartpole"]
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = robot.data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            robot.write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
            joint_pos += torch.rand_like(joint_pos) * 0.1
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply random action
        # -- generate random joint efforts
        efforts = torch.randn_like(robot.data.joint_pos) * 5.0
        # -- apply action to the robot
        robot.set_joint_effort_target(efforts)
        # -- write data to sim
        scene.write_data_to_sim()
        # Perform step
        sim.step()
        # Increment counter
        count += 1
        # Update buffers
        scene.update(sim_dt)


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
    # Design scene
    scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
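# Note (sketch): "{ENV_REGEX_NS}" expands to the per-environment namespace, so
# with --num_envs 2 the cartpole above resolves to prims such as:
#
#   /World/envs/env_0/Robot
#   /World/envs/env_1/Robot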
4,251
Python
30.731343
109
0.663844
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/create_cartpole_base_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to create a simple environment with a cartpole. It combines the concepts of
scene, action, observation and event managers to create an environment.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on creating a cartpole base environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import math
import torch

import omni.isaac.orbit.envs.mdp as mdp
from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.utils import configclass

from omni.isaac.orbit_tasks.classic.cartpole.cartpole_env_cfg import CartpoleSceneCfg


@configclass
class ActionsCfg:
    """Action specifications for the environment."""

    joint_efforts = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=5.0)


@configclass
class ObservationsCfg:
    """Observation specifications for the environment."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel)

        def __post_init__(self) -> None:
            self.enable_corruption = False
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class EventCfg:
    """Configuration for events."""

    # on startup
    add_pole_mass = EventTerm(
        func=mdp.add_body_mass,
        mode="startup",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names=["pole"]),
            "mass_range": (0.1, 0.5),
        },
    )

    # on reset
    reset_cart_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]),
            "position_range": (-1.0, 1.0),
            "velocity_range": (-0.1, 0.1),
        },
    )

    reset_pole_position = EventTerm(
        func=mdp.reset_joints_by_offset,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]),
            "position_range": (-0.125 * math.pi, 0.125 * math.pi),
            "velocity_range": (-0.01 * math.pi, 0.01 * math.pi),
        },
    )


@configclass
class CartpoleEnvCfg(BaseEnvCfg):
    """Configuration for the cartpole environment."""

    # Scene settings
    scene = CartpoleSceneCfg(num_envs=1024, env_spacing=2.5)
    # Basic settings
    observations = ObservationsCfg()
    actions = ActionsCfg()
    events = EventCfg()

    def __post_init__(self):
        """Post initialization."""
        # viewer settings
        self.viewer.eye = [4.5, 0.0, 6.0]
        self.viewer.lookat = [0.0, 0.0, 2.0]
        # step settings
        self.decimation = 4  # env step every 4 sim steps: 200Hz / 4 = 50Hz
        # simulation settings
        self.sim.dt = 0.005  # sim step every 5ms: 200Hz


def main():
    """Main function."""
    # parse the arguments
    env_cfg = CartpoleEnvCfg()
    env_cfg.scene.num_envs = args_cli.num_envs
    # setup base environment
    env = BaseEnv(cfg=env_cfg)

    # simulate physics
    count = 0
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # sample random actions
            joint_efforts = torch.randn_like(env.action_manager.action)
            # step the environment
            obs, _ = env.step(joint_efforts)
            # print current orientation of pole
            print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
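# Timing check (sketch): with sim.dt = 0.005 s (200 Hz) and decimation = 4,
# the environment steps once every 4 sim steps, i.e. at 200 / 4 = 50 Hz:
#
#   env_step_hz = 1.0 / (env_cfg.sim.dt * env_cfg.decimation)  # -> 50.0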
4,839
Python
27.470588
107
0.633602
NVIDIA-Omniverse/orbit/source/standalone/tutorials/03_envs/run_cartpole_rl_env.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to run the RL environment for the cartpole balancing task.
"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on running the cartpole RL environment.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

from omni.isaac.orbit.envs import RLTaskEnv

from omni.isaac.orbit_tasks.classic.cartpole.cartpole_env_cfg import CartpoleEnvCfg


def main():
    """Main function."""
    # create environment configuration
    env_cfg = CartpoleEnvCfg()
    env_cfg.scene.num_envs = args_cli.num_envs
    # setup RL environment
    env = RLTaskEnv(cfg=env_cfg)

    # simulate physics
    count = 0
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # sample random actions
            joint_efforts = torch.randn_like(env.action_manager.action)
            # step the environment
            obs, rew, terminated, truncated, info = env.step(joint_efforts)
            # print current orientation of pole
            print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
            # update counter
            count += 1

    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
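# Note (sketch): RLTaskEnv follows the gymnasium 5-tuple step convention, so an
# episode ends when either flag is set; a manual rollout could gate on:
#
#   done = terminated | truncated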
2,054
Python
25.688311
96
0.648978
NVIDIA-Omniverse/orbit/source/standalone/tutorials/05_controllers/run_diff_ik.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the differential inverse kinematics controller with the simulator.

The differential IK controller can be configured in different modes. It uses the Jacobians computed by
PhysX. This helps perform parallelized computation of the inverse kinematics.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/05_controllers/run_diff_ik.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on using the differential IK controller.")
parser.add_argument("--robot", type=str, default="franka_panda", help="Name of the robot.")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.controllers import DifferentialIKController, DifferentialIKControllerCfg
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import subtract_frame_transforms

##
# Pre-defined configs
##
from omni.isaac.orbit_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG  # isort:skip


@configclass
class TableTopSceneCfg(InteractiveSceneCfg):
    """Configuration for a table-top scene."""

    # ground plane
    ground = AssetBaseCfg(
        prim_path="/World/defaultGroundPlane",
        spawn=sim_utils.GroundPlaneCfg(),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)),
    )

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # mount
    table = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Table",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
        ),
    )

    # articulation
    if args_cli.robot == "franka_panda":
        robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    elif args_cli.robot == "ur10":
        robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop."""
    # Extract scene entities
    # note: we only do this here for readability.
    robot = scene["robot"]

    # Create controller
    diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
    diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)

    # Markers
    frame_marker_cfg = FRAME_MARKER_CFG.copy()
    frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
    ee_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_current"))
    goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal"))

    # Define goals for the arm
    ee_goals = [
        [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
        [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
        [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
    ]
    ee_goals = torch.tensor(ee_goals, device=sim.device)
    # Track the given command
    current_goal_idx = 0
    # Create buffers to store actions
    ik_commands = torch.zeros(scene.num_envs, diff_ik_controller.action_dim, device=robot.device)
    ik_commands[:] = ee_goals[current_goal_idx]

    # Specify robot-specific parameters
    if args_cli.robot == "franka_panda":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=["panda_joint.*"], body_names=["panda_hand"])
    elif args_cli.robot == "ur10":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=[".*"], body_names=["ee_link"])
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")
    # Resolving the scene entities
    robot_entity_cfg.resolve(scene)
    # Obtain the frame index of the end-effector
    # For a fixed base robot, the frame index is one less than the body index. This is because
    # the root body is not included in the returned Jacobians.
    if robot.is_fixed_base:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0] - 1
    else:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0]

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # reset
        if count % 150 == 0:
            # reset time
            count = 0
            # reset joint state
            joint_pos = robot.data.default_joint_pos.clone()
            joint_vel = robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            robot.reset()
            # reset actions
            ik_commands[:] = ee_goals[current_goal_idx]
            joint_pos_des = joint_pos[:, robot_entity_cfg.joint_ids].clone()
            # reset controller
            diff_ik_controller.reset()
            diff_ik_controller.set_command(ik_commands)
            # change goal
            current_goal_idx = (current_goal_idx + 1) % len(ee_goals)
        else:
            # obtain quantities from simulation
            jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids]
            ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
            root_pose_w = robot.data.root_state_w[:, 0:7]
            joint_pos = robot.data.joint_pos[:, robot_entity_cfg.joint_ids]
            # compute frame in root frame
            ee_pos_b, ee_quat_b = subtract_frame_transforms(
                root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
            )
            # compute the joint commands
            joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)

        # apply actions
        robot.set_joint_position_target(joint_pos_des, joint_ids=robot_entity_cfg.joint_ids)
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        count += 1
        # update buffers
        scene.update(sim_dt)

        # obtain quantities from simulation
        ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
        # update marker positions
        ee_marker.visualize(ee_pose_w[:, 0:3], ee_pose_w[:, 3:7])
        goal_marker.visualize(ik_commands[:, 0:3] + scene.env_origins, ik_commands[:, 3:7])


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(dt=0.01)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # Design scene
    scene_cfg = TableTopSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
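# For reference, a sketch of the damped least-squares ("dls") update the controller
# is configured with: dq = J^T (J J^T + lambda^2 I)^{-1} e, where `e` is a hypothetical
# (num_envs, 6) pose-error tensor and lambda = 0.05 is an assumed damping term.
#
#   lam2_eye = (0.05**2) * torch.eye(6, device=jacobian.device)
#   dq = (jacobian.transpose(1, 2) @ torch.linalg.solve(
#       jacobian @ jacobian.transpose(1, 2) + lam2_eye, pose_error.unsqueeze(-1))).squeeze(-1)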
8,021
Python
36.311628
109
0.657399
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_usd_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the camera sensor from the Orbit framework.

The camera sensor is created and interfaced through the Omniverse Replicator API. However, instead of using
the simulator or OpenGL convention for the camera, we use the robotics or ROS convention.

.. code-block:: bash

    # Usage with GUI
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py

    # Usage with headless
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --headless --offscreen_render

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the camera sensor.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU device for camera output.")
parser.add_argument(
    "--draw",
    action="store_true",
    default=False,
    help="Draw the pointcloud from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--save",
    action="store_true",
    default=False,
    help="Save the data from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--camera_id",
    type=int,
    choices={0, 1},
    default=0,
    help=(
        "The camera ID to use for displaying points or saving the camera data. Default is 0."
        " The viewport will always initialize with the perspective of camera 0."
    ),
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import os
import random
import torch

import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import RAY_CASTER_MARKER_CFG
from omni.isaac.orbit.sensors.camera import Camera, CameraCfg
from omni.isaac.orbit.sensors.camera.utils import create_pointcloud_from_depth
from omni.isaac.orbit.utils import convert_dict_to_backend


def define_sensor() -> Camera:
    """Defines the camera sensor to add to the scene."""
    # Setup camera sensor
    # In contrast to the ray-cast camera, we spawn the prim at these locations.
    # This means the camera sensor will be attached to these prims.
    prim_utils.create_prim("/World/Origin_00", "Xform")
    prim_utils.create_prim("/World/Origin_01", "Xform")
    camera_cfg = CameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        update_period=0,
        height=480,
        width=640,
        data_types=[
            "rgb",
            "distance_to_image_plane",
            "normals",
            "semantic_segmentation",
            "instance_segmentation_fast",
            "instance_id_segmentation_fast",
        ],
        colorize_semantic_segmentation=True,
        colorize_instance_id_segmentation=True,
        colorize_instance_segmentation=True,
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
    )
    # Create camera
    camera = Camera(cfg=camera_cfg)

    return camera


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create a dictionary for the scene entities
    scene_entities = {}

    # Xform to hold objects
    prim_utils.create_prim("/World/Objects", "Xform")
    # Random objects
    for i in range(8):
        # sample random position
        position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
        position *= np.asarray([1.5, 1.5, 0.5])
        # sample random color
        color = (random.random(), random.random(), random.random())
        # choose random prim type
        prim_type = random.choice(["Cube", "Cone", "Cylinder"])
        common_properties = {
            "rigid_props": sim_utils.RigidBodyPropertiesCfg(),
            "mass_props": sim_utils.MassPropertiesCfg(mass=5.0),
            "collision_props": sim_utils.CollisionPropertiesCfg(),
            "visual_material": sim_utils.PreviewSurfaceCfg(diffuse_color=color, metallic=0.5),
            "semantic_tags": [("class", prim_type)],
        }
        if prim_type == "Cube":
            shape_cfg = sim_utils.CuboidCfg(size=(0.25, 0.25, 0.25), **common_properties)
        elif prim_type == "Cone":
            shape_cfg = sim_utils.ConeCfg(radius=0.1, height=0.25, **common_properties)
        elif prim_type == "Cylinder":
            shape_cfg = sim_utils.CylinderCfg(radius=0.25, height=0.25, **common_properties)
        # Rigid Object
        obj_cfg = RigidObjectCfg(
            prim_path=f"/World/Objects/Obj_{i:02d}",
            spawn=shape_cfg,
            init_state=RigidObjectCfg.InitialStateCfg(pos=position),
        )
        scene_entities[f"rigid_object{i}"] = RigidObject(cfg=obj_cfg)

    # Sensors
    camera = define_sensor()

    # return the scene information
    scene_entities["camera"] = camera
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # extract entities for simplified notation
    camera: Camera = scene_entities["camera"]

    # Create replicator writer
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera")
    rep_writer = rep.BasicWriter(
        output_dir=output_dir,
        frame_padding=0,
        colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation,
        colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation,
        colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation,
    )

    # Camera positions, targets, orientations
    camera_positions = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    camera_targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    # These orientations are in ROS-convention, and will position the cameras to view the origin
    camera_orientations = torch.tensor(  # noqa: F841
        [[-0.1759, 0.3399, 0.8205, -0.4247], [-0.4247, 0.8205, -0.3399, 0.1759]], device=sim.device
    )

    # Set pose: There are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    camera.set_world_poses_from_view(camera_positions, camera_targets)
    # -- Option-2: Set pose using ROS
    # camera.set_world_poses(camera_positions, camera_orientations, convention="ros")

    # Index of the camera to use for visualization and saving
    camera_index = args_cli.camera_id

    # Create the markers for the --draw option outside of is_running() loop
    if sim.has_gui() and args_cli.draw:
        cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/CameraPointCloud")
        cfg.markers["hit"].radius = 0.002
        pc_markers = VisualizationMarkers(cfg)

    # Simulate physics
    while simulation_app.is_running():
        # Step simulation
        sim.step()
        # Update camera data
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        if "rgb" in camera.data.output.keys():
            print("Received shape of rgb image : ", camera.data.output["rgb"].shape)
        if "distance_to_image_plane" in camera.data.output.keys():
            print("Received shape of depth image : ", camera.data.output["distance_to_image_plane"].shape)
        if "normals" in camera.data.output.keys():
            print("Received shape of normals : ", camera.data.output["normals"].shape)
        if "semantic_segmentation" in camera.data.output.keys():
            print("Received shape of semantic segm. : ", camera.data.output["semantic_segmentation"].shape)
        if "instance_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance segm. : ", camera.data.output["instance_segmentation_fast"].shape)
        if "instance_id_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance id segm.: ", camera.data.output["instance_id_segmentation_fast"].shape)
        print("-------------------------------")

        # Extract camera data
        if args_cli.save:
            # Save images from camera at camera_index
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            # tensordict allows easy indexing of tensors in the dictionary
            single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")

            # Extract the other information
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save them using its writer
            rep_output = dict()
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output[key] = {"data": data, "info": info}
                else:
                    rep_output[key] = data
            # Save images
            # Note: We need to provide On-time data for Replicator to save the images.
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

        # Draw pointcloud if there is a GUI and --draw has been passed
        if sim.has_gui() and args_cli.draw and "distance_to_image_plane" in camera.data.output.keys():
            # Derive pointcloud from camera at camera_index
            pointcloud = create_pointcloud_from_depth(
                intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
                depth=camera.data.output[camera_index]["distance_to_image_plane"],
                position=camera.data.pos_w[camera_index],
                orientation=camera.data.quat_w_ros[camera_index],
                device=sim.device,
            )
            # In the first few steps, things are still being instanced and Camera.data
            # can be empty. If we attempt to visualize an empty pointcloud it will crash
            # the sim, so we check that the pointcloud is not empty.
            if pointcloud.size()[0] > 0:
                pc_markers.visualize(translations=pointcloud)


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg(device="cpu" if args_cli.cpu else "cuda")
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim, scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
11,535
Python
38.642612
117
0.646034
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_frame_transformer.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates the FrameTransformer sensor by visualizing the frames that it creates.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_frame_transformer.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(
    description="This script checks the FrameTransformer sensor by visualizing the frames that it creates."
)
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(headless=args_cli.headless)
simulation_app = app_launcher.app

"""Rest everything follows."""

import math
import torch

import omni.isaac.debug_draw._debug_draw as omni_debug_draw

import omni.isaac.orbit.sim as sim_utils
import omni.isaac.orbit.utils.math as math_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG
from omni.isaac.orbit.sensors import FrameTransformer, FrameTransformerCfg, OffsetCfg
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort:skip


def define_sensor() -> FrameTransformer:
    """Defines the FrameTransformer sensor to add to the scene."""
    # define offset
    rot_offset = math_utils.quat_from_euler_xyz(torch.zeros(1), torch.zeros(1), torch.tensor(-math.pi / 2))
    pos_offset = math_utils.quat_apply(rot_offset, torch.tensor([0.08795, 0.01305, -0.33797]))

    # Example using .* to get full body + LF_FOOT
    frame_transformer_cfg = FrameTransformerCfg(
        prim_path="/World/Robot/base",
        target_frames=[
            FrameTransformerCfg.FrameCfg(prim_path="/World/Robot/.*"),
            FrameTransformerCfg.FrameCfg(
                prim_path="/World/Robot/LF_SHANK",
                name="LF_FOOT_USER",
                offset=OffsetCfg(pos=tuple(pos_offset.tolist()), rot=tuple(rot_offset[0].tolist())),
            ),
        ],
        debug_vis=False,
    )
    frame_transformer = FrameTransformer(frame_transformer_cfg)

    return frame_transformer


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)
    # -- Robot
    robot = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Robot"))
    # -- Sensors
    frame_transformer = define_sensor()

    # return the scene information
    scene_entities = {"robot": robot, "frame_transformer": frame_transformer}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # extract entities for simplified notation
    robot: Articulation = scene_entities["robot"]
    frame_transformer: FrameTransformer = scene_entities["frame_transformer"]

    # We only want one visualization at a time. This visualizer will be used
    # to step through each frame so the user can verify that the correct frame
    # is being visualized as the frame names are printed to the console
    if not args_cli.headless:
        cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript")
        cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
        transform_visualizer = VisualizationMarkers(cfg)
        # debug drawing for lines connecting the frame
        draw_interface = omni_debug_draw.acquire_debug_draw_interface()
    else:
        transform_visualizer = None
        draw_interface = None

    frame_index = 0
    # Simulate physics
    while simulation_app.is_running():
        # perform this loop at policy control freq (50 Hz)
        robot.set_joint_position_target(robot.data.default_joint_pos.clone())
        robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # read data from sim
        robot.update(sim_dt)
        frame_transformer.update(dt=sim_dt)

        # Change the frame that we are visualizing to ensure that frame names
        # are correctly associated with the frames
        if not args_cli.headless:
            if count % 50 == 0:
                # get frame names
                frame_names = frame_transformer.data.target_frame_names
                print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}")
                # increment frame index
                frame_index += 1
                frame_index = frame_index % len(frame_names)

            # visualize frame
            source_pos = frame_transformer.data.source_pos_w
            source_quat = frame_transformer.data.source_quat_w
            target_pos = frame_transformer.data.target_pos_w[:, frame_index]
            target_quat = frame_transformer.data.target_quat_w[:, frame_index]
            # draw the frames
            transform_visualizer.visualize(
                torch.cat([source_pos, target_pos], dim=0), torch.cat([source_quat, target_quat], dim=0)
            )

            # draw the line connecting the frames
            draw_interface.clear_lines()
            # plain color for lines
            lines_colors = [[1.0, 1.0, 0.0, 1.0]] * source_pos.shape[0]
            line_thicknesses = [5.0] * source_pos.shape[0]
            draw_interface.draw_lines(source_pos.tolist(), target_pos.tolist(), lines_colors, line_thicknesses)


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005))
    # Set main camera
    sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
    # Design the scene
    scene_entities = design_scene()
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities)


if __name__ == "__main__":
    # Run the main function
    main()
    # Close the simulator
    simulation_app.close()
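# A worked note on the sensor offset in define_sensor above:
# `quat_from_euler_xyz(0, 0, -pi/2)` yields the quaternion of a -90 degree yaw,
# approximately (w, x, y, z) = (0.7071, 0, 0, -0.7071), and `quat_apply` rotates
# the foot offset vector into that frame. A minimal sketch (the input vector is
# illustrative):
#
#   rot = math_utils.quat_from_euler_xyz(torch.zeros(1), torch.zeros(1), torch.tensor(-math.pi / 2))
#   vec = math_utils.quat_apply(rot, torch.tensor([1.0, 0.0, 0.0]))  # ~(0, -1, 0)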
6,473
Python
33.43617
111
0.657346
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_ray_caster_camera.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the ray-cast camera sensor from the Orbit framework.

The camera sensor is based on using Warp kernels which do ray-casting against static meshes.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster_camera.py

"""

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the ray-cast camera sensor.")
parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to generate.")
parser.add_argument("--save", action="store_true", default=False, help="Save the obtained data to disk.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
import torch

import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns
from omni.isaac.orbit.utils import convert_dict_to_backend
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import project_points, unproject_depth


def define_sensor() -> RayCasterCamera:
    """Defines the ray-cast camera sensor to add to the scene."""
    # Camera base frames
    # In contrast to the USD camera, we associate the sensor to the prims at these locations.
    # This means that the parent prim of the sensor is the prim at this location.
    prim_utils.create_prim("/World/Origin_00/CameraSensor", "Xform")
    prim_utils.create_prim("/World/Origin_01/CameraSensor", "Xform")

    # Setup camera sensor
    camera_cfg = RayCasterCameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        mesh_prim_paths=["/World/ground"],
        update_period=0.1,
        offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)),
        data_types=["distance_to_image_plane", "normals", "distance_to_camera"],
        debug_vis=True,
        pattern_cfg=patterns.PinholeCameraPatternCfg(
            focal_length=24.0,
            horizontal_aperture=20.955,
            height=480,
            width=640,
        ),
    )
    # Create camera
    camera = RayCasterCamera(cfg=camera_cfg)

    return camera


def design_scene():
    # Populate scene
    # -- Rough terrain
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    cfg.func("/World/ground", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=600.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)
    # -- Sensors
    camera = define_sensor()

    # return the scene information
    scene_entities = {"camera": camera}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # extract entities for simplified notation
    camera: RayCasterCamera = scene_entities["camera"]

    # Create replicator writer
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "ray_caster_camera")
    rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)

    # Set pose: There are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    camera.set_world_poses_from_view(eyes, targets)
    # -- Option-2: Set pose using ROS
    # position = torch.tensor([[2.5, 2.5, 2.5]], device=sim.device)
    # orientation = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=sim.device)
    # camera.set_world_poses(position, orientation, indices=[0], convention="ros")

    # Simulate physics
    while simulation_app.is_running():
        # Step simulation
        sim.step()
        # Update camera data
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        print("Received shape of depth image: ", camera.data.output["distance_to_image_plane"].shape)
        print("-------------------------------")

        # Extract camera data
        if args_cli.save:
            # Extract camera data
            camera_index = 0
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            if sim.backend == "torch":
                # tensordict allows easy indexing of tensors in the dictionary
                single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")
            else:
                # for numpy, we need to manually index the data
                single_cam_data = dict()
                for key, value in camera.data.output.items():
                    single_cam_data[key] = value[camera_index]
            # Extract the other information
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save them using its writer
            rep_output = dict()
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output[key] = {"data": data, "info": info}
                else:
                    rep_output[key] = data
            # Save images
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

            # Pointcloud in world frame
            points_3d_cam = unproject_depth(
                camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
            )

            # Check methods are valid
            im_height, im_width = camera.image_shape
            # -- project points to (u, v, d)
            reproj_points = project_points(points_3d_cam, camera.data.intrinsic_matrices)
            reproj_depths = reproj_points[..., -1].view(-1, im_width, im_height).transpose_(1, 2)
            sim_depths = camera.data.output["distance_to_image_plane"].squeeze(-1)
            torch.testing.assert_close(reproj_depths, sim_depths)


def main():
    """Main function."""
    # Load kit helper
    sim = sim_utils.SimulationContext()
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 3.5], [0.0, 0.0, 0.0])
    # design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
7,126
Python
36.708995
115
0.642015
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to add and simulate on-board sensors for a robot.

We add the following sensors on the quadruped robot, ANYmal-C (ANYbotics):

* USD-Camera: This is a camera sensor that is attached to the robot's base.
* Height Scanner: This is a height scanner sensor that is attached to the robot's base.
* Contact Sensor: This is a contact sensor that is attached to the robot's feet.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.")
parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG  # isort: skip


@configclass
class SensorsSceneCfg(InteractiveSceneCfg):
    """Design the scene with sensors on the robot."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # sensors
    camera = CameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb", "distance_to_image_plane"],
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
        offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        update_period=0.02,
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=True,
        mesh_prim_paths=["/World/defaultGroundPlane"],
    )
    contact_forces = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT", update_period=0.0, history_length=6, debug_vis=True
    )


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():
        # Reset
        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = scene["robot"].data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            scene["robot"].write_root_state_to_sim(root_state)
            # set joint positions with some noise
            joint_pos, joint_vel = (
                scene["robot"].data.default_joint_pos.clone(),
                scene["robot"].data.default_joint_vel.clone(),
            )
            joint_pos += torch.rand_like(joint_pos) * 0.1
            scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply default actions to the robot
        # -- generate actions/commands
        targets = scene["robot"].data.default_joint_pos
        # -- apply action to the robot
        scene["robot"].set_joint_position_target(targets)
        # -- write data to sim
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        scene.update(sim_dt)

        # print information from the sensors
        print("-------------------------------")
        print(scene["camera"])
        print("Received shape of rgb   image: ", scene["camera"].data.output["rgb"].shape)
        print("Received shape of depth image: ", scene["camera"].data.output["distance_to_image_plane"].shape)
        print("-------------------------------")
        print(scene["height_scanner"])
        print("Received max height value: ", torch.max(scene["height_scanner"].data.ray_hits_w[..., -1]).item())
        print("-------------------------------")
        print(scene["contact_forces"])
        print("Received max contact force of: ", torch.max(scene["contact_forces"].data.net_forces_w).item())


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.005, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
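# A short note on the reset block in run_simulator above: `scene.env_origins` holds
# one (x, y, z) origin per environment, so the in-place add shifts every robot's
# default root position from the shared world origin into its own environment.
# Sketch of the shapes involved (the 13-entry state layout is the usual Orbit
# convention and stated here as an assumption):
#
#   root_state         # (num_envs, 13): position(3), quaternion(4), lin vel(3), ang vel(3)
#   scene.env_origins  # (num_envs, 3)
#   root_state[:, :3] += scene.env_origins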
6,365
Python
33.978022
112
0.635192
NVIDIA-Omniverse/orbit/source/standalone/tutorials/04_sensors/run_ray_caster.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to use the ray-caster sensor.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/04_sensors/run_ray_caster.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Ray Caster Test Script")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.timer import Timer


def define_sensor() -> RayCaster:
    """Defines the ray-caster sensor to add to the scene."""
    # Create a ray-caster sensor
    ray_caster_cfg = RayCasterCfg(
        prim_path="/World/Origin.*/ball",
        mesh_prim_paths=["/World/ground"],
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(2.0, 2.0)),
        attach_yaw_only=True,
        debug_vis=not args_cli.headless,
    )
    ray_caster = RayCaster(cfg=ray_caster_cfg)

    return ray_caster


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Rough terrain
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    cfg.func("/World/ground", cfg)
    # -- Light
    cfg = sim_utils.DistantLightCfg(intensity=2000)
    cfg.func("/World/light", cfg)

    # Create separate groups called "Origin0" to "Origin3"
    # Each group will have a ball in it
    origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)
    # -- Balls
    cfg = RigidObjectCfg(
        prim_path="/World/Origin.*/ball",
        spawn=sim_utils.SphereCfg(
            radius=0.25,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
        ),
    )
    balls = RigidObject(cfg)
    # -- Sensors
    ray_caster = define_sensor()

    # return the scene information
    scene_entities = {"balls": balls, "ray_caster": ray_caster}
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # Extract scene_entities for simplified notation
    ray_caster: RayCaster = scene_entities["ray_caster"]
    balls: RigidObject = scene_entities["balls"]

    # define an initial position of the sensor
    ball_default_state = balls.data.default_root_state.clone()
    ball_default_state[:, :3] = torch.rand_like(ball_default_state[:, :3]) * 10

    # Create a counter for resetting the scene
    step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # Reset the scene
        if step_count % 250 == 0:
            # reset the balls
            balls.write_root_state_to_sim(ball_default_state)
            # reset the sensor
            ray_caster.reset()
            # reset the counter
            step_count = 0
        # Step simulation
        sim.step()
        # Update the ray-caster
        with Timer(
            f"Ray-caster update with {4} x {ray_caster.num_rays} rays with max height of"
            f" {torch.max(ray_caster.data.pos_w).item():.2f}"
        ):
            ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
        # Update counter
        step_count += 1


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg()
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([0.0, 15.0, 15.0], [0.0, 0.0, -2.5])
    # Design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
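# The `Timer` utility used above is a plain context manager: the message, if one is
# given, is printed together with the elapsed time when the block exits, so it can
# wrap any code region. A minimal sketch (the message string is illustrative):
#
#   with Timer("single ray-cast update"):
#       ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)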
4,764
Python
30.143791
101
0.649664
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/launch_app.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to run IsaacSim via the AppLauncher.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on running IsaacSim via the AppLauncher.")
parser.add_argument("--size", type=float, default=1.0, help="Side-length of cuboid")
# SimulationApp arguments https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.kit/docs/index.html?highlight=simulationapp#omni.isaac.kit.SimulationApp
parser.add_argument(
    "--width", type=int, default=1280, help="Width of the viewport and generated images. Defaults to 1280"
)
parser.add_argument(
    "--height", type=int, default=720, help="Height of the viewport and generated images. Defaults to 720"
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils


def design_scene():
    """Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
    # Ground-plane
    cfg_ground = sim_utils.GroundPlaneCfg()
    cfg_ground.func("/World/defaultGroundPlane", cfg_ground)

    # spawn distant light
    cfg_light_distant = sim_utils.DistantLightCfg(
        intensity=3000.0,
        color=(0.75, 0.75, 0.75),
    )
    cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))

    # spawn a cuboid
    cfg_cuboid = sim_utils.CuboidCfg(
        size=[args_cli.size] * 3,
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)),
    )
    # Spawn cuboid, altering translation on the z-axis to scale to its size
    cfg_cuboid.func("/World/Object", cfg_cuboid, translation=(0.0, 0.0, args_cli.size / 2))


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])

    # Design scene by adding assets to it
    design_scene()

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
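# Example invocation with the CLI arguments defined above (the values are
# illustrative):
#
#   ./orbit.sh -p source/standalone/tutorials/00_sim/launch_app.py --size 0.5 --width 1920 --height 1080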
2,854
Python
28.432989
173
0.689909
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/create_empty.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to create a simple stage in Isaac Sim.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/create_empty.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating an empty stage.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

from omni.isaac.orbit.sim import SimulationCfg, SimulationContext


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = SimulationCfg(dt=0.01, substeps=1)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
1,436
Python
22.177419
84
0.685933
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/spawn_prims.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates how to spawn prims into the scene.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/spawn_prims.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on spawning prims into the scene.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR


def design_scene():
    """Designs the scene by spawning ground plane, light, objects and meshes from usd files."""
    # Ground-plane
    cfg_ground = sim_utils.GroundPlaneCfg()
    cfg_ground.func("/World/defaultGroundPlane", cfg_ground)

    # spawn distant light
    cfg_light_distant = sim_utils.DistantLightCfg(
        intensity=3000.0,
        color=(0.75, 0.75, 0.75),
    )
    cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10))

    # create a new xform prim for all objects to be spawned under
    prim_utils.create_prim("/World/Objects", "Xform")
    # spawn a red cone
    cfg_cone = sim_utils.ConeCfg(
        radius=0.15,
        height=0.5,
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
    )
    cfg_cone.func("/World/Objects/Cone1", cfg_cone, translation=(-1.0, 1.0, 1.0))
    cfg_cone.func("/World/Objects/Cone2", cfg_cone, translation=(-1.0, -1.0, 1.0))

    # spawn a green cone with colliders and rigid body
    cfg_cone_rigid = sim_utils.ConeCfg(
        radius=0.15,
        height=0.5,
        rigid_props=sim_utils.RigidBodyPropertiesCfg(),
        mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
        collision_props=sim_utils.CollisionPropertiesCfg(),
        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
    )
    cfg_cone_rigid.func(
        "/World/Objects/ConeRigid", cfg_cone_rigid, translation=(0.0, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0)
    )

    # spawn a usd file of a table into the scene
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Objects/Table", cfg, translation=(0.0, 0.0, 1.05))


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.01, substeps=1)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5])

    # Design scene by adding assets to it
    design_scene()

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
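# The spawn configs above are plain config objects, so the same config can be
# reused to spawn further prims at new paths, exactly as Cone1 and Cone2 share
# `cfg_cone`. A minimal sketch (the path and translation are illustrative):
#
#   cfg_cone.func("/World/Objects/Cone3", cfg_cone, translation=(1.0, 1.0, 1.0))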
3,338
Python
29.354545
115
0.670761
NVIDIA-Omniverse/orbit/source/standalone/tutorials/00_sim/log_time.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to generate log outputs while the simulation plays.
It accompanies the tutorial on docker usage.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/tutorials/00_sim/log_time.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse
import os

from omni.isaac.orbit.app import AppLauncher

# create argparser
parser = argparse.ArgumentParser(description="Tutorial on creating logs from within the docker container.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

from omni.isaac.orbit.sim import SimulationCfg, SimulationContext


def main():
    """Main function."""
    # Specify that the logs must be in logs/docker_tutorial
    log_dir_path = os.path.join("logs", "docker_tutorial")
    # In the container, the absolute path will be
    # /workspace/orbit/logs/docker_tutorial, because
    # all python execution is done through /workspace/orbit/orbit.sh
    # and the calling process' path will be /workspace/orbit
    log_dir_path = os.path.abspath(log_dir_path)
    if not os.path.isdir(log_dir_path):
        os.mkdir(log_dir_path)
    print(f"[INFO] Logging experiment to directory: {log_dir_path}")

    # Initialize the simulation context
    sim_cfg = SimulationCfg(dt=0.01, substeps=1)
    sim = SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Prepare to count sim_time
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0

    # Open logging file
    with open(os.path.join(log_dir_path, "log.txt"), "w") as log_file:
        # Simulate physics
        while simulation_app.is_running():
            log_file.write(f"{sim_time}" + "\n")
            # perform step
            sim.step()
            sim_time += sim_dt


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
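# Since one timestamp is written per physics step, and assuming the configured
# dt of 0.01 is what `get_physics_dt()` returns, logs/docker_tutorial/log.txt
# starts roughly as:
#
#   0.0
#   0.01
#   ...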
2,342
Python
27.228915
107
0.673356
NVIDIA-Omniverse/orbit/source/standalone/demos/markers.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This script demonstrates different types of markers.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/markers.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different types of markers.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.sim import SimulationContext
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR, ISAAC_ORBIT_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import quat_from_angle_axis


def define_markers() -> VisualizationMarkers:
    """Define markers with various different shapes."""
    marker_cfg = VisualizationMarkersCfg(
        prim_path="/Visuals/myMarkers",
        markers={
            "frame": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
                scale=(0.5, 0.5, 0.5),
            ),
            "arrow_x": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/arrow_x.usd",
                scale=(1.0, 0.5, 0.5),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 1.0)),
            ),
            "cube": sim_utils.CuboidCfg(
                size=(1.0, 1.0, 1.0),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
            ),
            "sphere": sim_utils.SphereCfg(
                radius=0.5,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
            ),
            "cylinder": sim_utils.CylinderCfg(
                radius=0.5,
                height=1.0,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
            ),
            "cone": sim_utils.ConeCfg(
                radius=0.5,
                height=1.0,
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 0.0)),
            ),
            "mesh": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(10.0, 10.0, 10.0),
            ),
            "mesh_recolored": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
                scale=(10.0, 10.0, 10.0),
                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.25, 0.0)),
            ),
            "robot_mesh": sim_utils.UsdFileCfg(
                usd_path=f"{ISAAC_ORBIT_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
                scale=(2.0, 2.0, 2.0),
                visual_material=sim_utils.GlassMdlCfg(glass_color=(0.0, 0.1, 0.0)),
            ),
        },
    )
    return VisualizationMarkers(marker_cfg)


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view([0.0, 18.0, 12.0], [0.0, 3.0, 0.0])

    # Spawn things into stage
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # create markers
    my_visualizer = define_markers()

    # define a grid of positions where the markers should be placed
    num_markers_per_type = 5
    grid_spacing = 2.0
    # Calculate the half-width and half-height
    half_width = (num_markers_per_type - 1) / 2.0
    half_height = (my_visualizer.num_prototypes - 1) / 2.0
    # Create the x and y ranges centered around the origin
    x_range = torch.arange(-half_width * grid_spacing, (half_width + 1) * grid_spacing, grid_spacing)
    y_range = torch.arange(-half_height * grid_spacing, (half_height + 1) * grid_spacing, grid_spacing)
    # Create the grid
    x_grid, y_grid = torch.meshgrid(x_range, y_range, indexing="ij")
    x_grid = x_grid.reshape(-1)
    y_grid = y_grid.reshape(-1)
    z_grid = torch.zeros_like(x_grid)
    # marker locations
    marker_locations = torch.stack([x_grid, y_grid, z_grid], dim=1)
    marker_indices = torch.arange(my_visualizer.num_prototypes).repeat(num_markers_per_type)

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Yaw angle
    yaw = torch.zeros_like(marker_locations[:, 0])
    # Simulate physics
    while simulation_app.is_running():
        # rotate the markers around the z-axis for visualization
        marker_orientations = quat_from_angle_axis(yaw, torch.tensor([0.0, 0.0, 1.0]))
        # visualize
        my_visualizer.visualize(marker_locations, marker_orientations, marker_indices=marker_indices)
        # roll corresponding indices to show how marker prototype can be changed
        if yaw[0].item() % (0.5 * torch.pi) < 0.01:
            marker_indices = torch.roll(marker_indices, 1)
        # perform step
        sim.step()
        # increment yaw
        yaw += 0.01


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
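# A worked example of the index layout in main above: with num_markers_per_type = 5
# and P = my_visualizer.num_prototypes, the "ij" meshgrid flattens to 5 * P
# positions (all P y-values for each of the 5 x-values), while
# torch.arange(P).repeat(5) tiles [0, 1, ..., P - 1] five times. The two line up so
# that each grid column (fixed x) cycles once through every marker prototype.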
5,641
Python
34.936306
103
0.617089
NVIDIA-Omniverse/orbit/source/standalone/demos/hands.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates different dexterous hands.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/hands.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different dexterous hands.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.allegro import ALLEGRO_HAND_CFG  # isort:skip
from omni.isaac.orbit_assets.shadow_hand import SHADOW_HAND_CFG  # isort:skip


def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
    """Defines the origins of the scene."""
    # create tensor based on number of environments
    env_origins = torch.zeros(num_origins, 3)
    # create a grid of origins
    num_cols = np.floor(np.sqrt(num_origins))
    num_rows = np.ceil(num_origins / num_cols)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    env_origins[:, 2] = 0.0
    # return the origins
    return env_origins.tolist()


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1" and "Origin2"
    # Each group will have a mount and a robot on top of it
    origins = define_origins(num_origins=2, spacing=0.5)

    # Origin 1 with Allegro Hand
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # -- Robot
    allegro = Articulation(ALLEGRO_HAND_CFG.replace(prim_path="/World/Origin1/Robot"))

    # Origin 2 with Shadow Hand
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
    # -- Robot
    shadow_hand = Articulation(SHADOW_HAND_CFG.replace(prim_path="/World/Origin2/Robot"))

    # return the scene information
    scene_entities = {
        "allegro": allegro,
        "shadow_hand": shadow_hand,
    }
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Start with hand open
    grasp_mode = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 1000 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset robots
            for index, robot in enumerate(entities.values()):
                # root state
                root_state = robot.data.default_root_state.clone()
                root_state[:, :3] += origins[index]
                robot.write_root_state_to_sim(root_state)
                # joint state
                joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
                robot.write_joint_state_to_sim(joint_pos, joint_vel)
                # reset the internal state
                robot.reset()
            print("[INFO]: Resetting robots state...")
        # toggle grasp mode
        if count % 100 == 0:
            grasp_mode = 1 - grasp_mode
        # apply default actions to the hand robots
        for robot in entities.values():
            # generate joint positions
            joint_pos_target = robot.data.soft_joint_pos_limits[..., grasp_mode]
            # apply action to the robot
            robot.set_joint_position_target(joint_pos_target)
            # write data to sim
            robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        for robot in entities.values():
            robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view(eye=[0.0, -0.5, 1.5], target=[0.0, -0.2, 0.5])
    # design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
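# A note on the grasp toggling above: `soft_joint_pos_limits` stacks the lower and
# upper joint limits in its last dimension (index 0 = lower, 1 = upper, as also
# used for clamping in the arms demo), so indexing with grasp_mode drives every
# joint to one of its limits. A minimal sketch:
#
#   open_pos = robot.data.soft_joint_pos_limits[..., 0]   # lower limits (hand open)
#   closed_pos = robot.data.soft_joint_pos_limits[..., 1]  # upper limits (hand closed)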
5,446
Python
31.041176
113
0.637716
NVIDIA-Omniverse/orbit/source/standalone/demos/arms.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates different single-arm manipulators.

.. code-block:: bash

    # Usage
    ./orbit.sh -p source/standalone/demos/arms.py

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates different single-arm manipulators.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import torch

import omni.isaac.core.utils.prims as prim_utils

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR

##
# Pre-defined configs
##
# isort: off
from omni.isaac.orbit_assets import (
    FRANKA_PANDA_CFG,
    UR10_CFG,
    KINOVA_JACO2_N7S300_CFG,
    KINOVA_JACO2_N6S300_CFG,
    KINOVA_GEN3_N7_CFG,
    SAWYER_CFG,
)

# isort: on


def define_origins(num_origins: int, spacing: float) -> list[list[float]]:
    """Defines the origins of the scene."""
    # create tensor based on number of environments
    env_origins = torch.zeros(num_origins, 3)
    # create a grid of origins
    num_rows = np.floor(np.sqrt(num_origins))
    num_cols = np.ceil(num_origins / num_rows)
    xx, yy = torch.meshgrid(torch.arange(num_rows), torch.arange(num_cols), indexing="xy")
    env_origins[:, 0] = spacing * xx.flatten()[:num_origins] - spacing * (num_rows - 1) / 2
    env_origins[:, 1] = spacing * yy.flatten()[:num_origins] - spacing * (num_cols - 1) / 2
    env_origins[:, 2] = 0.0
    # return the origins
    return env_origins.tolist()


def design_scene() -> tuple[dict, list[list[float]]]:
    """Designs the scene."""
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create separate groups called "Origin1" to "Origin6"
    # Each group will have a mount and a robot on top of it
    origins = define_origins(num_origins=6, spacing=2.0)

    # Origin 1 with Franka Panda
    prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Origin1/Table", cfg, translation=(0.55, 0.0, 1.05))
    # -- Robot
    franka_arm_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Origin1/Robot")
    franka_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
    franka_panda = Articulation(cfg=franka_arm_cfg)

    # Origin 2 with UR10
    prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1])
    # -- Table
    cfg = sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
    )
    cfg.func("/World/Origin2/Table", cfg, translation=(0.0, 0.0, 1.03))
    # -- Robot
    ur10_cfg = UR10_CFG.replace(prim_path="/World/Origin2/Robot")
    ur10_cfg.init_state.pos = (0.0, 0.0, 1.03)
    ur10 = Articulation(cfg=ur10_cfg)

    # Origin 3 with Kinova JACO2 (7-Dof) arm
    prim_utils.create_prim("/World/Origin3", "Xform", translation=origins[2])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
    cfg.func("/World/Origin3/Table", cfg, translation=(0.0, 0.0, 0.8))
    # -- Robot
    kinova_arm_cfg = KINOVA_JACO2_N7S300_CFG.replace(prim_path="/World/Origin3/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
    kinova_j2n7s300 = Articulation(cfg=kinova_arm_cfg)

    # Origin 4 with Kinova JACO2 (6-Dof) arm
    prim_utils.create_prim("/World/Origin4", "Xform", translation=origins[3])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/ThorlabsTable/table_instanceable.usd")
    cfg.func("/World/Origin4/Table", cfg, translation=(0.0, 0.0, 0.8))
    # -- Robot
    kinova_arm_cfg = KINOVA_JACO2_N6S300_CFG.replace(prim_path="/World/Origin4/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 0.8)
    kinova_j2n6s300 = Articulation(cfg=kinova_arm_cfg)

    # Origin 5 with Kinova Gen3 (7-Dof) arm
    prim_utils.create_prim("/World/Origin5", "Xform", translation=origins[4])
    # -- Table
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd")
    cfg.func("/World/Origin5/Table", cfg, translation=(0.55, 0.0, 1.05))
    # -- Robot
    kinova_arm_cfg = KINOVA_GEN3_N7_CFG.replace(prim_path="/World/Origin5/Robot")
    kinova_arm_cfg.init_state.pos = (0.0, 0.0, 1.05)
    kinova_gen3n7 = Articulation(cfg=kinova_arm_cfg)

    # Origin 6 with Sawyer
    prim_utils.create_prim("/World/Origin6", "Xform", translation=origins[5])
    # -- Table
    cfg = sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
    )
    cfg.func("/World/Origin6/Table", cfg, translation=(0.0, 0.0, 1.03))
    # -- Robot
    sawyer_arm_cfg = SAWYER_CFG.replace(prim_path="/World/Origin6/Robot")
    sawyer_arm_cfg.init_state.pos = (0.0, 0.0, 1.03)
    sawyer = Articulation(cfg=sawyer_arm_cfg)

    # return the scene information
    scene_entities = {
        "franka_panda": franka_panda,
        "ur10": ur10,
        "kinova_j2n7s300": kinova_j2n7s300,
        "kinova_j2n6s300": kinova_j2n6s300,
        "kinova_gen3n7": kinova_gen3n7,
        "sawyer": sawyer,
    }
    return scene_entities, origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 200 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset the scene entities
            for index, robot in enumerate(entities.values()):
                # root state
                root_state = robot.data.default_root_state.clone()
                root_state[:, :3] += origins[index]
                robot.write_root_state_to_sim(root_state)
                # set joint positions
                joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
                robot.write_joint_state_to_sim(joint_pos, joint_vel)
                # clear internal buffers
                robot.reset()
            print("[INFO]: Resetting robots state...")
        # apply random actions to the robots
        for robot in entities.values():
            # generate random joint positions
            joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1
            joint_pos_target = joint_pos_target.clamp_(
                robot.data.soft_joint_pos_limits[..., 0], robot.data.soft_joint_pos_limits[..., 1]
            )
            # apply action to the robot
            robot.set_joint_position_target(joint_pos_target)
            # write data to sim
            robot.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        for robot in entities.values():
            robot.update(sim_dt)


def main():
    """Main function."""
    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg()
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([3.5, 0.0, 3.2], [0.0, 0.0, 0.5])
    # design scene
    scene_entities, scene_origins = design_scene()
    scene_origins = torch.tensor(scene_origins, device=sim.device)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
8,481
Python
34.940678
115
0.643438
NVIDIA-Omniverse/orbit/source/standalone/demos/bipeds.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates how to simulate a bipedal robot.

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a bipedal robot.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.sim import SimulationContext

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.cassie import CASSIE_CFG  # isort:skip


def main():
    """Main function."""
    # Load kit helper
    sim = SimulationContext(
        sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.005, physx=sim_utils.PhysxCfg(use_gpu=False))
    )
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])

    # Spawn things into stage
    # Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Robots
    robot_cfg = CASSIE_CFG
    robot_cfg.spawn.func("/World/Cassie/Robot_1", robot_cfg.spawn, translation=(1.5, 0.5, 0.42))

    # create handles for the robots
    robots = Articulation(robot_cfg.replace(prim_path="/World/Cassie/Robot.*"))

    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0
    # Simulate physics
    while simulation_app.is_running():
        # reset
        if count % 200 == 0:
            # reset counters
            sim_time = 0.0
            count = 0
            # reset dof state
            joint_pos, joint_vel = robots.data.default_joint_pos, robots.data.default_joint_vel
            robots.write_joint_state_to_sim(joint_pos, joint_vel)
            robots.write_root_pose_to_sim(robots.data.default_root_state[:, :7])
            robots.write_root_velocity_to_sim(robots.data.default_root_state[:, 7:])
            robots.reset()
            # reset command
            print(">>>>>>>> Reset!")
        # apply action to the robot
        robots.set_joint_position_target(robots.data.default_joint_pos.clone())
        robots.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        robots.update(sim_dt)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
3,056
Python
26.790909
120
0.643652
NVIDIA-Omniverse/orbit/source/standalone/demos/procedural_terrain.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script demonstrates procedural terrains with flat patches.

Example usage:

.. code-block:: bash

    # Generate terrain with height color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme height

    # Generate terrain with random color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme random

    # Generate terrain with no color scheme
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --color_scheme none

    # Generate terrain with curriculum
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum

    # Generate terrain with curriculum along with flat patches
    ./orbit.sh -p source/standalone/demos/procedural_terrain.py --use_curriculum --show_flat_patches

"""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates procedural terrain generation.")
parser.add_argument(
    "--color_scheme",
    type=str,
    default="none",
    choices=["height", "random", "none"],
    help="Color scheme to use for the terrain generation.",
)
parser.add_argument(
    "--use_curriculum",
    action="store_true",
    default=False,
    help="Whether to use the curriculum for the terrain generation.",
)
parser.add_argument(
    "--show_flat_patches",
    action="store_true",
    default=False,
    help="Whether to show the flat patches computed during the terrain generation.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import random
import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBase
from omni.isaac.orbit.markers import VisualizationMarkers, VisualizationMarkersCfg
from omni.isaac.orbit.terrains import FlatPatchSamplingCfg, TerrainImporter, TerrainImporterCfg

##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG  # isort:skip


def design_scene() -> tuple[dict, torch.Tensor]:
    """Designs the scene."""
    # Lights
    cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Parse terrain generation
    terrain_gen_cfg = ROUGH_TERRAINS_CFG.replace(curriculum=args_cli.use_curriculum, color_scheme=args_cli.color_scheme)

    # Add flat patch configuration
    # Note: To have separate colors for each sub-terrain type, we set the flat patch sampling configuration name
    #   to the sub-terrain name. However, this is not how it should be used in practice. The key name should be
    #   the intention of the flat patch. For instance, "source" or "target" for spawn and command related flat patches.
    if args_cli.show_flat_patches:
        for sub_terrain_name, sub_terrain_cfg in terrain_gen_cfg.sub_terrains.items():
            sub_terrain_cfg.flat_patch_sampling = {
                sub_terrain_name: FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05)
            }

    # Handler for terrains importing
    terrain_importer_cfg = TerrainImporterCfg(
        num_envs=2048,
        env_spacing=3.0,
        prim_path="/World/ground",
        max_init_terrain_level=None,
        terrain_type="generator",
        terrain_generator=terrain_gen_cfg,
        debug_vis=True,
    )

    # Remove visual material for height and random color schemes to use the default material
    if args_cli.color_scheme in ["height", "random"]:
        terrain_importer_cfg.visual_material = None

    # Create terrain importer
    terrain_importer = TerrainImporter(terrain_importer_cfg)

    # Show the flat patches computed
    if args_cli.show_flat_patches:
        # Configure the flat patches
        vis_cfg = VisualizationMarkersCfg(prim_path="/Visuals/TerrainFlatPatches", markers={})
        for name in terrain_importer.flat_patches:
            vis_cfg.markers[name] = sim_utils.CylinderCfg(
                radius=0.5,  # note: manually set to the patch radius for visualization
                height=0.1,
                visual_material=sim_utils.GlassMdlCfg(glass_color=(random.random(), random.random(), random.random())),
            )
        flat_patches_visualizer = VisualizationMarkers(vis_cfg)

        # Visualize the flat patches
        all_patch_locations = []
        all_patch_indices = []
        for i, patch_locations in enumerate(terrain_importer.flat_patches.values()):
            num_patch_locations = patch_locations.view(-1, 3).shape[0]
            # store the patch locations and indices
            all_patch_locations.append(patch_locations.view(-1, 3))
            all_patch_indices += [i] * num_patch_locations
        # combine the patch locations and indices
        flat_patches_visualizer.visualize(torch.cat(all_patch_locations), marker_indices=all_patch_indices)

    # return the scene information
    scene_entities = {"terrain": terrain_importer}
    return scene_entities, terrain_importer.env_origins


def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, AssetBase], origins: torch.Tensor):
    """Runs the simulation loop."""
    # Simulate physics
    while simulation_app.is_running():
        # perform step
        sim.step()


def main():
    """Main function."""
    # Initialize the simulation context
    sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, substeps=1))
    # Set main camera
    sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_entities, scene_origins = design_scene()
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene_entities, scene_origins)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
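# Following the note in design_scene above about naming flat-patch keys by their
# intention rather than by sub-terrain, a more typical configuration might look
# like this sketch (the key names "spawn" and "target" are illustrative):
#
#   sub_terrain_cfg.flat_patch_sampling = {
#       "spawn": FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05),
#       "target": FlatPatchSamplingCfg(num_patches=10, patch_radius=0.5, max_height_diff=0.05),
#   }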
6,249
Python
34.112359
120
0.692591
NVIDIA-Omniverse/orbit/source/standalone/environments/random_agent.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to an environment with random action agent.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Random agent for Orbit environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import torch import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import parse_env_cfg def main(): """Random actions agent with Orbit environment.""" # create environment configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) # create environment env = gym.make(args_cli.task, cfg=env_cfg) # print info (this is vectorized environment) print(f"[INFO]: Gym observation space: {env.observation_space}") print(f"[INFO]: Gym action space: {env.action_space}") # reset environment env.reset() # simulate environment while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # sample actions from -1 to 1 actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1 # apply actions env.step(actions) # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
2,276
Python
29.36
115
0.695079
NVIDIA-Omniverse/orbit/source/standalone/environments/list_envs.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ Script to print all the available environments in ORBIT. The script iterates over all registered environments and stores the details in a table. It prints the name of the environment, the entry point and the config file. All the environments are registered in the `omni.isaac.orbit_tasks` extension. They start with `Isaac` in their name. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher # launch omniverse app app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym from prettytable import PrettyTable import omni.isaac.orbit_tasks # noqa: F401 def main(): """Print all environments registered in `omni.isaac.orbit_tasks` extension.""" # print all the available environments table = PrettyTable(["S. No.", "Task Name", "Entry Point", "Config"]) table.title = "Available Environments in ORBIT" # set alignment of table columns table.align["Task Name"] = "l" table.align["Entry Point"] = "l" table.align["Config"] = "l" # count of environments index = 0 # acquire all Isaac environments names for task_spec in gym.registry.values(): if "Isaac" in task_spec.id: # add details to table table.add_row([index + 1, task_spec.id, task_spec.entry_point, task_spec.kwargs["env_cfg_entry_point"]]) # increment count index += 1 print(table) if __name__ == "__main__": try: # run the main function main() except Exception as e: raise e finally: # close the app simulation_app.close()
1,827
Python
25.882353
116
0.67214
NVIDIA-Omniverse/orbit/source/standalone/environments/teleoperation/teleop_se3_agent.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to run a keyboard teleoperation with Orbit manipulation environments.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Keyboard teleoperation for Orbit environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.") parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--sensitivity", type=float, default=1.0, help="Sensitivity factor.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(headless=args_cli.headless) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import torch import carb from omni.isaac.orbit.devices import Se3Gamepad, Se3Keyboard, Se3SpaceMouse import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import parse_env_cfg def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor: """Pre-process actions for the environment.""" # compute actions based on environment if "Reach" in args_cli.task: # note: reach is the only one that uses a different action space # compute actions return delta_pose else: # resolve gripper command gripper_vel = torch.zeros(delta_pose.shape[0], 1, device=delta_pose.device) gripper_vel[:] = -1.0 if gripper_command else 1.0 # compute actions return torch.concat([delta_pose, gripper_vel], dim=1) def main(): """Running keyboard teleoperation with Orbit manipulation environment.""" # parse configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) # modify configuration env_cfg.terminations.time_out = None # create environment env = gym.make(args_cli.task, cfg=env_cfg) # check environment name (for reach , we don't allow the gripper) if "Reach" in args_cli.task: carb.log_warn( f"The environment '{args_cli.task}' does not support gripper control. The device command will be ignored." ) # create controller if args_cli.device.lower() == "keyboard": teleop_interface = Se3Keyboard( pos_sensitivity=0.005 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity ) elif args_cli.device.lower() == "spacemouse": teleop_interface = Se3SpaceMouse( pos_sensitivity=0.05 * args_cli.sensitivity, rot_sensitivity=0.005 * args_cli.sensitivity ) elif args_cli.device.lower() == "gamepad": teleop_interface = Se3Gamepad( pos_sensitivity=0.1 * args_cli.sensitivity, rot_sensitivity=0.1 * args_cli.sensitivity ) else: raise ValueError(f"Invalid device interface '{args_cli.device}'. 
Supported: 'keyboard', 'spacemouse'.") # add teleoperation key for env reset teleop_interface.add_callback("L", env.reset) # print helper for keyboard print(teleop_interface) # reset environment env.reset() teleop_interface.reset() # simulate environment while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # get keyboard command delta_pose, gripper_command = teleop_interface.advance() delta_pose = delta_pose.astype("float32") # convert to torch delta_pose = torch.tensor(delta_pose, device=env.unwrapped.device).repeat(env.unwrapped.num_envs, 1) # pre-process actions actions = pre_process_actions(delta_pose, gripper_command) # apply actions env.step(actions) # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
4,590
Python
34.589147
118
0.682135
NVIDIA-Omniverse/orbit/source/standalone/environments/state_machine/lift_cube_sm.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ Script to run an environment with a pick and lift state machine. The state machine is implemented in the kernel function `infer_state_machine`. It uses the `warp` library to run the state machine in parallel on the GPU. .. code-block:: bash ./orbit.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 32 """ """Launch Omniverse Toolkit first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Pick and lift state machine for lift environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(headless=args_cli.headless) simulation_app = app_launcher.app """Rest everything else.""" import gymnasium as gym import torch from collections.abc import Sequence import warp as wp from omni.isaac.orbit.assets.rigid_object.rigid_object_data import RigidObjectData import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.manipulation.lift.lift_env_cfg import LiftEnvCfg from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg # initialize warp wp.init() class GripperState: """States for the gripper.""" OPEN = wp.constant(1.0) CLOSE = wp.constant(-1.0) class PickSmState: """States for the pick state machine.""" REST = wp.constant(0) APPROACH_ABOVE_OBJECT = wp.constant(1) APPROACH_OBJECT = wp.constant(2) GRASP_OBJECT = wp.constant(3) LIFT_OBJECT = wp.constant(4) class PickSmWaitTime: """Additional wait times (in s) for states for before switching.""" REST = wp.constant(0.2) APPROACH_ABOVE_OBJECT = wp.constant(0.5) APPROACH_OBJECT = wp.constant(0.6) GRASP_OBJECT = wp.constant(0.3) LIFT_OBJECT = wp.constant(1.0) @wp.kernel def infer_state_machine( dt: wp.array(dtype=float), sm_state: wp.array(dtype=int), sm_wait_time: wp.array(dtype=float), ee_pose: wp.array(dtype=wp.transform), object_pose: wp.array(dtype=wp.transform), des_object_pose: wp.array(dtype=wp.transform), des_ee_pose: wp.array(dtype=wp.transform), gripper_state: wp.array(dtype=float), offset: wp.array(dtype=wp.transform), ): # retrieve thread id tid = wp.tid() # retrieve state machine state state = sm_state[tid] # decide next state if state == PickSmState.REST: des_ee_pose[tid] = ee_pose[tid] gripper_state[tid] = GripperState.OPEN # wait for a while if sm_wait_time[tid] >= PickSmWaitTime.REST: # move to next state and reset wait time sm_state[tid] = PickSmState.APPROACH_ABOVE_OBJECT sm_wait_time[tid] = 0.0 elif state == PickSmState.APPROACH_ABOVE_OBJECT: des_ee_pose[tid] = wp.transform_multiply(offset[tid], object_pose[tid]) gripper_state[tid] = GripperState.OPEN # TODO: error between current and desired ee pose below threshold # wait for a while if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT: # move to next state and reset wait time sm_state[tid] = PickSmState.APPROACH_OBJECT sm_wait_time[tid] = 0.0 elif state == PickSmState.APPROACH_OBJECT: des_ee_pose[tid] = object_pose[tid] gripper_state[tid] = GripperState.OPEN # TODO: error between current 
and desired ee pose below threshold # wait for a while if sm_wait_time[tid] >= PickSmWaitTime.APPROACH_OBJECT: # move to next state and reset wait time sm_state[tid] = PickSmState.GRASP_OBJECT sm_wait_time[tid] = 0.0 elif state == PickSmState.GRASP_OBJECT: des_ee_pose[tid] = object_pose[tid] gripper_state[tid] = GripperState.CLOSE # wait for a while if sm_wait_time[tid] >= PickSmWaitTime.GRASP_OBJECT: # move to next state and reset wait time sm_state[tid] = PickSmState.LIFT_OBJECT sm_wait_time[tid] = 0.0 elif state == PickSmState.LIFT_OBJECT: des_ee_pose[tid] = des_object_pose[tid] gripper_state[tid] = GripperState.CLOSE # TODO: error between current and desired ee pose below threshold # wait for a while if sm_wait_time[tid] >= PickSmWaitTime.LIFT_OBJECT: # move to next state and reset wait time sm_state[tid] = PickSmState.LIFT_OBJECT sm_wait_time[tid] = 0.0 # increment wait time sm_wait_time[tid] = sm_wait_time[tid] + dt[tid] class PickAndLiftSm: """A simple state machine in a robot's task space to pick and lift an object. The state machine is implemented as a warp kernel. It takes in the current state of the robot's end-effector and the object, and outputs the desired state of the robot's end-effector and the gripper. The state machine is implemented as a finite state machine with the following states: 1. REST: The robot is at rest. 2. APPROACH_ABOVE_OBJECT: The robot moves above the object. 3. APPROACH_OBJECT: The robot moves to the object. 4. GRASP_OBJECT: The robot grasps the object. 5. LIFT_OBJECT: The robot lifts the object to the desired pose. This is the final state. """ def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"): """Initialize the state machine. Args: dt: The environment time step. num_envs: The number of environments to simulate. device: The device to run the state machine on. 
""" # save parameters self.dt = float(dt) self.num_envs = num_envs self.device = device # initialize state machine self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device) self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device) self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device) # desired state self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device) self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device) # approach above object offset self.offset = torch.zeros((self.num_envs, 7), device=self.device) self.offset[:, 2] = 0.1 self.offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w) # convert to warp self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32) self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32) self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32) self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform) self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32) self.offset_wp = wp.from_torch(self.offset, wp.transform) def reset_idx(self, env_ids: Sequence[int] = None): """Reset the state machine.""" if env_ids is None: env_ids = slice(None) self.sm_state[env_ids] = 0 self.sm_wait_time[env_ids] = 0.0 def compute(self, ee_pose: torch.Tensor, object_pose: torch.Tensor, des_object_pose: torch.Tensor): """Compute the desired state of the robot's end-effector and the gripper.""" # convert all transformations from (w, x, y, z) to (x, y, z, w) ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]] object_pose = object_pose[:, [0, 1, 2, 4, 5, 6, 3]] des_object_pose = des_object_pose[:, [0, 1, 2, 4, 5, 6, 3]] # convert to warp ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform) object_pose_wp = wp.from_torch(object_pose.contiguous(), wp.transform) des_object_pose_wp = wp.from_torch(des_object_pose.contiguous(), wp.transform) # run state machine wp.launch( kernel=infer_state_machine, dim=self.num_envs, inputs=[ self.sm_dt_wp, self.sm_state_wp, self.sm_wait_time_wp, ee_pose_wp, object_pose_wp, des_object_pose_wp, self.des_ee_pose_wp, self.des_gripper_state_wp, self.offset_wp, ], device=self.device, ) # convert transformations back to (w, x, y, z) des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]] # convert to torch return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1) def main(): # parse configuration env_cfg: LiftEnvCfg = parse_env_cfg( "Isaac-Lift-Cube-Franka-IK-Abs-v0", use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric, ) # create environment env = gym.make("Isaac-Lift-Cube-Franka-IK-Abs-v0", cfg=env_cfg) # reset environment at start env.reset() # create action buffers (position + quaternion) actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device) actions[:, 3] = 1.0 # desired object orientation (we only do position control of object) desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device) desired_orientation[:, 1] = 1.0 # create state machine pick_sm = PickAndLiftSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device) while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # step environment dones = env.step(actions)[-2] # observations # -- end-effector frame ee_frame_sensor = env.unwrapped.scene["ee_frame"] tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins 
tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone() # -- object frame object_data: RigidObjectData = env.unwrapped.scene["object"].data object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins # -- target object frame desired_position = env.unwrapped.command_manager.get_command("object_pose")[..., :3] # advance state machine actions = pick_sm.compute( torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1), torch.cat([object_position, desired_orientation], dim=-1), torch.cat([desired_position, desired_orientation], dim=-1), ) # reset state machine if dones.any(): pick_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1)) # close the environment env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
11,404
Python
37.016667
118
0.632673
NVIDIA-Omniverse/orbit/source/standalone/environments/state_machine/open_cabinet_sm.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ Script to run an environment with a cabinet opening state machine. The state machine is implemented in the kernel function `infer_state_machine`. It uses the `warp` library to run the state machine in parallel on the GPU. .. code-block:: bash ./orbit.sh -p source/standalone/environments/state_machine/lift_cube_sm.py --num_envs 32 """ """Launch Omniverse Toolkit first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Pick and lift state machine for cabinet environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(headless=args_cli.headless) simulation_app = app_launcher.app """Rest everything else.""" import gymnasium as gym import torch import traceback from collections.abc import Sequence import carb import warp as wp from omni.isaac.orbit.sensors import FrameTransformer import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.manipulation.cabinet.cabinet_env_cfg import CabinetEnvCfg from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg # initialize warp wp.init() class GripperState: """States for the gripper.""" OPEN = wp.constant(1.0) CLOSE = wp.constant(-1.0) class OpenDrawerSmState: """States for the cabinet drawer opening state machine.""" REST = wp.constant(0) APPROACH_INFRONT_HANDLE = wp.constant(1) APPROACH_HANDLE = wp.constant(2) GRASP_HANDLE = wp.constant(3) OPEN_DRAWER = wp.constant(4) RELEASE_HANDLE = wp.constant(5) class OpenDrawerSmWaitTime: """Additional wait times (in s) for states for before switching.""" REST = wp.constant(0.5) APPROACH_INFRONT_HANDLE = wp.constant(1.25) APPROACH_HANDLE = wp.constant(1.0) GRASP_HANDLE = wp.constant(1.0) OPEN_DRAWER = wp.constant(3.0) RELEASE_HANDLE = wp.constant(0.2) @wp.kernel def infer_state_machine( dt: wp.array(dtype=float), sm_state: wp.array(dtype=int), sm_wait_time: wp.array(dtype=float), ee_pose: wp.array(dtype=wp.transform), handle_pose: wp.array(dtype=wp.transform), des_ee_pose: wp.array(dtype=wp.transform), gripper_state: wp.array(dtype=float), handle_approach_offset: wp.array(dtype=wp.transform), handle_grasp_offset: wp.array(dtype=wp.transform), drawer_opening_rate: wp.array(dtype=wp.transform), ): # retrieve thread id tid = wp.tid() # retrieve state machine state state = sm_state[tid] # decide next state if state == OpenDrawerSmState.REST: des_ee_pose[tid] = ee_pose[tid] gripper_state[tid] = GripperState.OPEN # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.REST: # move to next state and reset wait time sm_state[tid] = OpenDrawerSmState.APPROACH_INFRONT_HANDLE sm_wait_time[tid] = 0.0 elif state == OpenDrawerSmState.APPROACH_INFRONT_HANDLE: des_ee_pose[tid] = wp.transform_multiply(handle_approach_offset[tid], handle_pose[tid]) gripper_state[tid] = GripperState.OPEN # TODO: error between current and desired ee pose below threshold # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_INFRONT_HANDLE: # move to next 
state and reset wait time sm_state[tid] = OpenDrawerSmState.APPROACH_HANDLE sm_wait_time[tid] = 0.0 elif state == OpenDrawerSmState.APPROACH_HANDLE: des_ee_pose[tid] = handle_pose[tid] gripper_state[tid] = GripperState.OPEN # TODO: error between current and desired ee pose below threshold # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.APPROACH_HANDLE: # move to next state and reset wait time sm_state[tid] = OpenDrawerSmState.GRASP_HANDLE sm_wait_time[tid] = 0.0 elif state == OpenDrawerSmState.GRASP_HANDLE: des_ee_pose[tid] = wp.transform_multiply(handle_grasp_offset[tid], handle_pose[tid]) gripper_state[tid] = GripperState.CLOSE # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.GRASP_HANDLE: # move to next state and reset wait time sm_state[tid] = OpenDrawerSmState.OPEN_DRAWER sm_wait_time[tid] = 0.0 elif state == OpenDrawerSmState.OPEN_DRAWER: des_ee_pose[tid] = wp.transform_multiply(drawer_opening_rate[tid], handle_pose[tid]) gripper_state[tid] = GripperState.CLOSE # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.OPEN_DRAWER: # move to next state and reset wait time sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE sm_wait_time[tid] = 0.0 elif state == OpenDrawerSmState.RELEASE_HANDLE: des_ee_pose[tid] = ee_pose[tid] gripper_state[tid] = GripperState.CLOSE # wait for a while if sm_wait_time[tid] >= OpenDrawerSmWaitTime.RELEASE_HANDLE: # move to next state and reset wait time sm_state[tid] = OpenDrawerSmState.RELEASE_HANDLE sm_wait_time[tid] = 0.0 # increment wait time sm_wait_time[tid] = sm_wait_time[tid] + dt[tid] class OpenDrawerSm: """A simple state machine in a robot's task space to open a drawer in the cabinet. The state machine is implemented as a warp kernel. It takes in the current state of the robot's end-effector and the object, and outputs the desired state of the robot's end-effector and the gripper. The state machine is implemented as a finite state machine with the following states: 1. REST: The robot is at rest. 2. APPROACH_HANDLE: The robot moves towards the handle of the drawer. 3. GRASP_HANDLE: The robot grasps the handle of the drawer. 4. OPEN_DRAWER: The robot opens the drawer. 5. RELEASE_HANDLE: The robot releases the handle of the drawer. This is the final state. """ def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu"): """Initialize the state machine. Args: dt: The environment time step. num_envs: The number of environments to simulate. device: The device to run the state machine on. 
""" # save parameters self.dt = float(dt) self.num_envs = num_envs self.device = device # initialize state machine self.sm_dt = torch.full((self.num_envs,), self.dt, device=self.device) self.sm_state = torch.full((self.num_envs,), 0, dtype=torch.int32, device=self.device) self.sm_wait_time = torch.zeros((self.num_envs,), device=self.device) # desired state self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device) self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device) # approach infront of the handle self.handle_approach_offset = torch.zeros((self.num_envs, 7), device=self.device) self.handle_approach_offset[:, 0] = -0.1 self.handle_approach_offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w) # handle grasp offset self.handle_grasp_offset = torch.zeros((self.num_envs, 7), device=self.device) self.handle_grasp_offset[:, 0] = 0.025 self.handle_grasp_offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w) # drawer opening rate self.drawer_opening_rate = torch.zeros((self.num_envs, 7), device=self.device) self.drawer_opening_rate[:, 0] = -0.015 self.drawer_opening_rate[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w) # convert to warp self.sm_dt_wp = wp.from_torch(self.sm_dt, wp.float32) self.sm_state_wp = wp.from_torch(self.sm_state, wp.int32) self.sm_wait_time_wp = wp.from_torch(self.sm_wait_time, wp.float32) self.des_ee_pose_wp = wp.from_torch(self.des_ee_pose, wp.transform) self.des_gripper_state_wp = wp.from_torch(self.des_gripper_state, wp.float32) self.handle_approach_offset_wp = wp.from_torch(self.handle_approach_offset, wp.transform) self.handle_grasp_offset_wp = wp.from_torch(self.handle_grasp_offset, wp.transform) self.drawer_opening_rate_wp = wp.from_torch(self.drawer_opening_rate, wp.transform) def reset_idx(self, env_ids: Sequence[int] | None = None): """Reset the state machine.""" if env_ids is None: env_ids = slice(None) # reset state machine self.sm_state[env_ids] = 0 self.sm_wait_time[env_ids] = 0.0 def compute(self, ee_pose: torch.Tensor, handle_pose: torch.Tensor): """Compute the desired state of the robot's end-effector and the gripper.""" # convert all transformations from (w, x, y, z) to (x, y, z, w) ee_pose = ee_pose[:, [0, 1, 2, 4, 5, 6, 3]] handle_pose = handle_pose[:, [0, 1, 2, 4, 5, 6, 3]] # convert to warp ee_pose_wp = wp.from_torch(ee_pose.contiguous(), wp.transform) handle_pose_wp = wp.from_torch(handle_pose.contiguous(), wp.transform) # run state machine wp.launch( kernel=infer_state_machine, dim=self.num_envs, inputs=[ self.sm_dt_wp, self.sm_state_wp, self.sm_wait_time_wp, ee_pose_wp, handle_pose_wp, self.des_ee_pose_wp, self.des_gripper_state_wp, self.handle_approach_offset_wp, self.handle_grasp_offset_wp, self.drawer_opening_rate_wp, ], device=self.device, ) # convert transformations back to (w, x, y, z) des_ee_pose = self.des_ee_pose[:, [0, 1, 2, 6, 3, 4, 5]] # convert to torch return torch.cat([des_ee_pose, self.des_gripper_state.unsqueeze(-1)], dim=-1) def main(): # parse configuration env_cfg: CabinetEnvCfg = parse_env_cfg( "Isaac-Open-Drawer-Franka-IK-Abs-v0", use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric, ) # create environment env = gym.make("Isaac-Open-Drawer-Franka-IK-Abs-v0", cfg=env_cfg) # reset environment at start env.reset() # create action buffers (position + quaternion) actions = torch.zeros(env.unwrapped.action_space.shape, device=env.unwrapped.device) actions[:, 3] = 1.0 # desired object orientation (we only do position control of object) 
desired_orientation = torch.zeros((env.unwrapped.num_envs, 4), device=env.unwrapped.device) desired_orientation[:, 1] = 1.0 # create state machine open_sm = OpenDrawerSm(env_cfg.sim.dt * env_cfg.decimation, env.unwrapped.num_envs, env.unwrapped.device) while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # step environment dones = env.step(actions)[-2] # observations # -- end-effector frame ee_frame_tf: FrameTransformer = env.unwrapped.scene["ee_frame"] tcp_rest_position = ee_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins tcp_rest_orientation = ee_frame_tf.data.target_quat_w[..., 0, :].clone() # -- handle frame cabinet_frame_tf: FrameTransformer = env.unwrapped.scene["cabinet_frame"] cabinet_position = cabinet_frame_tf.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins cabinet_orientation = cabinet_frame_tf.data.target_quat_w[..., 0, :].clone() # advance state machine actions = open_sm.compute( torch.cat([tcp_rest_position, tcp_rest_orientation], dim=-1), torch.cat([cabinet_position, cabinet_orientation], dim=-1), ) # reset state machine if dones.any(): open_sm.reset_idx(dones.nonzero(as_tuple=False).squeeze(-1)) # close the environment env.close() if __name__ == "__main__": try: # run the main execution main() except Exception as err: carb.log_error(err) carb.log_error(traceback.format_exc()) raise finally: # close sim app simulation_app.close()
12,935
Python
38.559633
118
0.639351
NVIDIA-Omniverse/orbit/source/standalone/workflows/skrl/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ Script to play a checkpoint of an RL agent from skrl. Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in a more user-friendly way. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from skrl.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import os import torch from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlVecEnvWrapper, process_skrl_cfg def main(): """Play with skrl agent.""" # parse env configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point") # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg) # wrap around environment for skrl env = SkrlVecEnvWrapper(env) # same as: `wrap_env(env, wrapper="isaac-orbit")` # instantiate models using skrl model instantiator utility # https://skrl.readthedocs.io/en/latest/modules/skrl.utils.model_instantiators.html models = {} # non-shared models if experiment_cfg["models"]["separate"]: models["policy"] = gaussian_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, **process_skrl_cfg(experiment_cfg["models"]["policy"]), ) models["value"] = deterministic_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, **process_skrl_cfg(experiment_cfg["models"]["value"]), ) # shared models else: models["policy"] = shared_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, structure=None, roles=["policy", "value"], parameters=[ process_skrl_cfg(experiment_cfg["models"]["policy"]), process_skrl_cfg(experiment_cfg["models"]["value"]), ], ) models["value"] = models["policy"] # configure and instantiate PPO agent # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent_cfg = PPO_DEFAULT_CONFIG.copy() experiment_cfg["agent"]["rewards_shaper"] = None # avoid 'dictionary changed size during iteration' agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"])) agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device}) 
agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device}) agent_cfg["experiment"]["write_interval"] = 0 # don't log to Tensorboard agent_cfg["experiment"]["checkpoint_interval"] = 0 # don't generate checkpoints agent = PPO( models=models, memory=None, # memory is optional during evaluation cfg=agent_cfg, observation_space=env.observation_space, action_space=env.action_space, device=env.device, ) # specify directory for logging experiments (load checkpoint) log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"]) log_root_path = os.path.abspath(log_root_path) print(f"[INFO] Loading experiment from directory: {log_root_path}") # get checkpoint path if args_cli.checkpoint: resume_path = os.path.abspath(args_cli.checkpoint) else: resume_path = get_checkpoint_path(log_root_path, other_dirs=["checkpoints"]) print(f"[INFO] Loading model checkpoint from: {resume_path}") # initialize agent agent.init() agent.load(resume_path) # set agent to evaluation mode agent.set_running_mode("eval") # reset environment obs, _ = env.reset() # simulate environment while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # agent stepping actions = agent.act(obs, timestep=0, timesteps=0)[0] # env stepping obs, _, _, _, _ = env.step(actions) # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
5,657
Python
35.269231
115
0.668022
NVIDIA-Omniverse/orbit/source/standalone/workflows/skrl/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """ Script to train RL agent with skrl. Visit the skrl documentation (https://skrl.readthedocs.io) to see the examples structured in a more user-friendly way. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Train an RL agent with skrl.") parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.") parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).") parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import os from datetime import datetime from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.memories.torch import RandomMemory from skrl.utils import set_seed from skrl.utils.model_instantiators.torch import deterministic_model, gaussian_model, shared_model from omni.isaac.orbit.utils.dict import print_dict from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.skrl import SkrlSequentialLogTrainer, SkrlVecEnvWrapper, process_skrl_cfg def main(): """Train with skrl agent.""" # read the seed from command line args_cli_seed = args_cli.seed # parse configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) experiment_cfg = load_cfg_from_registry(args_cli.task, "skrl_cfg_entry_point") # specify directory for logging experiments log_root_path = os.path.join("logs", "skrl", experiment_cfg["agent"]["experiment"]["directory"]) log_root_path = os.path.abspath(log_root_path) print(f"[INFO] Logging experiment in directory: {log_root_path}") # specify directory for logging runs: {time-stamp}_{run_name} log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if experiment_cfg["agent"]["experiment"]["experiment_name"]: log_dir += f'_{experiment_cfg["agent"]["experiment"]["experiment_name"]}' # set directory into agent config experiment_cfg["agent"]["experiment"]["directory"] = log_root_path experiment_cfg["agent"]["experiment"]["experiment_name"] = log_dir # update log_dir log_dir = os.path.join(log_root_path, log_dir) # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), experiment_cfg) dump_pickle(os.path.join(log_dir, "params", 
"env.pkl"), env_cfg) dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), experiment_cfg) # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) # wrap for video recording if args_cli.video: video_kwargs = { "video_folder": os.path.join(log_dir, "videos"), "step_trigger": lambda step: step % args_cli.video_interval == 0, "video_length": args_cli.video_length, "disable_logger": True, } print("[INFO] Recording videos during training.") print_dict(video_kwargs, nesting=4) env = gym.wrappers.RecordVideo(env, **video_kwargs) # wrap around environment for skrl env = SkrlVecEnvWrapper(env) # same as: `wrap_env(env, wrapper="isaac-orbit")` # set seed for the experiment (override from command line) set_seed(args_cli_seed if args_cli_seed is not None else experiment_cfg["seed"]) # instantiate models using skrl model instantiator utility # https://skrl.readthedocs.io/en/latest/modules/skrl.utils.model_instantiators.html models = {} # non-shared models if experiment_cfg["models"]["separate"]: models["policy"] = gaussian_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, **process_skrl_cfg(experiment_cfg["models"]["policy"]), ) models["value"] = deterministic_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, **process_skrl_cfg(experiment_cfg["models"]["value"]), ) # shared models else: models["policy"] = shared_model( observation_space=env.observation_space, action_space=env.action_space, device=env.device, structure=None, roles=["policy", "value"], parameters=[ process_skrl_cfg(experiment_cfg["models"]["policy"]), process_skrl_cfg(experiment_cfg["models"]["value"]), ], ) models["value"] = models["policy"] # instantiate a RandomMemory as rollout buffer (any memory can be used for this) # https://skrl.readthedocs.io/en/latest/modules/skrl.memories.random.html memory_size = experiment_cfg["agent"]["rollouts"] # memory_size is the agent's number of rollouts memory = RandomMemory(memory_size=memory_size, num_envs=env.num_envs, device=env.device) # configure and instantiate PPO agent # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html agent_cfg = PPO_DEFAULT_CONFIG.copy() experiment_cfg["agent"]["rewards_shaper"] = None # avoid 'dictionary changed size during iteration' agent_cfg.update(process_skrl_cfg(experiment_cfg["agent"])) agent_cfg["state_preprocessor_kwargs"].update({"size": env.observation_space, "device": env.device}) agent_cfg["value_preprocessor_kwargs"].update({"size": 1, "device": env.device}) agent = PPO( models=models, memory=memory, cfg=agent_cfg, observation_space=env.observation_space, action_space=env.action_space, device=env.device, ) # configure and instantiate a custom RL trainer for logging episode events # https://skrl.readthedocs.io/en/latest/modules/skrl.trainers.base_class.html trainer_cfg = experiment_cfg["trainer"] trainer = SkrlSequentialLogTrainer(cfg=trainer_cfg, env=env, agents=agent) # train the agent trainer.train() # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
7,471
Python
39.608695
117
0.680498
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to run a trained policy from robomimic.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Play policy trained using robomimic for Orbit environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--checkpoint", type=str, default=None, help="Pytorch model checkpoint to load.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import torch import robomimic # noqa: F401 import robomimic.utils.file_utils as FileUtils import robomimic.utils.torch_utils as TorchUtils import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import parse_env_cfg def main(): """Run a trained policy from robomimic with Orbit environment.""" # parse configuration env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=1, use_fabric=not args_cli.disable_fabric) # we want to have the terms in the observations returned as a dictionary # rather than a concatenated tensor env_cfg.observations.policy.concatenate_terms = False # create environment env = gym.make(args_cli.task, cfg=env_cfg) # acquire device device = TorchUtils.get_torch_device(try_to_use_cuda=True) # restore policy policy, _ = FileUtils.policy_from_checkpoint(ckpt_path=args_cli.checkpoint, device=device, verbose=True) # reset environment obs_dict, _ = env.reset() # robomimic only cares about policy observations obs = obs_dict["policy"] # simulate environment while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # compute actions actions = policy(obs) actions = torch.from_numpy(actions).to(device=device).view(1, env.action_space.shape[1]) # apply actions obs_dict = env.step(actions)[0] # robomimic only cares about policy observations obs = obs_dict["policy"] # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
2,845
Python
31.340909
120
0.702988
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/collect_demonstrations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to collect demonstrations with Orbit environments.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Collect demonstrations for Orbit environments.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--device", type=str, default="keyboard", help="Device for interacting with environment") parser.add_argument("--num_demos", type=int, default=1, help="Number of episodes to store in the dataset.") parser.add_argument("--filename", type=str, default="hdf_dataset", help="Basename of output file.") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch the simulator app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import contextlib import gymnasium as gym import os import torch from omni.isaac.orbit.devices import Se3Keyboard, Se3SpaceMouse from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.manipulation.lift import mdp from omni.isaac.orbit_tasks.utils.data_collector import RobomimicDataCollector from omni.isaac.orbit_tasks.utils.parse_cfg import parse_env_cfg def pre_process_actions(delta_pose: torch.Tensor, gripper_command: bool) -> torch.Tensor: """Pre-process actions for the environment.""" # compute actions based on environment if "Reach" in args_cli.task: # note: reach is the only one that uses a different action space # compute actions return delta_pose else: # resolve gripper command gripper_vel = torch.zeros((delta_pose.shape[0], 1), dtype=torch.float, device=delta_pose.device) gripper_vel[:] = -1 if gripper_command else 1 # compute actions return torch.concat([delta_pose, gripper_vel], dim=1) def main(): """Collect demonstrations from the environment using teleop interfaces.""" assert ( args_cli.task == "Isaac-Lift-Cube-Franka-IK-Rel-v0" ), "Only 'Isaac-Lift-Cube-Franka-IK-Rel-v0' is supported currently." 
# parse configuration env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs) # modify configuration such that the environment runs indefinitely # until goal is reached env_cfg.terminations.time_out = None # set the resampling time range to large number to avoid resampling env_cfg.commands.object_pose.resampling_time_range = (1.0e9, 1.0e9) # we want to have the terms in the observations returned as a dictionary # rather than a concatenated tensor env_cfg.observations.policy.concatenate_terms = False # add termination condition for reaching the goal otherwise the environment won't reset env_cfg.terminations.object_reached_goal = DoneTerm(func=mdp.object_reached_goal) # create environment env = gym.make(args_cli.task, cfg=env_cfg) # create controller if args_cli.device.lower() == "keyboard": teleop_interface = Se3Keyboard(pos_sensitivity=0.04, rot_sensitivity=0.08) elif args_cli.device.lower() == "spacemouse": teleop_interface = Se3SpaceMouse(pos_sensitivity=0.05, rot_sensitivity=0.005) else: raise ValueError(f"Invalid device interface '{args_cli.device}'. Supported: 'keyboard', 'spacemouse'.") # add teleoperation key for env reset teleop_interface.add_callback("L", env.reset) # print helper print(teleop_interface) # specify directory for logging experiments log_dir = os.path.join("./logs/robomimic", args_cli.task) # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) # create data-collector collector_interface = RobomimicDataCollector( env_name=args_cli.task, directory_path=log_dir, filename=args_cli.filename, num_demos=args_cli.num_demos, flush_freq=env.num_envs, env_config={"device": args_cli.device}, ) # reset environment obs_dict, _ = env.reset() # reset interfaces teleop_interface.reset() collector_interface.reset() # simulate environment -- run everything in inference mode with contextlib.suppress(KeyboardInterrupt) and torch.inference_mode(): while not collector_interface.is_stopped(): # get keyboard command delta_pose, gripper_command = teleop_interface.advance() # convert to torch delta_pose = torch.tensor(delta_pose, dtype=torch.float, device=env.device).repeat(env.num_envs, 1) # compute actions based on environment actions = pre_process_actions(delta_pose, gripper_command) # TODO: Deal with the case when reset is triggered by teleoperation device. # The observations need to be recollected. 
# store signals before stepping # -- obs for key, value in obs_dict["policy"].items(): collector_interface.add(f"obs/{key}", value) # -- actions collector_interface.add("actions", actions) # perform action on environment obs_dict, rewards, terminated, truncated, info = env.step(actions) dones = terminated | truncated # check that simulation is stopped or not if env.unwrapped.sim.is_stopped(): break # robomimic only cares about policy observations # store signals from the environment # -- next_obs for key, value in obs_dict["policy"].items(): collector_interface.add(f"next_obs/{key}", value) # -- rewards collector_interface.add("rewards", rewards) # -- dones collector_interface.add("dones", dones) # -- is success label collector_interface.add("success", env.termination_manager.get_term("object_reached_goal")) # flush data from collector for successful environments reset_env_ids = dones.nonzero(as_tuple=False).squeeze(-1) collector_interface.flush(reset_env_ids) # check if enough data is collected if collector_interface.is_stopped(): break # close the simulator collector_interface.close() env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
7,116
Python
38.320442
111
0.676504
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # MIT License # # Copyright (c) 2021 Stanford Vision and Learning Lab # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ The main entry point for training policies from pre-collected data. Args: algo: name of the algorithm to run. task: name of the environment. name: if provided, override the experiment name defined in the config dataset: if provided, override the dataset path defined in the config This file has been modified from the original version in the following ways: * Added import of AppLauncher from omni.isaac.orbit.app to resolve the configuration to load for training. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" from omni.isaac.orbit.app import AppLauncher # launch omniverse app app_launcher = AppLauncher(headless=True) simulation_app = app_launcher.app """Rest everything follows.""" import argparse import gymnasium as gym import json import numpy as np import os import sys import time import torch import traceback from collections import OrderedDict from torch.utils.data import DataLoader import psutil import robomimic.utils.env_utils as EnvUtils import robomimic.utils.file_utils as FileUtils import robomimic.utils.obs_utils as ObsUtils import robomimic.utils.torch_utils as TorchUtils import robomimic.utils.train_utils as TrainUtils from robomimic.algo import RolloutPolicy, algo_factory from robomimic.config import config_factory from robomimic.utils.log_utils import DataLogger, PrintLogger # Needed so that environment is registered import omni.isaac.orbit_tasks # noqa: F401 def train(config, device): """Train a model using the algorithm.""" # first set seeds np.random.seed(config.train.seed) torch.manual_seed(config.train.seed) print("\n============= New Training Run with Config =============") print(config) print("") log_dir, ckpt_dir, video_dir = TrainUtils.get_exp_dir(config) print(f">>> Saving logs into directory: {log_dir}") print(f">>> Saving checkpoints into directory: {ckpt_dir}") print(f">>> Saving videos into directory: {video_dir}") if config.experiment.logging.terminal_output_to_txt: # log stdout and stderr to a text file logger = PrintLogger(os.path.join(log_dir, "log.txt")) sys.stdout = logger sys.stderr = logger # read config to set up metadata for observation modalities (e.g. 
detecting rgb observations) ObsUtils.initialize_obs_utils_with_config(config) # make sure the dataset exists dataset_path = os.path.expanduser(config.train.data) if not os.path.exists(dataset_path): raise FileNotFoundError(f"Dataset at provided path {dataset_path} not found!") # load basic metadata from training file print("\n============= Loaded Environment Metadata =============") env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=config.train.data) shape_meta = FileUtils.get_shape_metadata_from_dataset( dataset_path=config.train.data, all_obs_keys=config.all_obs_keys, verbose=True ) if config.experiment.env is not None: env_meta["env_name"] = config.experiment.env print("=" * 30 + "\n" + "Replacing Env to {}\n".format(env_meta["env_name"]) + "=" * 30) # create environment envs = OrderedDict() if config.experiment.rollout.enabled: # create environments for validation runs env_names = [env_meta["env_name"]] if config.experiment.additional_envs is not None: for name in config.experiment.additional_envs: env_names.append(name) for env_name in env_names: env = EnvUtils.create_env_from_metadata( env_meta=env_meta, env_name=env_name, render=False, render_offscreen=config.experiment.render_video, use_image_obs=shape_meta["use_images"], ) envs[env.name] = env print(envs[env.name]) print("") # setup for a new training run data_logger = DataLogger(log_dir, config=config, log_tb=config.experiment.logging.log_tb) model = algo_factory( algo_name=config.algo_name, config=config, obs_key_shapes=shape_meta["all_shapes"], ac_dim=shape_meta["ac_dim"], device=device, ) # save the config as a json file with open(os.path.join(log_dir, "..", "config.json"), "w") as outfile: json.dump(config, outfile, indent=4) print("\n============= Model Summary =============") print(model) # print model summary print("") # load training data trainset, validset = TrainUtils.load_data_for_training(config, obs_keys=shape_meta["all_obs_keys"]) train_sampler = trainset.get_dataset_sampler() print("\n============= Training Dataset =============") print(trainset) print("") # maybe retrieve statistics for normalizing observations obs_normalization_stats = None if config.train.hdf5_normalize_obs: obs_normalization_stats = trainset.get_obs_normalization_stats() # initialize data loaders train_loader = DataLoader( dataset=trainset, sampler=train_sampler, batch_size=config.train.batch_size, shuffle=(train_sampler is None), num_workers=config.train.num_data_workers, drop_last=True, ) if config.experiment.validate: # cap num workers for validation dataset at 1 num_workers = min(config.train.num_data_workers, 1) valid_sampler = validset.get_dataset_sampler() valid_loader = DataLoader( dataset=validset, sampler=valid_sampler, batch_size=config.train.batch_size, shuffle=(valid_sampler is None), num_workers=num_workers, drop_last=True, ) else: valid_loader = None # main training loop best_valid_loss = None best_return = {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None best_success_rate = {k: -1.0 for k in envs} if config.experiment.rollout.enabled else None last_ckpt_time = time.time() # number of learning steps per epoch (defaults to a full dataset pass) train_num_steps = config.experiment.epoch_every_n_steps valid_num_steps = config.experiment.validation_epoch_every_n_steps for epoch in range(1, config.train.num_epochs + 1): # epoch numbers start at 1 step_log = TrainUtils.run_epoch(model=model, data_loader=train_loader, epoch=epoch, num_steps=train_num_steps) model.on_epoch_end(epoch) # setup 
checkpoint path epoch_ckpt_name = f"model_epoch_{epoch}" # check for recurring checkpoint saving conditions should_save_ckpt = False if config.experiment.save.enabled: time_check = (config.experiment.save.every_n_seconds is not None) and ( time.time() - last_ckpt_time > config.experiment.save.every_n_seconds ) epoch_check = ( (config.experiment.save.every_n_epochs is not None) and (epoch > 0) and (epoch % config.experiment.save.every_n_epochs == 0) ) epoch_list_check = epoch in config.experiment.save.epochs should_save_ckpt = time_check or epoch_check or epoch_list_check ckpt_reason = None if should_save_ckpt: last_ckpt_time = time.time() ckpt_reason = "time" print(f"Train Epoch {epoch}") print(json.dumps(step_log, sort_keys=True, indent=4)) for k, v in step_log.items(): if k.startswith("Time_"): data_logger.record(f"Timing_Stats/Train_{k[5:]}", v, epoch) else: data_logger.record(f"Train/{k}", v, epoch) # Evaluate the model on validation set if config.experiment.validate: with torch.no_grad(): step_log = TrainUtils.run_epoch( model=model, data_loader=valid_loader, epoch=epoch, validate=True, num_steps=valid_num_steps ) for k, v in step_log.items(): if k.startswith("Time_"): data_logger.record(f"Timing_Stats/Valid_{k[5:]}", v, epoch) else: data_logger.record(f"Valid/{k}", v, epoch) print(f"Validation Epoch {epoch}") print(json.dumps(step_log, sort_keys=True, indent=4)) # save checkpoint if we achieve a new best validation loss valid_check = "Loss" in step_log if valid_check and (best_valid_loss is None or (step_log["Loss"] <= best_valid_loss)): best_valid_loss = step_log["Loss"] if config.experiment.save.enabled and config.experiment.save.on_best_validation: epoch_ckpt_name += f"_best_validation_{best_valid_loss}" should_save_ckpt = True ckpt_reason = "valid" if ckpt_reason is None else ckpt_reason # Evaluate the model by running rollouts # do rollouts at fixed rate or if it's time to save a new ckpt video_paths = None rollout_check = (epoch % config.experiment.rollout.rate == 0) or (should_save_ckpt and ckpt_reason == "time") if config.experiment.rollout.enabled and (epoch > config.experiment.rollout.warmstart) and rollout_check: # wrap model as a RolloutPolicy to prepare for rollouts rollout_model = RolloutPolicy(model, obs_normalization_stats=obs_normalization_stats) num_episodes = config.experiment.rollout.n all_rollout_logs, video_paths = TrainUtils.rollout_with_stats( policy=rollout_model, envs=envs, horizon=config.experiment.rollout.horizon, use_goals=config.use_goals, num_episodes=num_episodes, render=False, video_dir=video_dir if config.experiment.render_video else None, epoch=epoch, video_skip=config.experiment.get("video_skip", 5), terminate_on_success=config.experiment.rollout.terminate_on_success, ) # summarize results from rollouts to tensorboard and terminal for env_name in all_rollout_logs: rollout_logs = all_rollout_logs[env_name] for k, v in rollout_logs.items(): if k.startswith("Time_"): data_logger.record(f"Timing_Stats/Rollout_{env_name}_{k[5:]}", v, epoch) else: data_logger.record(f"Rollout/{k}/{env_name}", v, epoch, log_stats=True) print("\nEpoch {} Rollouts took {}s (avg) with results:".format(epoch, rollout_logs["time"])) print(f"Env: {env_name}") print(json.dumps(rollout_logs, sort_keys=True, indent=4)) # checkpoint and video saving logic updated_stats = TrainUtils.should_save_from_rollout_logs( all_rollout_logs=all_rollout_logs, best_return=best_return, best_success_rate=best_success_rate, epoch_ckpt_name=epoch_ckpt_name,
save_on_best_rollout_return=config.experiment.save.on_best_rollout_return, save_on_best_rollout_success_rate=config.experiment.save.on_best_rollout_success_rate, ) best_return = updated_stats["best_return"] best_success_rate = updated_stats["best_success_rate"] epoch_ckpt_name = updated_stats["epoch_ckpt_name"] should_save_ckpt = ( config.experiment.save.enabled and updated_stats["should_save_ckpt"] ) or should_save_ckpt if updated_stats["ckpt_reason"] is not None: ckpt_reason = updated_stats["ckpt_reason"] # Only keep saved videos if the ckpt should be saved (but not because of validation score) should_save_video = (should_save_ckpt and (ckpt_reason != "valid")) or config.experiment.keep_all_videos if video_paths is not None and not should_save_video: for env_name in video_paths: os.remove(video_paths[env_name]) # Save model checkpoints based on conditions (success rate, validation loss, etc) if should_save_ckpt: TrainUtils.save_model( model=model, config=config, env_meta=env_meta, shape_meta=shape_meta, ckpt_path=os.path.join(ckpt_dir, epoch_ckpt_name + ".pth"), obs_normalization_stats=obs_normalization_stats, ) # Finally, log memory usage in MB process = psutil.Process(os.getpid()) mem_usage = int(process.memory_info().rss / 1000000) data_logger.record("System/RAM Usage (MB)", mem_usage, epoch) print(f"\nEpoch {epoch} Memory Usage: {mem_usage} MB\n") # terminate logging data_logger.close() def main(args): """Train a model on a task using a specified algorithm.""" # load config if args.task is not None: # obtain the configuration entry point cfg_entry_point_key = f"robomimic_{args.algo}_cfg_entry_point" print(f"Loading configuration for task: {args.task}") cfg_entry_point_file = gym.spec(args.task).kwargs.pop(cfg_entry_point_key) # check if entry point exists if cfg_entry_point_file is None: raise ValueError( f"Could not find configuration for the environment: '{args.task}'." f" Please check that the gym registry has the entry point: '{cfg_entry_point_key}'." ) # load config from json file with open(cfg_entry_point_file) as f: ext_cfg = json.load(f) config = config_factory(ext_cfg["algo_name"]) # update config with external json - this will throw errors if # the external config has keys not present in the base algo config with config.values_unlocked(): config.update(ext_cfg) else: raise ValueError("Please provide a task name through CLI arguments.") if args.dataset is not None: config.train.data = args.dataset if args.name is not None: config.experiment.name = args.name # change location of experiment directory config.train.output_dir = os.path.abspath(os.path.join("./logs/robomimic", args.task)) # get torch device device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda) config.lock() # catch error during training and print it res_str = "finished run successfully!" try: train(config, device=device) except Exception as e: res_str = f"run failed with error:\n{e}\n\n{traceback.format_exc()}" print(res_str) if __name__ == "__main__": parser = argparse.ArgumentParser() # Experiment Name (for tensorboard, saving models, etc.) 
parser.add_argument( "--name", type=str, default=None, help="(optional) if provided, override the experiment name defined in the config", ) # Dataset path, to override the one in the config parser.add_argument( "--dataset", type=str, default=None, help="(optional) if provided, override the dataset path defined in the config", ) parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--algo", type=str, default=None, help="Name of the algorithm.") args = parser.parse_args() # run training main(args) # close sim app simulation_app.close()
16,901
Python
38.957447
118
0.633809
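For context, a minimal sketch of the config-resolution flow used by the training script above; the JSON path is a hypothetical stand-in for the file behind a task's `robomimic_<algo>_cfg_entry_point`.

import json

from robomimic.config import config_factory

# hypothetical path to an external robomimic config shipped with a task
with open("./bc_config.json") as f:
    ext_cfg = json.load(f)

# build the base config for the algorithm, then overlay the external JSON;
# unknown keys in the external config raise an error, which catches typos early
config = config_factory(ext_cfg["algo_name"])
with config.values_unlocked():
    config.update(ext_cfg)
config.lock()  # freeze the config before handing it to train()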
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/episode_merging.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Tool to merge multiple episodes, each containing a single trajectory, into one episode with multiple trajectories.""" from __future__ import annotations import argparse import h5py import json import os if __name__ == "__main__": # parse arguments parser = argparse.ArgumentParser(description="Merge multiple episodes with a single trajectory into one episode.") parser.add_argument( "--dir", type=str, default=None, help="Path to directory that contains all single episode hdf5 files" ) parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--out", type=str, default="merged_dataset.hdf5", help="output hdf5 file") args_cli = parser.parse_args() # read arguments parent_dir = args_cli.dir merged_dataset_name = args_cli.out task_name = args_cli.task # check valid task name if task_name is None: raise ValueError("Please specify a valid task name.") # get hdf5 entries from specified directory entries = [i for i in os.listdir(parent_dir) if i.endswith(".hdf5")] # create new hdf5 file for merging episodes # note: os.path.join avoids relying on a trailing slash in --dir fp = h5py.File(os.path.join(parent_dir, merged_dataset_name), "a") # initialize data group f_grp = fp.create_group("data") f_grp.attrs["num_samples"] = 0 # merge all episodes for count, entry in enumerate(entries): fc = h5py.File(os.path.join(parent_dir, entry), "r") # find total number of samples in all demos f_grp.attrs["num_samples"] = f_grp.attrs["num_samples"] + fc["data"]["demo_0"].attrs["num_samples"] fc.copy("data/demo_0", fp["data"], "demo_" + str(count)) fc.close() # This is needed to run env in robomimic fp["data"].attrs["env_args"] = json.dumps({"env_name": task_name, "type": 2, "env_kwargs": {}}) fp.close() print(f"Merged {len(entries)} episodes into: {merged_dataset_name}")
1,934
Python
32.362068
116
0.661324
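A small sanity-check sketch for a file produced by the merging tool above, assuming the default output name; it only reads back attributes that the tool itself writes.

import h5py

with h5py.File("merged_dataset.hdf5", "r") as f:
    print("total samples:", f["data"].attrs["num_samples"])
    print("env args:", f["data"].attrs["env_args"])
    # each merged trajectory is stored as data/demo_<index>
    for demo in f["data"]:
        print(demo, "->", f["data"][demo].attrs["num_samples"], "samples")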
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/inspect_demonstrations.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Tool to check structure of hdf5 files.""" from __future__ import annotations import argparse import h5py def check_group(f, num: int): """Recursively print the keys, value types, and attributes stored under an HDF5 node.""" # print the keys nested under this node for subs in f: if isinstance(subs, str): print("\t" * num, subs, ":", type(f[subs])) # recurse only into groups: iterating a dataset would loop over its rows if isinstance(f[subs], h5py.Group): check_group(f[subs], num + 1) # print attributes of the group print("\t" * num, "attributes", ":") for attr in f.attrs: print("\t" * (num + 1), attr, ":", type(f.attrs[attr]), ":", f.attrs[attr]) if __name__ == "__main__": # parse arguments parser = argparse.ArgumentParser(description="Check structure of hdf5 file.") parser.add_argument("file", type=str, help="The path to HDF5 file to analyze.") args_cli = parser.parse_args() # open specified file with h5py.File(args_cli.file, "r") as f: # print name of the file first print(f) # print contents of file check_group(f["data"], 1)
1,166
Python
28.923076
97
0.614923
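For comparison with the recursive helper above, h5py also ships a built-in visitor that walks every nested group and dataset; a sketch with a hypothetical file name:

import h5py

with h5py.File("dataset.hdf5", "r") as f:
    # visititems calls the callback with (path, object) for every nested item
    f.visititems(lambda name, obj: print(name, "->", type(obj).__name__))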
NVIDIA-Omniverse/orbit/source/standalone/workflows/robomimic/tools/split_train_val.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # MIT License # # Copyright (c) 2021 Stanford Vision and Learning Lab # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ Script for splitting a dataset hdf5 file into training and validation trajectories. Args: dataset: path to hdf5 dataset filter_key: if provided, split the subset of trajectories in the file that correspond to this filter key into a training and validation set of trajectories, instead of splitting the full set of trajectories ratio: validation ratio, in (0, 1). Defaults to 0.1, which is 10%. Example usage: python split_train_val.py --dataset /path/to/demo.hdf5 --ratio 0.1 """ from __future__ import annotations import argparse import h5py import numpy as np from robomimic.utils.file_utils import create_hdf5_filter_key def split_train_val_from_hdf5(hdf5_path: str, val_ratio=0.1, filter_key=None): """ Splits data into training set and validation set from HDF5 file. 
Args: hdf5_path: path to the hdf5 file to load the transitions from val_ratio: ratio of validation demonstrations to all demonstrations filter_key: if provided, split the subset of demonstration keys stored under mask/@filter_key instead of the full set of demonstrations """ # retrieve number of demos f = h5py.File(hdf5_path, "r") if filter_key is not None: print(f"Using filter key: {filter_key}") demos = sorted(elem.decode("utf-8") for elem in np.array(f[f"mask/{filter_key}"])) else: demos = sorted(list(f["data"].keys())) num_demos = len(demos) f.close() # get random split num_val = int(val_ratio * num_demos) mask = np.zeros(num_demos) mask[:num_val] = 1.0 np.random.shuffle(mask) mask = mask.astype(int) train_inds = (1 - mask).nonzero()[0] valid_inds = mask.nonzero()[0] train_keys = [demos[i] for i in train_inds] valid_keys = [demos[i] for i in valid_inds] print(f"{num_val} validation demonstrations out of {num_demos} total demonstrations.") # pass mask to generate split name_1 = "train" name_2 = "valid" if filter_key is not None: name_1 = f"{filter_key}_{name_1}" name_2 = f"{filter_key}_{name_2}" train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1) valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2) print(f"Total number of train samples: {np.sum(train_lengths)}") print(f"Average number of train samples: {np.mean(train_lengths)}") print(f"Total number of valid samples: {np.sum(valid_lengths)}") print(f"Average number of valid samples: {np.mean(valid_lengths)}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("dataset", type=str, help="path to hdf5 dataset") parser.add_argument( "--filter_key", type=str, default=None, help=( "If provided, split the subset of trajectories in the file that correspond to this filter key" " into a training and validation set of trajectories, instead of splitting the full set of" " trajectories." ), ) parser.add_argument("--ratio", type=float, default=0.1, help="validation ratio, in (0, 1)") args = parser.parse_args() # seed to make sure results are consistent np.random.seed(0) split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key)
4,685
Python
36.190476
106
0.690288
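The split tool above records its result as filter keys under mask/<name> in the same file; a sketch of reading them back (the dataset path is hypothetical):

import h5py
import numpy as np

with h5py.File("demo.hdf5", "r") as f:
    train_keys = [elem.decode("utf-8") for elem in np.array(f["mask/train"])]
    valid_keys = [elem.decode("utf-8") for elem in np.array(f["mask/valid"])]
print(len(train_keys), "train demos;", len(valid_keys), "valid demos")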
NVIDIA-Omniverse/orbit/source/standalone/workflows/sb3/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to play a checkpoint of an RL agent from Stable-Baselines3.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from Stable-Baselines3.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.") parser.add_argument( "--use_last_checkpoint", action="store_true", help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.", ) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import numpy as np import os import torch from stable_baselines3 import PPO from stable_baselines3.common.vec_env import VecNormalize import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils.parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper, process_sb3_cfg def main(): """Play with stable-baselines agent.""" # parse configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) agent_cfg = load_cfg_from_registry(args_cli.task, "sb3_cfg_entry_point") # post-process agent configuration agent_cfg = process_sb3_cfg(agent_cfg) # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg) # wrap around environment for stable baselines env = Sb3VecEnvWrapper(env) # normalize environment (if needed) if "normalize_input" in agent_cfg: env = VecNormalize( env, training=True, norm_obs="normalize_input" in agent_cfg and agent_cfg.pop("normalize_input"), norm_reward="normalize_value" in agent_cfg and agent_cfg.pop("normalize_value"), clip_obs="clip_obs" in agent_cfg and agent_cfg.pop("clip_obs"), gamma=agent_cfg["gamma"], clip_reward=np.inf, ) # directory for logging into log_root_path = os.path.join("logs", "sb3", args_cli.task) log_root_path = os.path.abspath(log_root_path) # check checkpoint is valid if args_cli.checkpoint is None: if args_cli.use_last_checkpoint: checkpoint = "model_.*.zip" else: checkpoint = "model.zip" checkpoint_path = get_checkpoint_path(log_root_path, ".*", checkpoint) else: checkpoint_path = args_cli.checkpoint # create agent from stable baselines print(f"Loading checkpoint from: {checkpoint_path}") agent = PPO.load(checkpoint_path, env, print_system_info=True) # reset environment obs = env.reset() # simulate environment while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # agent stepping actions, _ = agent.predict(obs, deterministic=True) # env stepping obs, _, _, _ = env.step(actions) # close the simulator env.close() if __name__ == "__main__": # 
run the main function main() # close sim app simulation_app.close()
4,014
Python
33.025423
115
0.680867
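One caveat with the playback script above: it rebuilds VecNormalize from the config instead of restoring the statistics learned during training. If a run saved them via VecNormalize.save(), a sketch of restoring them (the pickle path is hypothetical):

from stable_baselines3.common.vec_env import VecNormalize

env = VecNormalize.load("logs/sb3/<task>/vecnormalize.pkl", env)
env.training = False     # freeze the running statistics during evaluation
env.norm_reward = False  # report raw rewards when playing back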
NVIDIA-Omniverse/orbit/source/standalone/workflows/sb3/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to train RL agent with Stable Baselines3. Since Stable-Baselines3 does not support buffers living on GPU directly, we recommend using a smaller number of environments. Otherwise, there will be significant overhead in GPU->CPU transfer. """ from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Train an RL agent with Stable-Baselines3.") parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.") parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).") parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import numpy as np import os from datetime import datetime from stable_baselines3 import PPO from stable_baselines3.common.callbacks import CheckpointCallback from stable_baselines3.common.logger import configure from stable_baselines3.common.vec_env import VecNormalize from omni.isaac.orbit.utils.dict import print_dict from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.sb3 import Sb3VecEnvWrapper, process_sb3_cfg def main(): """Train with stable-baselines agent.""" # parse configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) agent_cfg = load_cfg_from_registry(args_cli.task, "sb3_cfg_entry_point") # override configuration with command line arguments if args_cli.seed is not None: agent_cfg["seed"] = args_cli.seed # directory for logging into log_dir = os.path.join("logs", "sb3", args_cli.task, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg) dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg) # post-process agent configuration agent_cfg = process_sb3_cfg(agent_cfg) # read configurations about the agent-training policy_arch = agent_cfg.pop("policy") n_timesteps = agent_cfg.pop("n_timesteps") # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) # wrap for video recording if args_cli.video: video_kwargs = {
"video_folder": os.path.join(log_dir, "videos"), "step_trigger": lambda step: step % args_cli.video_interval == 0, "video_length": args_cli.video_length, "disable_logger": True, } print("[INFO] Recording videos during training.") print_dict(video_kwargs, nesting=4) env = gym.wrappers.RecordVideo(env, **video_kwargs) # wrap around environment for stable baselines env = Sb3VecEnvWrapper(env) # set the seed env.seed(seed=agent_cfg["seed"]) if "normalize_input" in agent_cfg: env = VecNormalize( env, training=True, norm_obs="normalize_input" in agent_cfg and agent_cfg.pop("normalize_input"), norm_reward="normalize_value" in agent_cfg and agent_cfg.pop("normalize_value"), clip_obs="clip_obs" in agent_cfg and agent_cfg.pop("clip_obs"), gamma=agent_cfg["gamma"], clip_reward=np.inf, ) # create agent from stable baselines agent = PPO(policy_arch, env, verbose=1, **agent_cfg) # configure the logger new_logger = configure(log_dir, ["stdout", "tensorboard"]) agent.set_logger(new_logger) # callbacks for agent checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=log_dir, name_prefix="model", verbose=2) # train the agent agent.learn(total_timesteps=n_timesteps, callback=checkpoint_callback) # save the final model agent.save(os.path.join(log_dir, "model")) # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
5,362
Python
37.307143
117
0.696382
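A note on the checkpoint callback in the training script above: Stable-Baselines3 counts save_freq in callback calls, and each call advances the timestep counter by the number of parallel environments, so the SB3 documentation suggests scaling the frequency down by the environment count. A sketch:

from stable_baselines3.common.callbacks import CheckpointCallback

# checkpoint roughly every 1000 *environment* steps, regardless of num_envs
checkpoint_callback = CheckpointCallback(
    save_freq=max(1000 // env.num_envs, 1),
    save_path=log_dir,
    name_prefix="model",
    verbose=2,
)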
NVIDIA-Omniverse/orbit/source/standalone/workflows/rl_games/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to play a checkpoint of an RL agent from RL-Games.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RL-Games.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.") parser.add_argument( "--use_last_checkpoint", action="store_true", help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.", ) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import math import os import torch from rl_games.common import env_configurations, vecenv from rl_games.common.player import BasePlayer from rl_games.torch_runner import Runner from omni.isaac.orbit.utils.assets import retrieve_file_path import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper def main(): """Play with RL-Games agent.""" # parse env configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point") # wrap around environment for rl-games rl_device = agent_cfg["params"]["config"]["device"] clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf) clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf) # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg) # wrap around environment for rl-games env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions) # register the environment to rl-games registry # note: in agents configuration: environment name must be "rlgpu" vecenv.register( "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs) ) env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env}) # specify directory for logging experiments log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"]) log_root_path = os.path.abspath(log_root_path) print(f"[INFO] Loading experiment from directory: {log_root_path}") # find checkpoint if args_cli.checkpoint is None: # specify directory for logging runs run_dir = agent_cfg["params"]["config"].get("full_experiment_name", ".*") # specify name of checkpoint if args_cli.use_last_checkpoint: checkpoint_file = ".*" else: # this loads the best checkpoint checkpoint_file = f"{agent_cfg['params']['config']['name']}.pth" # get path to previous checkpoint resume_path = 
get_checkpoint_path(log_root_path, run_dir, checkpoint_file, other_dirs=["nn"]) else: resume_path = retrieve_file_path(args_cli.checkpoint) # load previously trained model agent_cfg["params"]["load_checkpoint"] = True agent_cfg["params"]["load_path"] = resume_path print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}") # set number of actors into agent config agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs # create runner from rl-games runner = Runner() runner.load(agent_cfg) # obtain the agent from the runner agent: BasePlayer = runner.create_player() agent.restore(resume_path) agent.reset() # reset environment obs = env.reset() # required: enables the flag for batched observations _ = agent.get_batch_size(obs, 1) # simulate environment # note: We simplified the logic in rl-games player.py (:func:`BasePlayer.run()`) function in an # attempt to have complete control over environment stepping. However, this removes other # operations such as masking that is used for multi-agent learning by RL-Games. while simulation_app.is_running(): # run everything in inference mode with torch.inference_mode(): # convert obs to agent format obs = agent.obs_to_torch(obs) # agent stepping actions = agent.get_action(obs, is_deterministic=True) # env stepping obs, _, dones, _ = env.step(actions) # perform operations for terminated episodes if len(dones) > 0: # reset rnn state for terminated episodes if agent.is_rnn and agent.states is not None: for s in agent.states: s[:, dones, :] = 0.0 # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
5,785
Python
37.573333
117
0.676059
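For reference, a sketch of the agent-config fields the playback script above actually reads, written as a plain dict mirroring the rl-games YAML layout; all values are hypothetical:

agent_cfg = {
    "params": {
        "config": {
            "name": "cartpole",   # experiment name; also the default checkpoint stem
            "device": "cuda:0",   # rl_device handed to RlGamesVecEnvWrapper
            "num_actors": 64,     # overwritten with env.unwrapped.num_envs
        },
        "env": {
            "clip_observations": 5.0,  # falls back to math.inf when absent
            "clip_actions": 1.0,       # falls back to math.inf when absent
        },
    },
}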
NVIDIA-Omniverse/orbit/source/standalone/workflows/rl_games/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Script to train RL agent with RL-Games.""" from __future__ import annotations """Launch Isaac Sim Simulator first.""" import argparse from omni.isaac.orbit.app import AppLauncher # add argparse arguments parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.") parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.") parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).") parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument( "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." ) parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" import gymnasium as gym import math import os from datetime import datetime from rl_games.common import env_configurations, vecenv from rl_games.common.algo_observer import IsaacAlgoObserver from rl_games.torch_runner import Runner from omni.isaac.orbit.utils.dict import print_dict from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml import omni.isaac.orbit_tasks # noqa: F401 from omni.isaac.orbit_tasks.utils import load_cfg_from_registry, parse_env_cfg from omni.isaac.orbit_tasks.utils.wrappers.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper def main(): """Train with RL-Games agent.""" # parse seed from command line args_cli_seed = args_cli.seed # parse configuration env_cfg = parse_env_cfg( args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric ) agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point") # override from command line if args_cli_seed is not None: agent_cfg["params"]["seed"] = args_cli_seed # specify directory for logging experiments log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"]) log_root_path = os.path.abspath(log_root_path) print(f"[INFO] Logging experiment in directory: {log_root_path}") # specify directory for logging runs log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) # set directory into agent config # logging directory path: <train_dir>/<full_experiment_name> agent_cfg["params"]["config"]["train_dir"] = log_root_path agent_cfg["params"]["config"]["full_experiment_name"] = log_dir # dump the configuration into log-directory dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg) dump_pickle(os.path.join(log_root_path, log_dir, "params", "env.pkl"), env_cfg) dump_pickle(os.path.join(log_root_path, log_dir, "params", "agent.pkl"), agent_cfg) # read configurations about the agent-training rl_device = 
agent_cfg["params"]["config"]["device"] clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf) clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf) # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) # wrap for video recording if args_cli.video: video_kwargs = { "video_folder": os.path.join(log_dir, "videos"), "step_trigger": lambda step: step % args_cli.video_interval == 0, "video_length": args_cli.video_length, "disable_logger": True, } print("[INFO] Recording videos during training.") print_dict(video_kwargs, nesting=4) env = gym.wrappers.RecordVideo(env, **video_kwargs) # wrap around environment for rl-games env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions) # register the environment to rl-games registry # note: in agents configuration: environment name must be "rlgpu" vecenv.register( "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs) ) env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env}) # set number of actors into agent config agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs # create runner from rl-games runner = Runner(IsaacAlgoObserver()) runner.load(agent_cfg) # set seed of the env env.seed(agent_cfg["params"]["seed"]) # reset the agent and env runner.reset() # train the agent runner.run({"train": True, "play": False, "sigma": None}) # close the simulator env.close() if __name__ == "__main__": # run the main function main() # close sim app simulation_app.close()
5,558
Python
39.576642
117
0.692155
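The directory wiring in the training script above produces the layout sketched below; the run name defaults to a timestamp, and nn/ is where rl-games writes checkpoints, as the playback script's other_dirs=["nn"] implies.

# logs/rl_games/<config name>/          <- train_dir
#   <full_experiment_name>/             <- one run, timestamped by default
#     params/                           <- env.yaml, agent.yaml, env.pkl, agent.pkl
#     nn/                               <- checkpoints written by rl-games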
NVIDIA-Omniverse/orbit/docs/conf.py
# Copyright (c) 2022-2024, The ORBIT Project Developers. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit")) sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit/omni/isaac/orbit")) sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit_tasks")) sys.path.insert(0, os.path.abspath("../source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks")) # -- Project information ----------------------------------------------------- project = "orbit" copyright = "2022-2024, The ORBIT Project Developers." author = "The ORBIT Project Developers." version = "0.2.0" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "autodocsumm", "myst_parser", "sphinx.ext.napoleon", "sphinxemoji.sphinxemoji", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.githubpages", "sphinx.ext.intersphinx", "sphinx.ext.mathjax", "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinxcontrib.bibtex", "sphinx_copybutton", "sphinx_design", ] # mathjax hacks mathjax3_config = { "tex": { "inlineMath": [["\\(", "\\)"]], "displayMath": [["\\[", "\\]"]], }, } # panels hacks panels_add_bootstrap_css = False panels_add_fontawesome_css = True # supported file extensions for source files source_suffix = { ".rst": "restructuredtext", ".md": "markdown", } # make sure we don't have any unknown references # TODO: Enable this by default once we have fixed all the warnings # nitpicky = True # put type hints inside the signature instead of the description (easier to maintain) autodoc_typehints = "signature" # autodoc_typehints_format = "fully-qualified" # document class *and* __init__ methods autoclass_content = "class" # # separate class docstring from __init__ docstring autodoc_class_signature = "separated" # sort members by source order autodoc_member_order = "bysource" # inherit docstrings from base classes autodoc_inherit_docstrings = True # BibTeX configuration bibtex_bibfiles = ["source/_static/refs.bib"] # generate autosummary even if no references autosummary_generate = True autosummary_generate_overwrite = False # default autodoc settings autodoc_default_options = { "autosummary": True, } # generate links to the documentation of objects in external projects intersphinx_mapping = { "python": ("https://docs.python.org/3", None), "numpy": ("https://numpy.org/doc/stable/", None), "torch": ("https://pytorch.org/docs/stable/", None), "isaac": ("https://docs.omniverse.nvidia.com/py/isaacsim", None), "gymnasium": ("https://gymnasium.farama.org/", None), "warp": ("https://nvidia.github.io/warp/", None), } # Add any paths that contain templates here, relative to this directory. 
templates_path = [] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md", "licenses/*"] # Mock out modules that are not available on RTD autodoc_mock_imports = [ "torch", "numpy", "matplotlib", "scipy", "carb", "warp", "pxr", "omni.kit", "omni.usd", "omni.client", "omni.physx", "omni.physics", "pxr.PhysxSchema", "pxr.PhysicsSchemaTools", "omni.replicator", "omni.isaac.core", "omni.isaac.kit", "omni.isaac.cloner", "omni.isaac.urdf", "omni.isaac.version", "omni.isaac.motion_generation", "omni.isaac.ui", "omni.syntheticdata", "omni.timeline", "omni.ui", "gym", "skrl", "stable_baselines3", "rsl_rl", "rl_games", "ray", "h5py", "hid", "prettytable", "tqdm", "tensordict", "trimesh", "toml", ] # List of zero or more Sphinx-specific warning categories to be squelched (i.e., # suppressed, ignored). suppress_warnings = [ # FIXME: *THIS IS TERRIBLE.* Generally speaking, we do want Sphinx to inform # us about cross-referencing failures. Remove this hack entirely after Sphinx # resolves this open issue: # https://github.com/sphinx-doc/sphinx/issues/4961 # Squelch mostly ignorable warnings resembling: # WARNING: more than one target found for cross-reference 'TypeHint': # beartype.door._doorcls.TypeHint, beartype.door.TypeHint # # Sphinx currently emits *MANY* of these warnings against our # documentation. All of these warnings appear to be ignorable. Although we # could explicitly squelch *SOME* of these warnings by canonicalizing # relative to absolute references in docstrings, Sphinx emits still others # of these warnings when parsing PEP-compliant type hints via static # analysis. Since those hints are actual hints that *CANNOT* by definition # by canonicalized, our only recourse is to squelch warnings altogether. "ref.python", ] # -- Internationalization ---------------------------------------------------- # specifying the natural language populates some key tags language = "en" # -- Options for HTML output ------------------------------------------------- import sphinx_book_theme html_title = "orbit documentation" html_theme_path = [sphinx_book_theme.get_html_theme_path()] html_theme = "sphinx_book_theme" html_favicon = "source/_static/favicon.ico" html_show_copyright = True html_show_sphinx = False html_last_updated_fmt = "" # to reveal the build date in the pages meta # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["source/_static/css"] html_css_files = ["custom.css"] html_theme_options = { "collapse_navigation": True, "repository_url": "https://github.com/NVIDIA-Omniverse/Orbit", "announcement": "We have now released v0.2.0! 
Please use the latest version for the best experience.", "use_repository_button": True, "use_issues_button": True, "use_edit_page_button": True, "show_toc_level": 1, "use_sidenotes": True, "logo": { "text": "orbit documentation", "image_light": "source/_static/NVIDIA-logo-white.png", "image_dark": "source/_static/NVIDIA-logo-black.png", }, "icon_links": [ { "name": "GitHub", "url": "https://github.com/NVIDIA-Omniverse/Orbit", "icon": "fa-brands fa-square-github", "type": "fontawesome", }, { "name": "Isaac Sim", "url": "https://developer.nvidia.com/isaac-sim", "icon": "https://img.shields.io/badge/IsaacSim-2023.1.1-silver.svg", "type": "url", }, { "name": "Stars", "url": "https://img.shields.io/github/stars/NVIDIA-Omniverse/Orbit?color=fedcba", "icon": "https://img.shields.io/github/stars/NVIDIA-Omniverse/Orbit?color=fedcba", "type": "url", }, ], "icon_links_label": "Quick Links", } html_sidebars = {"**": ["navbar-logo.html", "icon-links.html", "search-field.html", "sbt-sidebar-nav.html"]} # -- Advanced configuration ------------------------------------------------- def skip_member(app, what, name, obj, skip, options): # List the names of the functions you want to skip here exclusions = ["from_dict", "to_dict", "replace", "copy", "__post_init__"] if name in exclusions: return True return None def setup(app): app.connect("autodoc-skip-member", skip_member)
8,492
Python
32.175781
108
0.644842
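With the intersphinx mapping above, docstrings in the source tree can cross-link external projects; a small hypothetical sketch of the reStructuredText roles this enables:

def scale(value: "torch.Tensor", factor: float) -> "torch.Tensor":
    """Scale a tensor by a constant factor.

    Both :class:`torch.Tensor` (resolved via the ``torch`` mapping) and
    :class:`numpy.ndarray` (via ``numpy``) render as links in the built docs.
    """
    return value * factor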
NVIDIA-Omniverse/PhysX/physx/tools/physxmetadatagenerator/generateMetaData.py
## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions ## are met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## * Neither the name of NVIDIA CORPORATION nor the names of its ## contributors may be used to endorse or promote products derived ## from this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY ## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY ## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## ## Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. import argparse import os import stat import sys import re import platform import shutil from lib import utils from lib import compare # test mode: create copy of reference files # update mode: try to open file in p4 if necessary def setup_targetdir(metaDataDir, isTestMode): if isTestMode: targetDir = metaDataDir + "_test" if os.path.isdir(targetDir): print("deleting", targetDir) shutil.rmtree(targetDir) def ignore_non_autogen(dir, files): return [f for f in files if not (os.path.isdir(os.path.join(dir, f)) or re.search(r"AutoGenerated", f))] shutil.copytree(metaDataDir, targetDir, ignore=ignore_non_autogen) # set write permission on the new files: for root, dirs, files in os.walk(targetDir): for file in files: os.chmod(os.path.join(root, file), stat.S_IWRITE|stat.S_IREAD) else: targetDir = metaDataDir if not utils.check_files_writable(utils.list_autogen_files(targetDir)): utils.try_checkout_files(utils.list_autogen_files(targetDir)) if not utils.check_files_writable(utils.list_autogen_files(targetDir)): print("auto generated meta data files not writable:", targetDir) print("aborting") sys.exit(1) utils.clear_files(utils.list_autogen_files(targetDir)) return targetDir # test mode: compare reference and generated files def test_targetdir(targetDir, metaDataDir, isTestMode): if isTestMode: print("compare generated meta data files with reference meta data files:") result = compare.compareMetaDataDirectories(targetDir, metaDataDir) if not result: print("failed!") sys.exit(1) else: print("passed.") def get_osx_platform_path(): cmd = "xcodebuild -showsdks" (stdout, stderr) = utils.run_cmd(cmd) if stderr != "": print(stderr) sys.exit(1) match = re.search(r"(-sdk macosx\d+\.\d+)", stdout, flags=re.MULTILINE) if not match: print("couldn't parse output of:\n", cmd, "\naborting!") sys.exit(1) sdkparam = match.group(0) cmd = "xcodebuild -version " + sdkparam + " Path" (sdkPath, stderr) = utils.run_cmd(cmd) if stderr != "": print(stderr) sys.exit(1) print("using sdk path:", sdkPath.rstrip()) return sdkPath.rstrip() def includeString(path): return ' -I"' + path + '"' ########################################################################################################### # main ########################################################################################################### parser = argparse.ArgumentParser(description='Generates meta data source files.') parser.add_argument('-test', help='enables testing mode, internal only', action='store_true') args = parser.parse_args() scriptDir = os.path.dirname(os.path.realpath(__file__)) # create temp dir if it doesn't exist yet os.makedirs("temp", exist_ok=True) # find SDK_ROOT and PX_SHARED sdkRoot = utils.find_root_path(scriptDir, "source") clangRoot = os.path.normpath(os.environ['PM_clangMetadata_PATH']) print("testmode:", args.test) print("root sdk:", sdkRoot) print("root clang:", clangRoot) boilerPlateFile = os.path.join(sdkRoot, os.path.normpath("tools/physxmetadatagenerator/PxBoilerPlate.h")) includes = '' includes += includeString(sdkRoot + '/include') includes += includeString(sdkRoot + '/tools/physxmetadatagenerator') print("platform:", platform.system()) commonFlags = '-DNDEBUG -DPX_GENERATE_META_DATA -DPX_ENABLE_FEATURES_UNDER_CONSTRUCTION=0 -x c++-header -w -Wno-c++11-narrowing -fms-extensions ' if platform.system() == "Windows": debugFile = open("temp/clangCommandLine_windows.txt", "a") # read INCLUDE variable, set by calling batch script sysIncludes = os.environ['INCLUDE'] sysIncludes = sysIncludes.rstrip(';') sysIncludeList = sysIncludes.split(';') sysIncludeFlags = ' -isystem"' + '" -isystem"'.join(sysIncludeList) + '"' # for some reason -cc1 needs to go first in commonFlags commonFlags = '-cc1 ' + commonFlags platformFlags = '-DPX_VC=14 -D_WIN32 -std=c++14' + sysIncludeFlags clangExe = os.path.join(clangRoot, os.path.normpath('win32/bin/clang.exe')) elif platform.system() == "Linux": debugFile = open("temp/clangCommandLine_linux.txt", "a") platformFlags = '-std=c++0x' clangExe = os.path.join(clangRoot, os.path.normpath('linux32/bin/clang')) elif platform.system() == "Darwin": debugFile = open("temp/clangCommandLine_osx.txt", "a") platformFlags = '-std=c++0x -isysroot' + get_osx_platform_path() clangExe = os.path.join(clangRoot, os.path.normpath('osx/bin/clang')) else: print("unsupported platform, aborting!") sys.exit(1) commonFlags += ' -boilerplate-file ' + boilerPlateFile # some checks if not os.path.isfile(clangExe): print("didn't find", clangExe, ", aborting!") sys.exit(1) clangExe = '"' + clangExe + '"' # required for execution of clang.exe os.environ["PWD"] = os.path.join(sdkRoot, os.path.normpath("tools/physxmetadatagenerator")) ############################### # PxPhysicsWithExtensions # ############################### print("PxPhysicsWithExtensions:") srcPath = "PxPhysicsWithExtensionsAPI.h" metaDataDir = os.path.join(sdkRoot, os.path.normpath("source/physxmetadata")) targetDir = setup_targetdir(metaDataDir, args.test) cmd = " ".join(["", clangExe, commonFlags, "", platformFlags, includes, srcPath, "-o", '"'+targetDir+'"']) print(cmd, file=debugFile) (stdout, stderr) = utils.run_cmd(cmd) if (stderr != "" or stdout != ""): print(stderr, "\n", stdout) print("wrote meta data files in", targetDir) test_targetdir(targetDir, metaDataDir, args.test) ############################### # PxVehicleExtension # ############################### print("PxVehicleExtension:") srcPath = "PxVehicleExtensionAPI.h" metaDataDir = os.path.join(sdkRoot, os.path.normpath("source/physxvehicle/src/physxmetadata")) includes += includeString(sdkRoot + 
'/include/vehicle') #TODO, get rid of source include includes += includeString(sdkRoot + '/source/physxvehicle/src') targetDir = setup_targetdir(metaDataDir, args.test) cmd = " ".join(["", clangExe, commonFlags, "", platformFlags, includes, srcPath, "-o", '"'+targetDir+'"']) print(cmd, file=debugFile) (stdout, stderr) = utils.run_cmd(cmd) if (stderr != "" or stdout != ""): print(stderr, "\n", stdout) print("wrote meta data files in", targetDir) test_targetdir(targetDir, metaDataDir, args.test)
7,833
Python
33.511013
145
0.691944
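Typical invocations of the generator above, as a sketch; the tool expects PM_clangMetadata_PATH to point at the packaged clang and is run from its own directory:

# python generateMetaData.py        # regenerate the AutoGenerated sources in place
# python generateMetaData.py -test  # write to a "<dir>_test" copy and diff it
#                                   # against the reference meta data directory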
NVIDIA-Omniverse/PhysX/physx/tools/physxmetadatagenerator/lib/utils.py
## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions ## are met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## * Neither the name of NVIDIA CORPORATION nor the names of its ## contributors may be used to endorse or promote products derived ## from this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY ## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY ## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## ## Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. # general utility module import os import sys import re import subprocess def list_autogen_files(dirPath): autogenFiles = [] for (root, subdirs, files) in os.walk(dirPath): files = [f for f in files if re.search(r"AutoGenerated", f)] autogenFiles.extend([os.path.join(root, f) for f in files]) return autogenFiles #checkout files with p4 if available def try_checkout_files(files): print("checking p4 connection parameter...") # checking p4 cmd = "p4" (stdout, stderr) = run_cmd(cmd) if stderr == "": print("p4 available.") else: print("p4 unavailable.") return cmd = "p4 edit " + " " + " ".join(files) (stdout, stderr) = run_cmd(cmd) print(stderr) print(stdout) # check files writability def check_files_writable(files): for file in files: if not os.access(file, os.W_OK): return False return True # find a root directory containing a known directory (as a hint) def find_root_path(startDir, containedDir): currentDir = startDir # search directory tree mergedDir = os.path.join(currentDir, containedDir) while not os.path.isdir(mergedDir): (currentDir, dir) = os.path.split(currentDir) if not dir: return None mergedDir = os.path.join(currentDir, containedDir) return currentDir def run_cmd(cmd, stdin = ""): process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdoutRaw, stderrRaw) = process.communicate(stdin.encode('utf-8')) stdout = stdoutRaw.decode(encoding='utf-8') stderr = stderrRaw.decode(encoding='utf-8') return (stdout, stderr) # clears content of files def clear_files(files): for file in files: open(file, 'w').close() ############################################################################## # internal functions ##############################################################################
3,425
Python
32.588235
115
0.703066
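A short usage sketch for the helpers above; the command is arbitrary and only illustrates the decoded (stdout, stderr) contract of run_cmd:

import os

from lib import utils

# walk upwards from this file until a directory containing "source" is found
root = utils.find_root_path(os.path.dirname(os.path.abspath(__file__)), "source")
print("sdk root:", root)

# run a shell command and capture decoded stdout/stderr
(stdout, stderr) = utils.run_cmd("clang --version")
if stderr:
    print("clang not available:", stderr)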
NVIDIA-Omniverse/PhysX/physx/tools/physxmetadatagenerator/lib/compare.py
## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions ## are met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## * Neither the name of NVIDIA CORPORATION nor the names of its ## contributors may be used to endorse or promote products derived ## from this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY ## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY ## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## ## Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. # meta data comparison module import os import sys import re from . import utils def compareMetaDataDirectories(candidateDir, referenceDir): print("reference dir:", referenceDir) print("candidate dir:", candidateDir) if not _checkFileExistence(candidateDir, referenceDir): return False referenceFiles = utils.list_autogen_files(referenceDir) # get corresponding candidate files without relying on os.walk order def mapRefToCand(refFile): return os.path.join(candidateDir, os.path.relpath(refFile, referenceDir)) candidateFiles = [mapRefToCand(f) for f in referenceFiles] for (fileCand, fileRef) in zip(candidateFiles, referenceFiles): timeCand = os.path.getmtime(fileCand) timeRef = os.path.getmtime(fileRef) if timeCand <= timeRef: print("last modified time of candidate is not later than last modified time of reference:") print("candidate:", fileCand, "\n", "reference:", fileRef) print("ref:", timeRef) print("cand:", timeCand) return False # _read_file_content will remove line endings (windows/unix), but not ignore empty lines candLines = _read_file_content(fileCand) refLines = _read_file_content(fileRef) if not (candLines and refLines): return False if len(candLines) != len(refLines): print("files got different number of lines:") print("candidate:", fileCand, "\n", "reference:", fileRef) print("ref:", len(refLines)) print("cand:", len(candLines)) return False for (i, (lineCand, lineRef)) in enumerate(zip(candLines, refLines)): if (lineCand != lineRef): print("candidate line is not equal to reference line:") print("candidate:", fileCand, "\n", "reference:", fileRef) print("@line number:", i) print("ref:", lineRef) print("cand:", lineCand) return False return True ############################################################################## # internal functions ############################################################################## # will remove line endings (windows/unix), but not ignore empty lines def _read_file_content(filePath): lines = [] try: with open(filePath, "r") as file: for line in file: lines.append(line.rstrip()) except OSError as e: print("issue with reading file:", filePath, ":", e) return lines def _checkFileExistence(candidateDir, referenceDir): candidateSet = set([os.path.relpath(f, candidateDir) for f in utils.list_autogen_files(candidateDir)]) referenceSet = set([os.path.relpath(f, referenceDir) for f in utils.list_autogen_files(referenceDir)]) missingSet = referenceSet - candidateSet if missingSet: print("the following files are missing from the candidates:\n", "\n".join(missingSet)) return False excessSet = candidateSet - referenceSet if excessSet: print("too many candidate files:\n", "\n".join(excessSet)) return False return True
4,397
Python
36.271186
103
0.710257
NVIDIA-Omniverse/PhysX/physx/buildtools/cmake_generate_projects.py
import sys
import os
import glob
import os.path
import shutil
import subprocess
import xml.etree.ElementTree


def packmanExt():
    if sys.platform == 'win32':
        return 'cmd'
    return 'sh'


def cmakeExt():
    if sys.platform == 'win32':
        return '.exe'
    return ''


def filterPreset(presetName):
    winPresetFilter = ['win', 'switch', 'crosscompile']
    if sys.platform == 'win32':
        if any(presetName.find(elem) != -1 for elem in winPresetFilter):
            return True
    else:
        if all(presetName.find(elem) == -1 for elem in winPresetFilter):
            return True
    return False


def noPresetProvided():
    global input
    print('Preset parameter required, available presets:')
    presetfiles = []
    for file in glob.glob("buildtools/presets/*.xml"):
        presetfiles.append(file)

    if len(presetfiles) == 0:
        for file in glob.glob("buildtools/presets/public/*.xml"):
            presetfiles.append(file)

    counter = 0
    presetList = []
    for preset in presetfiles:
        if filterPreset(preset):
            presetXml = xml.etree.ElementTree.parse(preset).getroot()
            if preset.find('user') == -1:
                print('(' + str(counter) + ') ' + presetXml.get('name') + ' <--- ' + presetXml.get('comment'))
                presetList.append(presetXml.get('name'))
            else:
                print('(' + str(counter) + ') ' + presetXml.get('name') + '.user <--- ' + presetXml.get('comment'))
                presetList.append(presetXml.get('name') + '.user')
            counter = counter + 1
    # Fix Python 2.x.
    try:
        input = raw_input
    except NameError:
        pass
    # int() on the raw input is sufficient; eval() of user input is unsafe.
    mode = int(input('Enter preset number: '))
    return presetList[mode]


class CMakePreset:
    def __init__(self, presetName):
        self.presetName = ''
        self.targetPlatform = ''
        self.compiler = ''
        self.generator = ''
        # initialize per instance; class-level lists would be shared between instances
        self.cmakeSwitches = []
        self.cmakeParams = []

        xmlPath = "buildtools/presets/" + presetName + '.xml'
        if os.path.isfile(xmlPath):
            print('Using preset xml: ' + xmlPath)
        else:
            xmlPath = "buildtools/presets/public/" + presetName + '.xml'
            if os.path.isfile(xmlPath):
                print('Using preset xml: ' + xmlPath)
            else:
                print('Preset xml file: ' + xmlPath + ' not found')
                exit()

        # get the xml
        presetNode = xml.etree.ElementTree.parse(xmlPath).getroot()
        self.presetName = presetNode.attrib['name']
        for platform in presetNode.findall('platform'):
            self.targetPlatform = platform.attrib['targetPlatform']
            self.compiler = platform.attrib['compiler']
            self.generator = platform.get('generator')
            print('Target platform: ' + self.targetPlatform + ' using compiler: ' + self.compiler)
            if self.generator is not None:
                print(' using generator: ' + self.generator)

        for cmakeSwitch in presetNode.find('CMakeSwitches'):
            cmSwitch = '-D' + cmakeSwitch.attrib['name'] + '=' + cmakeSwitch.attrib['value'].upper()
            self.cmakeSwitches.append(cmSwitch)

        for cmakeParam in presetNode.find('CMakeParams'):
            if cmakeParam.attrib['name'] in ('CMAKE_INSTALL_PREFIX', 'PX_OUTPUT_LIB_DIR', 'PX_OUTPUT_EXE_DIR', 'PX_OUTPUT_DLL_DIR'):
                cmParam = '-D' + cmakeParam.attrib['name'] + '=\"' + \
                    os.environ['PHYSX_ROOT_DIR'] + '/' + cmakeParam.attrib['value'] + '\"'
            else:
                cmParam = '-D' + cmakeParam.attrib['name'] + '=' + cmakeParam.attrib['value']
            self.cmakeParams.append(cmParam)

    def isMultiConfigPlatform(self):
        if self.targetPlatform == 'linux':
            return False
        elif self.targetPlatform == 'linuxAarch64':
            return False
        return True

    def getCMakeSwitches(self):
        outString = ''
        # We need gpuProjectsFound flag to avoid issues when we have both
        # PX_GENERATE_GPU_PROJECTS and PX_GENERATE_GPU_PROJECTS_ONLY switches
        gpuProjectsFound = False  # initialize flag
        for cmakeSwitch in self.cmakeSwitches:
            outString = outString + ' ' + cmakeSwitch
            if not gpuProjectsFound and cmakeSwitch.find('PX_GENERATE_GPU_PROJECTS') != -1:
                gpuProjectsFound = True  # set flag to True when keyword found
                if os.environ.get('PM_CUDA_PATH') is not None:
                    outString = outString + ' -DCUDAToolkit_ROOT_DIR=' + \
                        os.environ['PM_CUDA_PATH']
                    if self.compiler in ['vc15', 'vc16', 'vc17'] and self.generator != 'ninja':
                        outString = outString + ' -T cuda=' + os.environ['PM_CUDA_PATH']
                    # TODO: Need to do the same for gcc (aarch64) when we package it with Packman
                    elif self.compiler == 'clang':
                        if os.environ.get('PM_clang_PATH') is not None:
                            outString = outString + ' -DCMAKE_CUDA_HOST_COMPILER=' + \
                                os.environ['PM_clang_PATH'] + '/bin/clang++'
        return outString

    def getCMakeParams(self):
        outString = ''
        for cmakeParam in self.cmakeParams:
            outString = outString + ' ' + cmakeParam  # + ' --trace'
        return outString

    def getPlatformCMakeParams(self):
        cmake_modules_root = os.environ['PHYSX_ROOT_DIR'] + '/source/compiler/cmake/modules'
        outString = ' '

        vs_versions = {
            'vc15': '\"Visual Studio 15 2017\"',
            'vc16': '\"Visual Studio 16 2019\"',
            'vc17': '\"Visual Studio 17 2022\"'
        }

        # Visual studio
        if self.compiler in vs_versions:
            generator = '-G \"Ninja Multi-Config\"' if self.generator == 'ninja' else '-G ' + vs_versions[self.compiler]
            outString += generator
        # mac
        elif self.compiler == 'xcode':
            outString = outString + '-G Xcode'
        # Linux
        elif self.targetPlatform in ['linux', 'linuxAarch64']:
            if self.generator is not None and self.generator == 'ninja':
                outString = outString + '-G \"Ninja\"'
                outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ['PM_ninja_PATH'] + '/ninja'
            else:
                outString = outString + '-G \"Unix Makefiles\"'

        if self.targetPlatform == 'win64':
            if self.generator != 'ninja':
                outString = outString + ' -Ax64'
            outString = outString + ' -DTARGET_BUILD_PLATFORM=windows'
            outString = outString + ' -DPX_OUTPUT_ARCH=x86'
            return outString
        elif self.targetPlatform == 'switch64':
            outString = outString + ' -DTARGET_BUILD_PLATFORM=switch'
            outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
                cmake_modules_root + '/switch/NX64Toolchain.txt'
            outString = outString + ' -DCMAKE_GENERATOR_PLATFORM=NX64'
            return outString
        elif self.targetPlatform == 'linux':
            outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
            outString = outString + ' -DPX_OUTPUT_ARCH=x86'
            if self.compiler == 'clang-crosscompile':
                outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
                    cmake_modules_root + '/linux/LinuxCrossToolchain.x86_64-unknown-linux-gnu.cmake'
                outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
            elif self.compiler == 'clang':
                if os.environ.get('PM_clang_PATH') is not None:
                    outString = outString + ' -DCMAKE_C_COMPILER=' + \
                        os.environ['PM_clang_PATH'] + '/bin/clang'
                    outString = outString + ' -DCMAKE_CXX_COMPILER=' + \
                        os.environ['PM_clang_PATH'] + '/bin/clang++'
                else:
                    outString = outString + ' -DCMAKE_C_COMPILER=clang'
                    outString = outString + ' -DCMAKE_CXX_COMPILER=clang++'
            return outString
        elif self.targetPlatform == 'linuxAarch64':
            outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
            outString = outString + ' -DPX_OUTPUT_ARCH=arm'
            if self.compiler == 'clang-crosscompile':
                outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
                    cmake_modules_root + '/linux/LinuxCrossToolchain.aarch64-unknown-linux-gnueabihf.cmake'
                outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
            elif self.compiler == 'gcc':
                # TODO: To change so it uses Packman's compiler. Then add it as
                # host compiler for CUDA above.
                outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=\"' + \
                    cmake_modules_root + '/linux/LinuxAarch64.cmake\"'
            return outString
        elif self.targetPlatform == 'mac64':
            outString = outString + ' -DTARGET_BUILD_PLATFORM=mac'
            outString = outString + ' -DPX_OUTPUT_ARCH=x86'
            return outString
        return ''


def getCommonParams():
    outString = '--no-warn-unused-cli'
    outString = outString + ' -DCMAKE_PREFIX_PATH=\"' + os.environ['PM_PATHS'] + '\"'
    outString = outString + ' -DPHYSX_ROOT_DIR=\"' + \
        os.environ['PHYSX_ROOT_DIR'] + '\"'
    outString = outString + ' -DPX_OUTPUT_LIB_DIR=\"' + \
        os.environ['PHYSX_ROOT_DIR'] + '\"'
    outString = outString + ' -DPX_OUTPUT_BIN_DIR=\"' + \
        os.environ['PHYSX_ROOT_DIR'] + '\"'
    if os.environ.get('GENERATE_SOURCE_DISTRO') == '1':
        outString = outString + ' -DPX_GENERATE_SOURCE_DISTRO=1'
    return outString


def cleanupCompilerDir(compilerDirName):
    if os.path.exists(compilerDirName):
        if sys.platform == 'win32':
            os.system('rmdir /S /Q ' + compilerDirName)
        else:
            shutil.rmtree(compilerDirName, True)
    if not os.path.exists(compilerDirName):
        os.makedirs(compilerDirName)


def presetProvided(pName):
    parsedPreset = CMakePreset(pName)

    print('PM_PATHS: ' + os.environ['PM_PATHS'])

    if os.environ.get('PM_cmake_PATH') is not None:
        cmakeExec = os.environ['PM_cmake_PATH'] + '/bin/cmake' + cmakeExt()
    else:
        cmakeExec = 'cmake' + cmakeExt()
    print('Cmake: ' + cmakeExec)

    # gather cmake parameters
    cmakeParams = parsedPreset.getPlatformCMakeParams()
    cmakeParams = cmakeParams + ' ' + getCommonParams()
    cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeSwitches()
    cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeParams()
    # print(cmakeParams)

    if os.path.isfile(os.environ['PHYSX_ROOT_DIR'] + '/compiler/internal/CMakeLists.txt'):
        cmakeMasterDir = 'internal'
    else:
        cmakeMasterDir = 'public'
    if parsedPreset.isMultiConfigPlatform():
        # cleanup and create output directory
        outputDir = os.path.join('compiler', parsedPreset.presetName)
        cleanupCompilerDir(outputDir)

        # run the cmake script
        # print('Cmake params:' + cmakeParams)
        os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
        os.system(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' +
                  cmakeMasterDir + '\"' + cmakeParams)
        os.chdir(os.environ['PHYSX_ROOT_DIR'])
    else:
        configs = ['debug', 'checked', 'profile', 'release']
        for config in configs:
            # cleanup and create output directory
            outputDir = os.path.join('compiler', parsedPreset.presetName + '-' + config)
            cleanupCompilerDir(outputDir)

            # run the cmake script
            # print('Cmake params:' + cmakeParams)
            os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
            os.system(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' +
                      cmakeMasterDir + '\"' + cmakeParams + ' -DCMAKE_BUILD_TYPE=' + config)
            os.chdir(os.environ['PHYSX_ROOT_DIR'])


def main():
    if (sys.version_info[0] < 3) or (sys.version_info[0] == 3 and sys.version_info[1] < 5):
        # format the message before printing; print() returns None, so chaining
        # .format() onto it would raise an AttributeError
        print("You are using Python {}. You must use Python 3.5 and up. "
              "Please read README.md for requirements.".format(sys.version))
        exit()

    physx_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    os.environ['PHYSX_ROOT_DIR'] = physx_root_dir.replace("\\", "/")

    if len(sys.argv) != 2:
        presetName = noPresetProvided()
        if sys.platform == 'win32':
            print('Running generate_projects.bat ' + presetName)
            cmd = 'generate_projects.bat {}'.format(presetName)
            result = subprocess.run(cmd, cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
            # TODO: catch exception and add capture errors
        else:
            print('Running generate_projects.sh ' + presetName)
            # TODO: once we have Python 3.7.2 for linux, add the text=True instead of universal_newlines
            result = subprocess.run(['bash', './generate_projects.sh', presetName], cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
            # TODO: catch exception and add capture errors
    else:
        presetName = sys.argv[1]
        if filterPreset(presetName):
            presetProvided(presetName)
        else:
            print('Preset not supported on this build platform.')


main()
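
# --- Illustrative preset file sketch (an assumption, for reference only) ---
# CMakePreset above documents its input format only implicitly through the
# attributes it reads. Based purely on that parsing code, a minimal
# buildtools/presets/<name>.xml could look like the sketch below; the root and
# child tag names and all concrete values are invented for illustration (the
# parser only inspects attributes, not tag names):
#
#   <preset name="linux-clang" comment="Linux clang build">
#     <platform targetPlatform="linux" compiler="clang" generator="ninja" />
#     <CMakeSwitches>
#       <cmakeSwitch name="PX_GENERATE_GPU_PROJECTS" value="true" />
#     </CMakeSwitches>
#     <CMakeParams>
#       <cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/linux-clang" />
#     </CMakeParams>
#   </preset>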
14,130
Python
42.885093
223
0.573107
NVIDIA-Omniverse/OpenUSD-Code-Samples/build_docs.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

import argparse
import logging
import os
from pathlib import Path
import shutil

from rstcloth import RstCloth
import sphinx.cmd.build
import toml

REPO_ROOT = Path(__file__).parent
SOURCE_DIR = REPO_ROOT / "source"
SPHINX_DIR = REPO_ROOT / "sphinx"
SPHINX_CODE_SAMPLES_DIR = SPHINX_DIR / "usd"

# 0 = normal toctree, 1 = :doc: tags
TOCTREE_STYLE = 0
REPLACE_USDA_EXT = True
STRIP_COPYRIGHTS = True
IMAGE_TYPES = {".jpg", ".gif"}

logger = logging.getLogger(__name__)


def main():
    # flush build dir
    if os.path.exists(SPHINX_CODE_SAMPLES_DIR):
        shutil.rmtree(SPHINX_CODE_SAMPLES_DIR)
    SPHINX_CODE_SAMPLES_DIR.mkdir(exist_ok=False)

    samples = {}
    # each config.toml should be a sample
    for config_file in SOURCE_DIR.rglob("config.toml"):
        category_name = config_file.parent.parent.name
        sample_name = config_file.parent.name
        if category_name not in samples:
            samples[category_name] = []

        logger.info(f"processing: {sample_name}")
        sample_source_dir = config_file.parent
        sample_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR) / f"{sample_name}"

        # make sure category dir exists
        category_output_dir = SPHINX_CODE_SAMPLES_DIR / sample_source_dir.parent.relative_to(SOURCE_DIR)
        if not os.path.exists(category_output_dir):
            category_output_dir.mkdir(exist_ok=False)

        sample_rst_out = category_output_dir / f"{sample_name}.rst"
        with open(config_file) as f:
            content = f.read()
            config = toml.loads(content)
        title = config["core"]["title"]
        samples[category_name].append([sample_name, title])

        sample_output_dir.mkdir(exist_ok=True)

        with open(sample_rst_out, "w") as f:
            doc = RstCloth(f)
            if TOCTREE_STYLE == 1:
                doc._add(":orphan:")
                doc.newline()
            doc.directive("meta",
                fields=[
                    ('description', config["metadata"]["description"]),
                    ('keywords', ", ".join(config["metadata"]["keywords"]))
                ])
            doc.newline()
            doc.title(config["core"]["title"], overline=False)
            doc.newline()

            md_file_path = sample_source_dir / "header.md"
            new_md_name = sample_name + "_header.md"
            out_md = category_output_dir / new_md_name
            prepend_include_path(md_file_path, out_md, sample_name)
            fields = [("parser", "myst_parser.sphinx_")]
            doc.directive("include", new_md_name, fields)
            doc.newline()
            doc.newline()

            doc.directive("tab-set")
            doc.newline()

            code_flavors = {
                "USD Python": "py_usd.md",
                "Python omni.usd": "py_omni_usd.md",
                "Python Kit Commands": "py_kit_cmds.md",
                "USD C++": "cpp_usd.md",
                "C++ omni.usd": "cpp_omni_usd.md",
                "C++ Kit Commands": "cpp_kit_cmds.md",
                "usdview": "py_usdview.md",
                "USDA": "usda.md",
            }

            for tab_name in code_flavors:
                md_file_name = code_flavors[tab_name]
                md_file_path = sample_source_dir / code_flavors[tab_name]
                if md_file_path.exists():
                    doc.directive("tab-item", tab_name, None, None, 3)
                    doc.newline()

                    # make sure all md flavor names are unique
                    new_md_name = sample_name + "_" + md_file_name
                    out_md = category_output_dir / new_md_name
                    prepend_include_path(md_file_path, out_md, sample_name)

                    fields = [("parser", "myst_parser.sphinx_")]
                    doc.directive("include", new_md_name, fields, None, 6)
                    doc.newline()

            # copy all samples
            ignore = shutil.ignore_patterns('*.md', 'config.toml')
            if REPLACE_USDA_EXT:
                ignore = shutil.ignore_patterns('*.md', 'config.toml', '*.usda')
            shutil.copytree(sample_source_dir, sample_output_dir, ignore=ignore, dirs_exist_ok=True)

            # copy any usda's to .py
            if REPLACE_USDA_EXT:
                for filename in os.listdir(sample_source_dir):
                    base_file, ext = os.path.splitext(filename)
                    if ext == ".usda":
                        orig = str(sample_source_dir) + "/" + filename
                        newname = str(sample_output_dir) + "/" + str(base_file) + ".py"
                        shutil.copy(orig, newname)

            # strip out copyright comments in output files
            if STRIP_COPYRIGHTS:
                for filename in os.listdir(sample_output_dir):
                    full_path = os.path.join(sample_output_dir, filename)
                    strip_copyrights(full_path)

            doc.newline()

    generate_sphinx_index(samples)
    sphinx.cmd.build.main([str(SPHINX_DIR), str(SPHINX_DIR / "_build"), "-b", "html"])


def strip_copyrights(filename):
    base_file, ext = os.path.splitext(filename)
    if ext in IMAGE_TYPES:
        print(f"strip_copyrights, skip image :: {filename}")
        return
    with open(filename) as sample_file:
        sample_lines = sample_file.readlines()

    # strip copyrights; the emptiness checks guard against files with no lines
    # .py
    while sample_lines and sample_lines[0].startswith("# SPDX-"):
        sample_lines.pop(0)
    # .cpp
    while sample_lines and sample_lines[0].startswith("// SPDX-"):
        sample_lines.pop(0)
    # get rid of empty spacer line
    if sample_lines and len(sample_lines[0].strip()) < 1:
        sample_lines.pop(0)

    with open(filename, "w") as sample_file:
        for line in sample_lines:
            sample_file.write(line)


def prepend_include_path(in_file_path: str, out_file_path: str, dir_path: str):
    with open(in_file_path) as mdf:
        md_data = mdf.read()

    md_lines = md_data.split("\n")
    lc = 0
    for line in md_lines:
        inc_str = "``` {literalinclude}"
        sp = line.split(inc_str)
        if len(sp) > 1:
            filename = sp[1].strip()
            if REPLACE_USDA_EXT:
                sfn = filename.split(".")
                if len(sfn) > 1 and sfn[1] == "usda":
                    filename = sfn[0] + ".py"
            newl = inc_str + " " + dir_path + "/" + filename
            md_lines[lc] = newl
        lc += 1

    with open(out_file_path, "w") as nmdf:
        for line in md_lines:
            nmdf.writelines(line + "\n")


def generate_sphinx_index(samples):
    cat_names_path = SOURCE_DIR / "category-display-names.toml"
    cat_names = toml.load(cat_names_path)["name_mappings"]
    print(f"CAT_NAMES: {cat_names}")
    ref_links = {"variant-sets": "variant_sets_ref"}

    index_rst = SPHINX_DIR / "usd.rst"
    with open(index_rst, "w") as f:
        doc = RstCloth(f)
        doc.directive("include", "usd_header.rst")
        doc.newline()
        #doc.title("OpenUSD Code Samples")
        for category, cat_samples in samples.items():
            if category in ref_links:
                doc.ref_target(ref_links[category])
                doc.newline()

            human_readable = readable_from_category_dir_name(category)
            if category in cat_names.keys():
                human_readable = cat_names[category]
            doc.h2(human_readable)
            fields = [
                #("caption", human_readable),
                ("titlesonly", ""),
            ]
            doc.newline()
            if TOCTREE_STYLE == 0:
                sample_paths = [f"usd/{category}/{sample[0]}" for sample in cat_samples]
                doc.directive("toctree", None, fields, sample_paths)
                doc.newline()
            elif TOCTREE_STYLE == 1:
                #doc.h2(human_readable)
                doc.newline()
                for sample, title in cat_samples:
                    doc._add("- :doc:`" + title + f" <usd/{category}/" + sample + ">`")
                doc.newline()

        doc.directive("include", "usd_footer.rst")
        doc.newline()


def readable_from_category_dir_name(category):
    sub_strs = category.split("-")
    readable = ""
    for sub in sub_strs:
        readable += sub.capitalize() + " "
    return readable.strip()


if __name__ == "__main__":
    # Create an argument parser
    parser = argparse.ArgumentParser(description='Build rST documentation from code sample source.')
    # Parse the arguments
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    main()
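
# --- Illustrative config.toml sketch (an assumption, for reference only) ---
# The build above reads config["core"]["title"], config["metadata"]["description"],
# and config["metadata"]["keywords"] from each sample's config.toml. A matching
# per-sample file could therefore look like this; the values are invented:
#
#   [core]
#   title = "Create a Perspective Camera"
#
#   [metadata]
#   description = "OpenUSD code samples showing how to create a perspective camera."
#   keywords = ["OpenUSD", "USD", "camera", "perspective"]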
9,545
Python
34.225092
122
0.503929
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

# Add all the imports that you need for your snippets
from pxr import Usd, Sdf, UsdGeom


def descriptive_code_sample_name(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
    """Docstring is optional. Use Google style docstrings if you choose to add them.

    The code sample should be defined as a function. Use a descriptive name
    for the function.

    Use function arguments to:
    - Pass in any objects that your code sample expects to exist (e.g. a Stage)
    - Pass in Paths rather than hard-coding them.

    Use type-hinting to help learners understand what type every variable is. Don't
    assume they'll know.

    Args:
        stage (Usd.Stage): _description_
        prim_path (str, optional): _description_. Defaults to "/World/MyPerspCam".

    Returns:
        UsdGeom.Camera: _description_
    """
    camera_path = Sdf.Path(prim_path)
    usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
    usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
    return usd_camera


#############
# Full Usage
#############

# Here you will show your code sample in context. Add any additional imports
# that you may need for your "Full Usage" code

# You can create an in-memory stage and do any stage setup before calling
# your code sample.
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())

cam_path = default_prim.GetPath().AppendPath("MyPerspCam")

# Call your code sample function
camera = descriptive_code_sample_name(stage, cam_path)

# print out the result
usda = stage.GetRootLayer().ExportToString()
print(usda)

# Do some basic asserts to show learners how to interact with the results.
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
2,131
Python
35.75862
105
0.725481
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_omni_usd.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

"""
Source code for code block in the py_omni_usd flavor. See the py_usd.py
for a full example of writing a code sample.

You should use omni.usd.get_context().get_stage() instead of creating an
in-memory stage for the Full Usage part since this is meant to run in Omniverse.
"""
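
# --- Illustrative sketch (an assumption, not part of the original template) ---
# A minimal "Full Usage" for this flavor could grab the stage from the running
# Omniverse USD context, mirroring the pattern the py_kit_cmds samples in this
# repo use; the camera path below is just an example value.
#
# import omni.usd
# from pxr import UsdGeom
#
# stage = omni.usd.get_context().get_stage()
# camera = UsdGeom.Camera.Define(stage, "/World/MyPerspCam")
# camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)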
403
Python
39.399996
98
0.764268
NVIDIA-Omniverse/OpenUSD-Code-Samples/example-category/example-code-sample/py_usd_var1.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

"""
Source code for another code block in the py_usd flavor. See the py_usd.py
for a full example of writing a code sample.
"""
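
# --- Illustrative sketch (an assumption, not part of the original template) ---
# A variant block typically shows the same task with different parameters, as
# create-perspective-camera/py_usd_var1.py does later in this repo; e.g.:
#
# from pxr import Usd, Sdf, UsdGeom
#
# def create_perspective_35mm_camera(stage: Usd.Stage, prim_path: str = "/World/MyPerspCam") -> UsdGeom.Camera:
#     camera = UsdGeom.Camera.Define(stage, Sdf.Path(prim_path))
#     camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
#     camera.CreateFocalLengthAttr().Set(35)
#     return camera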
265
Python
36.999995
98
0.758491
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_usd.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

from pxr import Sdf, Usd, UsdGeom


def create_orthographic_camera(stage: Usd.Stage, prim_path: str="/World/MyOrthoCam") -> UsdGeom.Camera:
    """Create an orthographic camera

    Args:
        stage (Usd.Stage): A USD Stage to create the camera on.
        prim_path (str, optional): The prim path for where to create the camera. Defaults to "/World/MyOrthoCam".
    """
    camera_path = Sdf.Path(prim_path)
    usd_camera = UsdGeom.Camera.Define(stage, camera_path)
    usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.orthographic)
    return usd_camera


#############
# Full Usage
#############

cam_path = "/World/MyOrthoCam"
stage: Usd.Stage = Usd.Stage.CreateInMemory()
root_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(root_prim.GetPrim())

camera = create_orthographic_camera(stage, cam_path)

usda = stage.GetRootLayer().ExportToString()
print(usda)

# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.orthographic
1,298
Python
31.474999
113
0.718028
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-orthographic-camera/py_kit_cmds.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

import omni.kit.commands
from pxr import UsdGeom


def create_orthographic_camera(prim_path: str="/World/MyOrthoCam"):
    """Create an orthographic camera

    Args:
        prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyOrthoCam".
    """
    omni.kit.commands.execute("CreatePrimWithDefaultXform",
        prim_type="Camera",
        prim_path=prim_path,  # use the argument rather than a hard-coded path
        attributes={"projection": UsdGeom.Tokens.orthographic}
    )


#############
# Full Usage
#############

import omni.usd

# Create an orthographic camera at /World/MyOrthoCam
path = "/World/MyOrthoCam"
create_orthographic_camera(path)

# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid()
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.orthographic
1,082
Python
28.27027
117
0.711645
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

from pxr import Usd, Sdf, UsdGeom


def create_perspective_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
    camera_path = Sdf.Path(prim_path)
    usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
    usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
    return usd_camera


#############
# Full Usage
#############

# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())

# Create the perspective camera at /World/MyPerspCam
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_camera(stage, cam_path)

# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)

# Check that the camera was created
prim = camera.GetPrim()
assert prim.IsValid()
assert camera.GetPath() == Sdf.Path(cam_path)
assert prim.GetTypeName() == "Camera"
projection = camera.GetProjectionAttr().Get()
assert projection == UsdGeom.Tokens.perspective
1,288
Python
33.837837
102
0.743789
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_kit_cmds.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

import omni.kit.commands
from pxr import UsdGeom


def create_perspective_camera(prim_path: str="/World/MyPerspCam"):
    """Create a perspective camera

    Args:
        prim_path (str, optional): The prim path where the camera should be created. Defaults to "/World/MyPerspCam".
    """
    omni.kit.commands.execute("CreatePrimWithDefaultXform",
        prim_type="Camera",
        prim_path=prim_path,
        attributes={
            "projection": UsdGeom.Tokens.perspective,
            "focalLength": 35,
            "horizontalAperture": 20.955,
            "verticalAperture": 15.2908,
            "clippingRange": (0.1, 100000)
        }
    )


#############
# Full Usage
#############

import omni.usd

# Create a perspective camera at /World/MyPerspCam
path = "/World/MyPerspCam"
create_perspective_camera(path)

# Check that the camera was created
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(path)
assert prim.IsValid()
assert prim.GetTypeName() == "Camera"
projection = prim.GetAttribute("projection").Get()
assert projection == UsdGeom.Tokens.perspective
1,239
Python
27.181818
117
0.673123
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/cameras/create-perspective-camera/py_usd_var1.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

from pxr import Usd, Sdf, UsdGeom


def create_perspective_35mm_camera(stage: Usd.Stage, prim_path: str="/World/MyPerspCam") -> UsdGeom.Camera:
    camera_path = Sdf.Path(prim_path)
    usd_camera: UsdGeom.Camera = UsdGeom.Camera.Define(stage, camera_path)
    usd_camera.CreateProjectionAttr().Set(UsdGeom.Tokens.perspective)
    usd_camera.CreateFocalLengthAttr().Set(35)
    # Set a few other common attributes too.
    usd_camera.CreateHorizontalApertureAttr().Set(20.955)
    usd_camera.CreateVerticalApertureAttr().Set(15.2908)
    usd_camera.CreateClippingRangeAttr().Set((0.1, 100000))
    return usd_camera


#############
# Full Usage
#############

# Create an in-memory Stage with /World Xform prim as the default prim
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path("/World"))
stage.SetDefaultPrim(default_prim.GetPrim())

# Create the perspective camera at path /World/MyPerspCam with 35mm
# set for the focal length.
cam_path = default_prim.GetPath().AppendPath("MyPerspCam")
camera = create_perspective_35mm_camera(stage, cam_path)

# Export the complete Stage as a string and print it.
usda = stage.GetRootLayer().ExportToString()
print(usda)

# Check the camera attributes
focal_len = camera.GetFocalLengthAttr().Get()
assert focal_len == 35.0
clip_range = camera.GetClippingRangeAttr().Get()
assert clip_range == (0.1, 100000)
1,533
Python
34.674418
107
0.740378
NVIDIA-Omniverse/OpenUSD-Code-Samples/source/visibility/show-hide-prim/py_usd.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

from typing import Union

from pxr import Sdf, Usd, UsdGeom


def get_visibility_attribute(
    stage: Usd.Stage, prim_path: str
) -> Union[Usd.Attribute, None]:
    """Return the visibility attribute of a prim"""
    path = Sdf.Path(prim_path)
    prim = stage.GetPrimAtPath(path)
    if not prim.IsValid():
        return None
    visibility_attribute = prim.GetAttribute("visibility")
    return visibility_attribute


def hide_prim(stage: Usd.Stage, prim_path: str):
    """Hide a prim

    Args:
        stage (Usd.Stage, required): The USD Stage
        prim_path (str, required): The prim path of the prim to hide
    """
    visibility_attribute = get_visibility_attribute(stage, prim_path)
    if visibility_attribute is None:
        return
    visibility_attribute.Set("invisible")


def show_prim(stage: Usd.Stage, prim_path: str):
    """Show a prim

    Args:
        stage (Usd.Stage, required): The USD Stage
        prim_path (str, required): The prim path of the prim to show
    """
    visibility_attribute = get_visibility_attribute(stage, prim_path)
    if visibility_attribute is None:
        return
    visibility_attribute.Set("inherited")


#############
# Full Usage
#############

# Here you will show your code sample in context. Add any additional imports
# that you may need for your "Full Usage" code

# Create a simple in-memory stage with a Cube
stage: Usd.Stage = Usd.Stage.CreateInMemory()
default_prim_path = Sdf.Path("/World")
default_prim = UsdGeom.Xform.Define(stage, default_prim_path)
stage.SetDefaultPrim(default_prim.GetPrim())
cube_path = default_prim_path.AppendPath("Cube")
cube = UsdGeom.Cube.Define(stage, cube_path)

# The prim is initially visible. Assert so and then demonstrate how to toggle
# it off and on
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"
hide_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "invisible"
show_prim(stage, cube_path)
assert get_visibility_attribute(stage, cube_path).Get() == "inherited"

# Print the USDA out
usda = stage.GetRootLayer().ExportToString()
print(usda)
2,246
Python
30.647887
98
0.702137