| @@ -0,0 +1,123 @@ | |||
| # dora-rs & isaacsim | |||
|  | |||
| ## Preparation | |||
| 1. Clone this repository. | |||
| 2. Add https://huggingface.co/berrylvz/policy_act/tree/main/assets/ckpt into `node-hub/dora-act/dora_act/assets/ckpt/`. | |||
| 3. Download [isaacsim](https://docs.isaacsim.omniverse.nvidia.com/4.5.0/installation/download.html). | |||
| 4. Add the scene assets to `node-hub/dora-isaacsim/dora_isaacsim/assets/`. | |||
| ```shell | |||
| assets/ | |||
| ├── franka/ | |||
| ├── gripper/ | |||
| ├── Owl/ | |||
| ├── Simple_Room/ | |||
| └── stack_cube_franka.usd | |||
| ``` | |||
| 5. Change `ISAAC_PYTHON_PATH` in `./dataflow.yml` to `"<path of isaacsim>/python.sh"`. | |||
| 6. Create conda env. | |||
| ```shell | |||
| conda create -n dora_isaacsim python=3.10.4 | |||
| conda activate dora_isaacsim | |||
| pip install -r requirements.txt | |||
| ``` | |||
| 7. Install necessary packages into isaacsim. | |||
| ```shell | |||
| <path of isaacsim>/python.sh -m pip install -r requirements_isaacsim.txt | |||
| ``` | |||
| ## Getting Started | |||
| 1. Activate Conda environment. | |||
| ```shell | |||
| conda activate dora_isaacsim | |||
| ``` | |||
| 2. Spawn coordinator and daemon. | |||
| ```shell | |||
| dora up | |||
| ``` | |||
| 3. Start the dataflow. | |||
| ```shell | |||
| dora build dataflow.yml | |||
| dora start dataflow.yml | |||
| ``` | |||
| 4. Execution example (refer to the [YouTube video](https://youtu.be/oK5c1U3C87g)). | |||
| Click on the Stage > /World/franka and press `F`. | |||
| 5. Close the dora-rs. | |||
| ```shell | |||
| dora destroy | |||
| ``` | |||
| ## YAML Specification | |||
| ```yaml | |||
| nodes: | |||
| - id: dora-isaacsim | |||
| build: pip install -e ../../node-hub/dora-isaacsim | |||
| path: ../../node-hub/dora-isaacsim/dora_isaacsim/main.py | |||
| env: | |||
| # substitute to your own "<path of isaacsim>/python.sh" | |||
| ISAAC_PYTHON_PATH: "/home/lv/isaacsim/python.sh" | |||
| CONFIG_NAME: "stack_cube_act" | |||
| inputs: | |||
| request_camera: dora-act/request_camera | |||
| request_joint_pos: dora-act/request_joint_pos | |||
| action: dora-act/action | |||
| outputs: | |||
| - camera | |||
| - joint_pos | |||
| - id: dora-act | |||
| build: pip install -e ../../node-hub/dora-act | |||
| path: ../../node-hub/dora-act/dora_act/main.py | |||
| args: | |||
| --task_name stack_cube | |||
| # path of model parameters (relative to the current dataflow file) | |||
| --ckpt_dir ../../dora-act/dora_act/assets/ckpt/ | |||
| --policy_class ACT | |||
| --kl_weight 10 | |||
| --chunk_size 20 | |||
| --hidden_dim 512 | |||
| --batch_size 8 | |||
| --dim_feedforward 3200 | |||
| --num_epochs 2000 | |||
| --lr 1e-5 | |||
| --seed 0 | |||
| --temporal_agg | |||
| # --eval | |||
| inputs: | |||
| camera: dora-isaacsim/camera | |||
| joint_pos: dora-isaacsim/joint_pos | |||
| outputs: | |||
| - request_camera | |||
| - request_joint_pos | |||
| - action | |||
| env: | |||
| SCENARIO: sim | |||
| ``` | |||
| ## Development | |||
| Both `request_camera` and `request_joint_pos` are request triggers with no required data format; they can also be replaced with DORA's built-in timer, e.g. `request_*: dora/timer/millis/50`. | |||
| `camera` is transmitted as RGB data that is flattened into a one-dimensional array. | |||
| Both `joint_pos` and `action` are pose data of the robotic arm, represented as a floating-point list. | |||
| @@ -0,0 +1,42 @@ | |||
| nodes: | |||
| - id: dora-isaacsim | |||
| build: pip install -e ../../node-hub/dora-isaacsim | |||
| path: ../../node-hub/dora-isaacsim/dora_isaacsim/main.py | |||
| env: | |||
| # substitute to your own "<path of isaacsim>/python.sh" | |||
| ISAAC_PYTHON_PATH: "/home/lv/isaacsim/python.sh" | |||
| CONFIG_NAME: "stack_cube_act" | |||
| inputs: | |||
| request_camera: dora-act/request_camera | |||
| request_joint_pos: dora-act/request_joint_pos | |||
| action: dora-act/action | |||
| outputs: | |||
| - camera | |||
| - joint_pos | |||
| - id: dora-act | |||
| build: pip install -e ../../node-hub/dora-act | |||
| path: ../../node-hub/dora-act/dora_act/main.py | |||
| args: | |||
| --task_name stack_cube | |||
| --ckpt_dir ../../dora-act/dora_act/assets/ckpt/ | |||
| --policy_class ACT | |||
| --kl_weight 10 | |||
| --chunk_size 20 | |||
| --hidden_dim 512 | |||
| --batch_size 8 | |||
| --dim_feedforward 3200 | |||
| --num_epochs 2000 | |||
| --lr 1e-5 | |||
| --seed 0 | |||
| --temporal_agg | |||
| # --eval | |||
| inputs: | |||
| camera: dora-isaacsim/camera | |||
| joint_pos: dora-isaacsim/joint_pos | |||
| outputs: | |||
| - request_camera | |||
| - request_joint_pos | |||
| - action | |||
| env: | |||
| SCENARIO: sim | |||
| @@ -0,0 +1,40 @@ | |||
| # dora-act | |||
| ## Getting started | |||
| - Install it with uv: | |||
| ```bash | |||
| uv venv -p 3.11 --seed | |||
| uv pip install -e . | |||
| ``` | |||
| ## Contribution Guide | |||
| - Format with [ruff](https://docs.astral.sh/ruff/): | |||
| ```bash | |||
| uv pip install ruff | |||
| uv run ruff check . --fix | |||
| ``` | |||
| - Lint with ruff: | |||
| ```bash | |||
| uv run ruff check . | |||
| ``` | |||
| - Test with [pytest](https://github.com/pytest-dev/pytest) | |||
| ```bash | |||
| uv pip install pytest | |||
| uv run pytest . # Test | |||
| ``` | |||
| ## YAML Specification | |||
| ## Examples | |||
| ## License | |||
| dora-act's code is released under the MIT License | |||
| @@ -0,0 +1,13 @@ | |||
"""dora_act package: exposes the project README as the package docstring."""
import os
# Path to README.md one directory above this package (the dora-act project root).
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")
# Read the README into __doc__ so `help(dora_act)` shows the project docs;
# fall back to a short notice when the file is missing (e.g. in some installs).
try:
    with open(readme_path, encoding="utf-8") as f:
        __doc__ = f.read()
except FileNotFoundError:
    __doc__ = "README file not found."
| @@ -0,0 +1,6 @@ | |||
"""Command-line entry point: ``python -m dora_act`` runs the node's main()."""
from .main import main
if __name__ == "__main__":
    main()
| @@ -0,0 +1,27 @@ | |||
### Task parameters
# Root directory for recorded demonstration datasets.
DATA_DIR = "data/input"
# Camera image resolution (height x width) shared by all tasks.
IMG_H = 360
IMG_W = 640
# Per-task configuration: dataset location, episode statistics, camera list,
# and the robot state dimensionality consumed by the policy.
TASK_CONFIGS = {
    "grasp_corn": {
        "dataset_dir": DATA_DIR + "/grasp_corn_joint_for_real_v0",
        "num_episodes": 68,
        "episode_len": 180,
        "camera_names": ["front"],
        "state_dim": 8,
    },
    "grasp_cube": {
        "dataset_dir": DATA_DIR + "/grasp_cube_joint_for_real_v0",
        "num_episodes": 81,
        "episode_len": 180,
        "camera_names": ["front"],
        "state_dim": 8,
    },
    "stack_cube": {
        "dataset_dir": "./dataset/stack_cube",  # tip: change this path to your own dataset location
        "num_episodes": 15,  # number of episodes actually collected
        "episode_len": 125,  # length of each episode
        "camera_names": ["front"],
        "state_dim": 8,
    },
}
| @@ -0,0 +1,201 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright 2020 - present, Facebook, Inc | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -0,0 +1,9 @@ | |||
| This part of the codebase is modified from DETR https://github.com/facebookresearch/detr under APACHE 2.0. | |||
| @article{Carion2020EndtoEndOD, | |||
| title={End-to-End Object Detection with Transformers}, | |||
| author={Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko}, | |||
| journal={ArXiv}, | |||
| year={2020}, | |||
| volume={abs/2005.12872} | |||
| } | |||
| @@ -0,0 +1,170 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| import argparse | |||
| from pathlib import Path | |||
| import IPython | |||
| import numpy as np | |||
| import torch | |||
| from .models import build_ACT_model, build_CNNMLP_model | |||
| e = IPython.embed | |||
def get_args_parser():
    """Build the DETR/ACT argument parser.

    Returns an ``argparse.ArgumentParser`` (``add_help=False`` so it can be
    used as a parent parser).  Many defaults are placeholders that are later
    replaced via ``args_override`` in ``build_*_model_and_optimizer``.
    """
    p = argparse.ArgumentParser("Set transformer detector", add_help=False)

    # --- Optimisation hyper-parameters (mostly overridden or unused here) ---
    p.add_argument("--lr", type=float, default=1e-4)
    p.add_argument("--lr_backbone", type=float, default=1e-5)
    p.add_argument("--batch_size", type=int, default=2)
    p.add_argument("--weight_decay", type=float, default=1e-4)
    p.add_argument("--epochs", type=int, default=300)
    p.add_argument("--lr_drop", type=int, default=200)
    p.add_argument("--clip_max_norm", type=float, default=0.1,
                   help="gradient clipping max norm")

    # --- Backbone ---
    p.add_argument("--backbone", type=str, default="resnet18",
                   help="Name of the convolutional backbone to use")
    p.add_argument("--dilation", action="store_true",
                   help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    p.add_argument("--position_embedding", type=str, default="sine",
                   choices=("sine", "learned"),
                   help="Type of positional embedding to use on top of the image features")
    p.add_argument("--camera_names", type=list, default=[],
                   help="A list of camera names")

    # --- Transformer ---
    p.add_argument("--enc_layers", type=int, default=4,
                   help="Number of encoding layers in the transformer")
    p.add_argument("--dec_layers", type=int, default=6,
                   help="Number of decoding layers in the transformer")
    p.add_argument("--dim_feedforward", type=int, default=2048,
                   help="Intermediate size of the feedforward layers in the transformer blocks")
    p.add_argument("--hidden_dim", type=int, default=256,
                   help="Size of the embeddings (dimension of the transformer)")
    p.add_argument("--dropout", type=float, default=0.1,
                   help="Dropout applied in the transformer")
    p.add_argument("--nheads", type=int, default=8,
                   help="Number of attention heads inside the transformer's attentions")
    p.add_argument("--num_queries", type=int, default=400,
                   help="Number of query slots")
    p.add_argument("--pre_norm", action="store_true")

    # --- Segmentation ---
    p.add_argument("--masks", action="store_true",
                   help="Train segmentation head if the flag is provided")

    # Arguments mirrored from imitate_episodes so parsing does not fail;
    # their values are not consumed by this module.
    p.add_argument("--eval", action="store_true")
    p.add_argument("--onscreen_render", action="store_true")
    p.add_argument("--ckpt_dir", type=str, required=True, help="ckpt_dir")
    p.add_argument("--policy_class", type=str, required=True,
                   help="policy_class, capitalize")
    p.add_argument("--task_name", type=str, required=True, help="task_name")
    p.add_argument("--seed", type=int, required=True, help="seed")
    p.add_argument("--num_epochs", type=int, required=True, help="num_epochs")
    p.add_argument("--kl_weight", type=int, required=False, help="KL Weight")
    p.add_argument("--chunk_size", type=int, required=False, help="chunk_size")
    p.add_argument("--temporal_agg", action="store_true")
    return p
def build_ACT_model_and_optimizer(args_override):
    """Instantiate the ACT (DETR-VAE) policy and its AdamW optimizer.

    CLI arguments are parsed first (so required flags must be present on
    ``sys.argv``), then selectively replaced by entries of ``args_override``.
    Returns ``(model, optimizer)`` with the model moved to CUDA.
    """
    cli = argparse.ArgumentParser(
        "DETR training and evaluation script", parents=[get_args_parser()]
    )
    args = cli.parse_args()
    # Apply caller-supplied overrides on top of the parsed CLI values.
    for key, value in args_override.items():
        setattr(args, key, value)

    model = build_ACT_model(args)
    model.cuda()

    # Backbone parameters train with their own (usually smaller) learning
    # rate; every other trainable parameter uses the global one.
    backbone_params, other_params = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (backbone_params if "backbone" in name else other_params).append(param)

    optimizer = torch.optim.AdamW(
        [
            {"params": other_params},
            {"params": backbone_params, "lr": args.lr_backbone},
        ],
        lr=args.lr,
        weight_decay=args.weight_decay,
    )
    return model, optimizer
def build_CNNMLP_model_and_optimizer(args_override):
    """Instantiate the CNN+MLP baseline policy and its AdamW optimizer.

    Mirrors ``build_ACT_model_and_optimizer``: parse CLI arguments, apply
    ``args_override``, build the model on CUDA, and give the backbone its
    own learning rate.  Returns ``(model, optimizer)``.
    """
    cli = argparse.ArgumentParser(
        "DETR training and evaluation script", parents=[get_args_parser()]
    )
    args = cli.parse_args()
    for key, value in args_override.items():
        setattr(args, key, value)

    model = build_CNNMLP_model(args)
    model.cuda()

    # Split trainable parameters: backbone vs everything else.
    backbone_params, other_params = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (backbone_params if "backbone" in name else other_params).append(param)

    optimizer = torch.optim.AdamW(
        [
            {"params": other_params},
            {"params": backbone_params, "lr": args.lr_backbone},
        ],
        lr=args.lr,
        weight_decay=args.weight_decay,
    )
    return model, optimizer
| @@ -0,0 +1,11 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| from .detr_vae import build as build_vae | |||
| from .detr_vae import build_cnnmlp as build_cnnmlp | |||
def build_ACT_model(args):
    """Build the ACT policy network; thin wrapper over ``detr_vae.build``."""
    return build_vae(args)
def build_CNNMLP_model(args):
    """Build the CNN+MLP baseline policy; thin wrapper over ``detr_vae.build_cnnmlp``."""
    return build_cnnmlp(args)
| @@ -0,0 +1,128 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| Backbone modules. | |||
| """ | |||
| from collections import OrderedDict | |||
| from typing import Dict, List | |||
| import IPython | |||
| import torch | |||
| import torch.nn.functional as F | |||
| import torchvision | |||
| from torch import nn | |||
| from torchvision.models._utils import IntermediateLayerGetter | |||
| from ..util.misc import NestedTensor, is_main_process | |||
| from .position_encoding import build_position_encoding | |||
| e = IPython.embed | |||
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d whose batch statistics and affine parameters are fixed.

    Adapted from ``torchvision.misc.ops`` with an ``eps`` added before
    ``rsqrt``; without it, backbones other than
    torchvision.models.resnet[18,34,50,101] can produce NaNs.
    """

    def __init__(self, n):
        super().__init__()
        # All four tensors are buffers: never updated by the optimizer, but
        # they follow the module across devices and into state dicts.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Regular BatchNorm checkpoints carry a num_batches_tracked entry
        # that this frozen variant does not define; drop it before loading.
        tracked_key = prefix + "num_batches_tracked"
        if tracked_key in state_dict:
            del state_dict[tracked_key]
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # Reshape everything to (1, C, 1, 1) up front so the normalisation
        # reduces to a single fuser-friendly scale-and-shift.
        shape = (1, -1, 1, 1)
        eps = 1e-5
        scale = self.weight.reshape(shape) * (self.running_var.reshape(shape) + eps).rsqrt()
        shift = self.bias.reshape(shape) - self.running_mean.reshape(shape) * scale
        return x * scale + shift
class BackboneBase(nn.Module):
    """Wraps a torchvision ResNet so ``forward`` returns a dict of feature
    maps, one entry per requested layer.

    NOTE(review): upstream DETR optionally freezes early backbone layers
    here and attaches padding masks to the outputs; both were disabled in
    the original and are omitted.
    """

    def __init__(
        self,
        backbone: nn.Module,
        train_backbone: bool,
        num_channels: int,
        return_interm_layers: bool,
    ):
        super().__init__()
        # Either expose all four residual stages, or only the last one.
        if return_interm_layers:
            wanted = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            wanted = {"layer4": "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=wanted)
        self.num_channels = num_channels

    def forward(self, tensor):
        # {stage name: feature map} as produced by IntermediateLayerGetter.
        return self.body(tensor)
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool):
        # Build the torchvision ResNet by name with frozen BatchNorm;
        # pretrained weights are fetched only on the main process.
        # NOTE(review): `pretrained=` is deprecated in newer torchvision
        # (replaced by `weights=`) — confirm the pinned torchvision version.
        resnet = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(),
            norm_layer=FrozenBatchNorm2d,
        )
        channels = 512 if name in ("resnet18", "resnet34") else 2048
        super().__init__(resnet, train_backbone, channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Runs the backbone, then attaches a position encoding to every
    returned feature map.  ``self[0]`` is the backbone, ``self[1]`` the
    position-embedding module."""

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        features = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for feature in features.values():
            out.append(feature)
            # Position encoding computed per feature map, cast to its dtype.
            pos.append(self[1](feature).to(feature.dtype))
        return out, pos
def build_backbone(args):
    """Assemble backbone + positional encoding into a single Joiner module."""
    pos_enc = build_position_encoding(args)
    # The backbone is only trained when a positive backbone LR is given.
    trainable = args.lr_backbone > 0
    # Intermediate layers are only needed when segmentation masks are on.
    joined = Joiner(
        Backbone(args.backbone, trainable, args.masks, args.dilation),
        pos_enc,
    )
    joined.num_channels = joined[0].num_channels
    return joined
| @@ -0,0 +1,308 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| DETR model and criterion classes. | |||
| """ | |||
| import IPython | |||
| import numpy as np | |||
| import torch | |||
| from torch import nn | |||
| from torch.autograd import Variable | |||
| from .backbone import build_backbone | |||
| from .transformer import TransformerEncoder, TransformerEncoderLayer, build_transformer | |||
| e = IPython.embed | |||
def reparametrize(mu, logvar):
    """Draw a latent sample via the VAE reparameterization trick.

    Args:
        mu: mean of the latent Gaussian.
        logvar: log-variance of the latent Gaussian (same shape as ``mu``).

    Returns:
        ``mu + std * eps`` with ``eps ~ N(0, I)``; differentiable w.r.t.
        ``mu`` and ``logvar`` (``eps`` carries no gradient).
    """
    std = logvar.div(2).exp()
    # torch.randn_like replaces the deprecated torch.autograd.Variable
    # wrapper (a no-op since PyTorch 0.4) and keeps std's device/dtype.
    eps = torch.randn_like(std)
    return mu + std * eps
| # 正弦位置编码 | |||
def get_sinusoid_encoding_table(n_position, d_hid):
    """Build the classic sinusoidal position table, shape (1, n_position, d_hid)."""
    # angle[p, j] = p / 10000^(2*(j//2)/d_hid), computed by broadcasting a
    # (n_position, 1) position column against a (d_hid,) frequency row.
    positions = np.arange(n_position)[:, None]
    dim_pairs = 2 * (np.arange(d_hid) // 2)
    table = positions / np.power(10000, dim_pairs / d_hid)
    table[:, 0::2] = np.sin(table[:, 0::2])  # even dims -> sin (2i)
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd dims -> cos (2i+1)
    return torch.FloatTensor(table).unsqueeze(0)
class DETRVAE(nn.Module):
    """DETR-style conditional VAE that predicts a chunk of future actions (ACT).

    Camera images and the robot joint state are encoded by a transformer and
    `num_queries` action slots are decoded in one pass. During training an
    auxiliary transformer encoder infers a latent variable z from the
    ground-truth action sequence (the CVAE posterior); at inference z is
    fixed to zero.
    """

    def __init__(self, backbones, transformer, encoder, state_dim, num_queries, camera_names):
        """Initializes the model.
        Parameters:
            backbones: image backbones (see backbone.py); backbones[0] is shared
                across cameras. May be None for a state-only model.
            transformer: torch module of the transformer architecture. See transformer.py
            encoder: transformer encoder used as the CVAE posterior over z.
            state_dim: robot state dimension of the environment.
            num_queries: number of decoded action slots, i.e. the chunk size.
            camera_names: camera identifiers; forward() expects one image per entry.
        """
        super().__init__()
        self.num_queries = num_queries  # num_queries = chunk_size
        self.camera_names = camera_names
        self.transformer = transformer
        self.encoder = encoder
        hidden_dim = transformer.d_model  # d_model = 512
        # Per-query output heads: an action vector and a padding logit.
        self.action_head = nn.Linear(hidden_dim, state_dim)
        self.is_pad_head = nn.Linear(hidden_dim, 1)
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        if backbones is not None:
            # Image pipeline: 1x1 conv maps backbone channels to hidden_dim.
            self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1)
            self.backbones = nn.ModuleList(backbones)
            self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
        else:
            # State-only pipeline (no cameras).
            # TODO
            # input_dim = 14 + 7 # robot_state + env_state
            self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
            self.input_proj_env_state = nn.Linear(7, hidden_dim)
            self.pos = torch.nn.Embedding(2, hidden_dim)
            self.backbones = None
        # encoder extra parameters
        self.latent_dim = 32  # final size of latent z
        self.cls_embed = nn.Embedding(1, hidden_dim)  # extra cls token embedding
        self.encoder_action_proj = nn.Linear(
            state_dim, hidden_dim
        )  # project action to embedding (8 -> 512)
        self.encoder_joint_proj = nn.Linear(
            state_dim, hidden_dim
        )  # project qpos to embedding (8 -> 512)
        self.latent_proj = nn.Linear(
            hidden_dim, self.latent_dim * 2
        )  # project hidden state to latent mean and log-variance (512 -> 64)
        self.register_buffer(
            "pos_table", get_sinusoid_encoding_table(1 + 1 + num_queries, hidden_dim)
        )  # [CLS], qpos, a_seq = 1 + 1 + num_queries
        # decoder extra parameters
        self.latent_out_proj = nn.Linear(
            self.latent_dim, hidden_dim
        )  # project latent sample to embedding
        self.additional_pos_embed = nn.Embedding(
            2, hidden_dim
        )  # learned position embedding for proprio and latent

    def forward(self, qpos, image, env_state, actions=None, is_pad=None):
        """
        qpos: batch, qpos_dim
        image: batch, num_cam, channel, height, width
        env_state: None
        actions: batch, seq, action_dim (only supplied during training)
        is_pad: batch, seq boolean mask marking padded action steps
        Returns (a_hat, is_pad_hat, [mu, logvar]); mu/logvar are None at test time.
        """
        # NOTE(review): torch.tensor on an existing tensor copies it and warns;
        # torch.as_tensor may be intended here — confirm with the callers.
        qpos = torch.tensor(qpos, dtype=torch.float32)
        is_training = actions is not None  # train or val
        bs, _ = qpos.shape
        ### Obtain latent z from action sequence
        # Training phase: run the CVAE encoder over [CLS, qpos, actions].
        if is_training:
            actions = torch.tensor(actions, dtype=torch.float32)
            # project action sequence to embedding dim, and concat with a CLS token
            action_embed = self.encoder_action_proj(actions)  # (bs, seq, hidden_dim)
            qpos_embed = self.encoder_joint_proj(qpos)  # (bs, hidden_dim)
            qpos_embed = torch.unsqueeze(qpos_embed, axis=1)  # (bs, 1, hidden_dim)
            cls_embed = self.cls_embed.weight  # (1, hidden_dim)
            cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1)  # (bs, 1, hidden_dim)
            encoder_input = torch.cat(
                [cls_embed, qpos_embed, action_embed], axis=1
            )  # (bs, seq+1, hidden_dim)
            encoder_input = encoder_input.permute(1, 0, 2)  # (seq+1, bs, hidden_dim)
            # do not mask the CLS and qpos tokens
            cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device)  # False: not a padding
            is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1)  # (bs, seq+1)
            # obtain position embedding (fixed sinusoidal table, no gradient)
            pos_embed = self.pos_table.clone().detach()
            pos_embed = pos_embed.permute(1, 0, 2)  # (seq+1, 1, hidden_dim)
            # query model
            encoder_output = self.encoder(encoder_input, pos=pos_embed, src_key_padding_mask=is_pad)
            encoder_output = encoder_output[0]  # take cls output only
            # Split the projection into posterior mean and log-variance.
            latent_info = self.latent_proj(encoder_output)
            mu = latent_info[:, : self.latent_dim]
            logvar = latent_info[:, self.latent_dim :]
            latent_sample = reparametrize(mu, logvar)
            latent_input = self.latent_out_proj(latent_sample)
        else:
            # Test time: the latent z is fixed to zero (deterministic policy).
            mu = logvar = None
            latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device)
            latent_input = self.latent_out_proj(latent_sample)
        if self.backbones is not None:
            # Image observation features and position embeddings
            all_cam_features = []
            all_cam_pos = []
            for cam_id, cam_name in enumerate(self.camera_names):
                # A single backbone (index 0) is shared across all cameras.
                features, pos = self.backbones[0](image[:, cam_id])  # HARDCODED
                features = features[0]  # take the last layer feature
                pos = pos[0]
                all_cam_features.append(self.input_proj(features))
                all_cam_pos.append(pos)
            # proprioception features
            proprio_input = self.input_proj_robot_state(qpos)
            # fold camera dimension into width dimension
            src = torch.cat(all_cam_features, axis=3)
            pos = torch.cat(all_cam_pos, axis=3)
            hs = self.transformer(
                src,
                None,
                self.query_embed.weight,
                pos,
                latent_input,
                proprio_input,
                self.additional_pos_embed.weight,
            )[0]
        else:
            # State-only path: concatenate projected qpos and env_state tokens.
            qpos = self.input_proj_robot_state(qpos)
            env_state = self.input_proj_env_state(env_state)
            transformer_input = torch.cat([qpos, env_state], axis=1)  # seq length = 2
            hs = self.transformer(
                transformer_input, None, self.query_embed.weight, self.pos.weight
            )[0]
        a_hat = self.action_head(hs)
        is_pad_hat = self.is_pad_head(hs)
        return a_hat, is_pad_hat, [mu, logvar]
class CNNMLP(nn.Module):
    """Simple baseline policy: per-camera CNN features are down-projected,
    flattened, concatenated with the joint state, and fed through an MLP
    that regresses a single action.
    """

    def __init__(self, backbones, state_dim, camera_names):
        """Build the model.

        Parameters:
            backbones: list of image backbones, one per camera (see backbone.py).
                Must not be None.
            state_dim: dimensionality of the robot state / predicted action.
            camera_names: camera identifiers; forward() expects one image per
                entry, in the same order.
        """
        super().__init__()
        self.camera_names = camera_names
        self.action_head = nn.Linear(1000, state_dim)  # TODO add more
        if backbones is None:
            raise NotImplementedError
        self.backbones = nn.ModuleList(backbones)
        # Shrink each backbone's channel count (num_channels -> 32) before flattening.
        self.backbone_down_projs = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(b.num_channels, 128, kernel_size=5),
                    nn.Conv2d(128, 64, kernel_size=5),
                    nn.Conv2d(64, 32, kernel_size=5),
                )
                for b in backbones
            ]
        )
        # 768 flattened features per camera plus 14 qpos entries.
        mlp_in_dim = 768 * len(backbones) + 14
        self.mlp = mlp(input_dim=mlp_in_dim, hidden_dim=1024, output_dim=14, hidden_depth=2)

    def forward(self, qpos, image, env_state, actions=None):
        """Predict one action from camera images and the robot state.

        qpos: (batch, qpos_dim) robot joint state.
        image: (batch, num_cam, channel, height, width) camera images.
        env_state: unused.
        actions: unused here; kept for API symmetry with DETRVAE.
        """
        batch = qpos.shape[0]
        # Extract, down-project and flatten features for every camera.
        flat_feats = []
        for idx in range(len(self.camera_names)):
            feats, _pos = self.backbones[idx](image[:, idx])
            feats = feats[0]  # last layer feature map
            flat_feats.append(self.backbone_down_projs[idx](feats).reshape([batch, -1]))
        combined = torch.cat(flat_feats + [qpos], axis=1)  # 768 per camera + 14 qpos
        return self.mlp(combined)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth):
    """Build a plain ReLU MLP as an nn.Sequential.

    hidden_depth counts the hidden layers; 0 yields a single Linear mapping
    input_dim straight to output_dim.
    """
    if hidden_depth == 0:
        return nn.Sequential(nn.Linear(input_dim, output_dim))
    layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
    for _ in range(hidden_depth - 1):
        layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)])
    layers.append(nn.Linear(hidden_dim, output_dim))
    return nn.Sequential(*layers)
def build_encoder(args):
    """Construct the standalone transformer encoder used as the CVAE posterior.

    Reads hidden_dim, dropout, nheads, dim_feedforward, enc_layers and
    pre_norm from args; the activation is fixed to ReLU.
    """
    proto_layer = TransformerEncoderLayer(
        args.hidden_dim,      # d_model, e.g. 512
        args.nheads,          # e.g. 8
        args.dim_feedforward, # e.g. 2048
        args.dropout,         # e.g. 0.1
        "relu",
        args.pre_norm,
    )
    # Pre-norm stacks need a final LayerNorm; post-norm stacks do not.
    final_norm = nn.LayerNorm(args.hidden_dim) if args.pre_norm else None
    # TODO: shared with the VAE decoder layer count (enc_layers, e.g. 4).
    return TransformerEncoder(proto_layer, args.enc_layers, final_norm)
def build(args):
    """Assemble the full DETRVAE (ACT) policy from parsed CLI args.

    A single image backbone is shared across all cameras; the transformer
    decodes num_queries actions of dimension state_dim.
    """
    # A single backbone instance is shared across cameras (see DETRVAE.forward).
    backbones = [build_backbone(args)]
    model = DETRVAE(
        backbones,
        build_transformer(args),
        build_encoder(args),
        state_dim=args.state_dim,
        num_queries=args.num_queries,
        camera_names=args.camera_names,
    )
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of parameters: %.2fM" % (trainable / 1e6,))
    return model
def build_cnnmlp(args):
    """Assemble the CNNMLP baseline: one image backbone per camera + MLP head."""
    state_dim = 14  # TODO hardcode
    # Unlike build(), each camera gets its own backbone here.
    backbones = [build_backbone(args) for _ in args.camera_names]
    model = CNNMLP(
        backbones,
        state_dim=state_dim,
        camera_names=args.camera_names,
    )
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of parameters: %.2fM" % (trainable / 1e6,))
    return model
| @@ -0,0 +1,109 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| Various positional encodings for the transformer. | |||
| """ | |||
| import math | |||
| import IPython | |||
| import torch | |||
| from torch import nn | |||
| from ..util.misc import NestedTensor | |||
| e = IPython.embed | |||
class PositionEmbeddingSine(nn.Module):
    """Fixed 2-D sinusoidal position embedding.

    Image-generalized version of the "Attention Is All You Need" encoding:
    each spatial location gets num_pos_feats sine/cosine channels per axis,
    so the output has 2 * num_pos_feats channels in total.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        # Default scale maps normalized positions onto a full sine period.
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, tensor):
        """Return a (1, 2*num_pos_feats, H, W) embedding for input (N, C, H, W)."""
        x = tensor
        # No padding mask is available here, so every position counts as valid.
        valid = torch.ones_like(x[0, [0]])  # (1, H, W)
        y_pos = valid.cumsum(1, dtype=torch.float32)  # 1..H down the rows
        x_pos = valid.cumsum(2, dtype=torch.float32)  # 1..W across the columns
        if self.normalize:
            eps = 1e-6
            y_pos = y_pos / (y_pos[:, -1:, :] + eps) * self.scale
            x_pos = x_pos / (x_pos[:, :, -1:] + eps) * self.scale
        freq_idx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        # Channel pairs (2i, 2i+1) share a frequency: temperature^(2i/d).
        inv_freq = self.temperature ** (2 * (freq_idx // 2) / self.num_pos_feats)
        angles_x = x_pos[:, :, :, None] / inv_freq
        angles_y = y_pos[:, :, :, None] / inv_freq
        # Interleave sine on even channels, cosine on odd channels.
        emb_x = torch.stack(
            (angles_x[:, :, :, 0::2].sin(), angles_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        emb_y = torch.stack(
            (angles_y[:, :, :, 0::2].sin(), angles_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        return torch.cat((emb_y, emb_x), dim=3).permute(0, 3, 1, 2)
class PositionEmbeddingLearned(nn.Module):
    """Learned absolute 2-D position embedding (supports maps up to 50x50)."""

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # One learned vector per row index and one per column index.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-draw both embedding tables uniformly in [0, 1)."""
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        """Return a (N, 2*num_pos_feats, H, W) embedding for tensor_list.tensors."""
        feats = tensor_list.tensors
        height, width = feats.shape[-2:]
        col_ids = torch.arange(width, device=feats.device)
        row_ids = torch.arange(height, device=feats.device)
        col_vecs = self.col_embed(col_ids)  # (W, F)
        row_vecs = self.row_embed(row_ids)  # (H, F)
        # Broadcast the two axes into an (H, W, 2F) grid, then move channels first.
        grid = torch.cat(
            [
                col_vecs.unsqueeze(0).repeat(height, 1, 1),
                row_vecs.unsqueeze(1).repeat(1, width, 1),
            ],
            dim=-1,
        )
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feats.shape[0], 1, 1, 1)
def build_position_encoding(args):
    """Select the position-embedding module requested by args.position_embedding.

    "v2"/"sine" -> fixed sinusoidal (normalized); "v3"/"learned" -> learned.
    Each spatial axis receives half of args.hidden_dim channels.
    """
    N_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ("v2", "sine"):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if kind in ("v3", "learned"):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
| @@ -0,0 +1,410 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| DETR Transformer class. | |||
| Copy-paste from torch.nn.Transformer with modifications: | |||
| * positional encodings are passed in MHattention | |||
| * extra LN at the end of encoder is removed | |||
| * decoder returns a stack of activations from all decoding layers | |||
| """ | |||
| import copy | |||
| from typing import List, Optional | |||
| import IPython | |||
| import torch | |||
| import torch.nn.functional as F | |||
| from torch import Tensor, nn | |||
| e = IPython.embed | |||
class Transformer(nn.Module):
    """DETR-style encoder-decoder transformer.

    Differences from torch.nn.Transformer: positional encodings are passed
    into every attention call instead of only at the input, the extra final
    encoder LayerNorm is only used with pre-norm, and the decoder can return
    the activations of all decoding layers for auxiliary losses.
    """

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
        return_intermediate_dec=False,
    ):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        # Pre-norm stacks need a final LayerNorm after the encoder.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = TransformerDecoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(
            decoder_layer,
            num_decoder_layers,
            decoder_norm,
            return_intermediate=return_intermediate_dec,
        )
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix; 1-D params (biases, norms)
        # keep their module defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(
        self,
        src,
        mask,
        query_embed,
        pos_embed,
        latent_input=None,
        proprio_input=None,
        additional_pos_embed=None,
    ):
        """Encode src and decode one output per query embedding.

        src: either (N, C, H, W) image features — then latent_input,
            proprio_input and additional_pos_embed must be provided and are
            prepended as two extra sequence tokens — or (N, HW, C) already
            flattened features.
        mask: key-padding mask forwarded to encoder and decoder (may be None).
        query_embed: (num_queries, d_model) learned query embeddings.
        pos_embed: positional embedding matching src's layout.
        Returns hs with layout (layers_or_1, N, num_queries, d_model) after
        the final transpose.
        """
        # TODO flatten only when input has H and W
        if len(src.shape) == 4:  # has H and W
            # flatten NxCxHxW to HWxNxC
            bs, c, h, w = src.shape
            src = src.flatten(2).permute(2, 0, 1)
            pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1)
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
            # mask = mask.flatten(1)
            additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(
                1, bs, 1
            )  # seq, bs, dim
            pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0)
            # Prepend the latent-z token and the proprioception token to the
            # image-feature sequence so the encoder attends over all three.
            addition_input = torch.stack([latent_input, proprio_input], axis=0)
            src = torch.cat([addition_input, src], axis=0)
        else:
            assert len(src.shape) == 3
            # flatten NxHWxC to HWxNxC
            bs, hw, c = src.shape
            src = src.permute(1, 0, 2)
            pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1)
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        # Decoder queries start at zero; identity enters via query_pos.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(
            tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
        )
        hs = hs.transpose(1, 2)  # swap (queries, batch) -> (batch, queries)
        return hs
class TransformerEncoder(nn.Module):
    """Stack of num_layers identical encoder layers with an optional final norm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        # Each layer is an independent deep copy of the prototype layer.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        """Run src through every layer in order; masks and pos are shared."""
        hidden = src
        for enc_layer in self.layers:
            hidden = enc_layer(
                hidden, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
            )
        return hidden if self.norm is None else self.norm(hidden)
class TransformerDecoder(nn.Module):
    """Stack of decoder layers, optionally returning every layer's output."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        # Independent deep copies of the prototype layer.
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        """Run tgt through all layers, cross-attending to memory.

        With return_intermediate, returns the normalized output of every
        layer stacked as (num_layers, ...); otherwise the final output with
        a leading singleton dimension.
        NOTE(review): return_intermediate=True assumes self.norm is not None;
        a norm-less intermediate decoder would raise here — confirm intended.
        """
        output = tgt
        intermediate = []
        for layer in self.layers:
            output = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
            )
            if self.return_intermediate:
                intermediate.append(self.norm(output))
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                # Swap the last entry for the freshly-normalized final output
                # so it is not normalized twice.
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
    """One transformer encoder layer: self-attention followed by feed-forward.

    Supports both post-norm (original Transformer) and pre-norm ordering,
    chosen by normalize_before. Positional encodings are added to queries
    and keys only, never to the values.
    """

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding to tensor (no-op when pos is None)."""
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        """Post-norm ordering: sublayer -> residual add -> LayerNorm."""
        qk = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(
            qk, qk, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ff_out))

    def forward_pre(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        """Pre-norm ordering: LayerNorm -> sublayer -> residual add."""
        normed = self.norm1(src)
        qk = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(
            qk, qk, value=normed, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ff_out)

    def forward(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        """Dispatch to the pre- or post-norm variant."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
    """One transformer decoder layer: self-attention over the queries,
    cross-attention into the encoder memory, then a feed-forward block.

    Supports both post-norm and pre-norm ordering via normalize_before.
    Positional encodings (pos for memory, query_pos for queries) are added
    to queries/keys only, never to the values.
    """

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Adding pos is a no-op when no positional encoding is supplied.
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        """Post-norm ordering: each sublayer is followed by residual + LayerNorm."""
        # 1) Self-attention among the decoder queries.
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(
            q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # 2) Cross-attention from queries into the encoder memory.
        tgt2 = self.multihead_attn(
            query=self.with_pos_embed(tgt, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # 3) Position-wise feed-forward block.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        """Pre-norm ordering: LayerNorm precedes each sublayer; residual after."""
        # 1) Self-attention among the decoder queries.
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(
            q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        # 2) Cross-attention from queries into the encoder memory.
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(
            query=self.with_pos_embed(tgt2, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        # 3) Position-wise feed-forward block.
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        """Dispatch to the pre- or post-norm variant."""
        if self.normalize_before:
            return self.forward_pre(
                tgt,
                memory,
                tgt_mask,
                memory_mask,
                tgt_key_padding_mask,
                memory_key_padding_mask,
                pos,
                query_pos,
            )
        return self.forward_post(
            tgt,
            memory,
            tgt_mask,
            memory_mask,
            tgt_key_padding_mask,
            memory_key_padding_mask,
            pos,
            query_pos,
        )
| def _get_clones(module, N): | |||
| return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | |||
def build_transformer(args):
    """Create the DETR Transformer configured from parsed CLI args.

    Intermediate decoder-layer outputs are always returned so auxiliary
    losses can be attached per layer.
    """
    return Transformer(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
| def _get_activation_fn(activation): | |||
| """Return an activation function given a string""" | |||
| if activation == "relu": | |||
| return F.relu | |||
| if activation == "gelu": | |||
| return F.gelu | |||
| if activation == "glu": | |||
| return F.glu | |||
| raise RuntimeError(f"activation should be relu/gelu, not {activation}.") | |||
| @@ -0,0 +1,11 @@ | |||
"""Minimal packaging script for the vendored DETR code."""
# distutils is deprecated (PEP 632) and removed from the standard library in
# Python 3.12; setuptools.setup is the drop-in replacement.
from setuptools import find_packages, setup

setup(
    name="detr",
    version="0.0.0",
    packages=find_packages(),
    license="MIT License",
    # long_description=open("README.md").read(),
)
| @@ -0,0 +1 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| @@ -0,0 +1,86 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| Utilities for bounding box manipulation and GIoU. | |||
| """ | |||
| import torch | |||
| from torchvision.ops.boxes import box_area | |||
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) along the last dim."""
    cx, cy, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h) along the last dim."""
    x0, y0, x1, y1 = x.unbind(-1)
    centers_and_sizes = ((x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0))
    return torch.stack(centers_and_sizes, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    """Pairwise IoU for xyxy boxes.

    Returns (iou, union), both of shape [N, M] for N boxes1 and M boxes2.
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    # Intersection rectangle: max of top-left corners, min of bottom-rights.
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    extent = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    inter = extent[..., 0] * extent[..., 1]  # [N,M]
    union = area1[:, None] + area2 - inter
    return inter / union, union
def generalized_box_iou(boxes1, boxes2):
    """Generalized IoU from https://giou.stanford.edu/ for xyxy boxes.

    Returns a [N, M] pairwise matrix, where N = len(boxes1) and
    M = len(boxes2).
    """
    # Degenerate boxes (x1 < x0 or y1 < y0) would give inf/nan, so fail fast.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)
    # Smallest enclosing box of every pair.
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    extent = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    enclosing = extent[..., 0] * extent[..., 1]
    return iou - (enclosing - union) / enclosing
def masks_to_boxes(masks):
    """Compute tight xyxy bounding boxes around binary masks.

    masks: [N, H, W] where N is the number of masks. Returns an [N, 4]
    float tensor; empty input yields a [0, 4] tensor on the same device.
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)
    height, width = masks.shape[-2:]
    ys = torch.arange(0, height, dtype=torch.float)
    xs = torch.arange(0, width, dtype=torch.float)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    # Max over masked coordinates gives the far edge; for the near edge,
    # unmasked positions are pushed to a huge value before taking the min.
    masked_x = masks * grid_x.unsqueeze(0)
    x_max = masked_x.flatten(1).max(-1)[0]
    x_min = masked_x.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    masked_y = masks * grid_y.unsqueeze(0)
    y_max = masked_y.flatten(1).max(-1)[0]
    y_min = masked_y.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    return torch.stack([x_min, y_min, x_max, y_max], 1)
| @@ -0,0 +1,490 @@ | |||
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | |||
| """ | |||
| Misc functions, including distributed helpers. | |||
| Mostly copy-paste from torchvision references. | |||
| """ | |||
| import datetime | |||
| import os | |||
| import pickle | |||
| import subprocess | |||
| import time | |||
| from collections import defaultdict, deque | |||
| from typing import List, Optional | |||
| import torch | |||
| import torch.distributed as dist | |||
| # needed due to empty tensor bug in pytorch and torchvision 0.5 | |||
| import torchvision | |||
| from packaging import version | |||
| from torch import Tensor | |||
| if version.parse(torchvision.__version__) < version.parse("0.7"): | |||
| from torchvision.ops import _new_empty_tensor | |||
| from torchvision.ops.misc import _output_size | |||
class SmoothedValue(object):
    """Track a series of values and expose smoothed statistics.

    The last window_size samples are kept in a deque for the windowed stats
    (median/avg/max/value), while running count/total support a global
    average over the whole series.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        """Record value; n weights it in the global (not windowed) average."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        running = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(running)
        count_sum, total_sum = running.tolist()
        self.count = int(count_sum)
        self.total = total_sum

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # single-process run: nothing to gather
        return [data]
    # serialized to a Tensor
    # NOTE(review): torch.ByteStorage.from_buffer is deprecated in recent
    # PyTorch releases — confirm against the pinned torch version.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        # pad our payload up to the common size before the collective
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # strip each rank's padding before unpickling
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.

    Values are expected to be stackable tensors (e.g. scalar loss tensors).
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: nothing to reduce
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        # (all_reduce matches tensors positionally, not by name)
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
class MetricLogger(object):
    """Aggregate named SmoothedValue meters and print periodic progress lines."""

    def __init__(self, delimiter="\t"):
        # Unknown meter names get a default SmoothedValue on first update.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword; tensors are converted via .item()."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails: expose meters as
        # attributes, e.g. ``logger.loss`` returns the "loss" meter.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        """Render every meter as 'name: <formatted value>' joined by the delimiter."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a meter with a custom format string."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing progress every `print_freq` steps.

        Tracks per-iteration data-loading and compute time, prints an ETA and
        (on CUDA) the peak memory use.
        NOTE(review): requires ``len(iterable)``; an empty iterable divides by
        zero in the final summary — confirm callers never pass one.
        """
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        # width of the iteration counter, so progress lines align
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time spent waiting for the next item (data loading)
            data_time.update(time.time() - end)
            yield obj
            # time for the full iteration, including the caller's work
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )
def get_sha():
    """Return a one-line summary of the git state of this source tree.

    Format: ``sha: <sha>, status: <clean|has uncommited changes>, branch: <name>``.
    Any git failure (no repo, no git binary) leaves the corresponding field at
    its placeholder instead of raising.
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))

    def _git(cmd):
        # run a git command inside this file's directory
        return subprocess.check_output(cmd, cwd=repo_dir).decode("ascii").strip()

    sha, diff, branch = "N/A", "clean", "N/A"
    try:
        sha = _git(["git", "rev-parse", "HEAD"])
        subprocess.check_output(["git", "diff"], cwd=repo_dir)
        diff = "has uncommited changes" if _git(["git", "diff-index", "HEAD"]) else "clean"
        branch = _git(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    except Exception:
        # best-effort: keep whatever fields were resolved before the failure
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
def collate_fn(batch):
    """Collate (image, target, ...) samples: transpose the batch, then pad the
    image column into a single NestedTensor while passing the remaining columns
    through untouched."""
    columns = list(zip(*batch))
    columns[0] = nested_tensor_from_tensor_list(columns[0])
    return tuple(columns)
| def _max_by_axis(the_list): | |||
| # type: (List[List[int]]) -> List[int] | |||
| maxes = the_list[0] | |||
| for sublist in the_list[1:]: | |||
| for index, item in enumerate(sublist): | |||
| maxes[index] = max(maxes[index], item) | |||
| return maxes | |||
class NestedTensor(object):
    """Bundle a padded batch tensor with its padding mask (True = padding)."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        """Return a new NestedTensor with tensors (and mask, if any) on *device*."""
        moved = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved, moved_mask)

    def decompose(self):
        """Unpack into the (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of CHW image tensors to a common shape and wrap as NestedTensor.

    The returned mask is (b, h, w) boolean: True where a pixel is padding,
    False where it belongs to the original image.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)
        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        # zero-filled batch; each image is copied into the top-left corner
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            # mark the real-image region as not-padding
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    """ONNX-traceable variant: builds the padded batch with tensor ops only
    (no data-dependent slice assignment)."""
    max_size = []
    for i in range(tensor_list[0].dim()):
        # per-dimension maximum, computed through tensor ops so tracing works
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)
    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        # how much to grow each dimension of this image up to max_size
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)
        # mask: 0 over the real image, padded out with 1 (= padding)
        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))
    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)
    return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process.

    Monkeypatches ``builtins.print`` process-wide, so every module's print()
    is filtered. Passing ``force=True`` to print() bypasses the filter.
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # pop `force` so it is not forwarded to the real print()
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both compiled in and a process group
    has been initialized (short-circuits so is_initialized is only queried
    when distributed support is available)."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True only on rank 0 — the process responsible for logging and saving."""
    rank = get_rank()
    return rank == 0
def save_on_master(*args, **kwargs):
    """Forward to torch.save, but only on the main process, so a distributed
    run writes each checkpoint exactly once."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from launcher environment variables.

    Mutates ``args`` in place: sets ``rank``, ``gpu``, ``distributed``,
    ``dist_backend`` (and ``world_size`` in the torchrun branch). Expects
    ``args.dist_url`` to be provided by the caller (e.g. "env://").
    """
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        # torchrun / torch.distributed.launch style environment
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
        # NOTE(review): this branch never sets args.world_size, yet it is used
        # in init_process_group below — confirm the SLURM launch path supplies it.
    else:
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend,
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    # wait for every rank, then silence print() on non-master ranks
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) raw scores / logits.
        target: (batch,) integer class labels.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k, each the top-k accuracy in percent.
    """
    if target.numel() == 0:
        # One zero per requested k, so callers can index the result uniformly
        # (previously a single zero was returned regardless of len(topk)).
        return [torch.zeros([], device=output.device) for _ in topk]
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape, not view: `correct` is non-contiguous after the transpose,
        # and .view(-1) raises a RuntimeError on non-contiguous tensors.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if version.parse(torchvision.__version__) < version.parse("0.7"):
        if input.numel() > 0:
            return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
        # empty batch: compute the output shape by hand and return an empty tensor
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # NOTE(review): torchvision.ops.misc.interpolate is a private shim that
        # has been removed in recent torchvision releases — confirm the pinned
        # torchvision version still provides it.
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| @@ -0,0 +1,116 @@ | |||
| """ | |||
| Plotting utilities to visualize training logs. | |||
| """ | |||
| from pathlib import Path, PurePath | |||
| import matplotlib.pyplot as plt | |||
| import numpy as np | |||
| import pandas as pd | |||
| import seaborn as sns | |||
| import torch | |||
def plot_logs(
    logs, fields=("class_error", "loss_bbox_unscaled", "mAP"), ewm_col=0, log_name="log.txt"
):
    """
    Function to plot specific fields from training log(s). Plots both training and test results.
    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
              - fields = which results to plot from each log file - plots both training and test for each field.
              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different than default 'log.txt'.
    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
               - solid lines are training results, dashed lines are test results.
    """
    func_name = "plot_utils.py::plot_logs"
    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
    # convert single Path to list to avoid 'not iterable' error
    if not isinstance(logs, list):
        if isinstance(logs, PurePath):
            logs = [logs]
            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
        else:
            raise ValueError(
                f"{func_name} - invalid argument for logs parameter.\n \
            Expect list[Path] or single Path obj, received {type(logs)}"
            )
    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
    for i, dir in enumerate(logs):
        if not isinstance(dir, PurePath):
            raise ValueError(
                f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}"
            )
        if not dir.exists():
            raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
        # verify log_name exists
        fn = Path(dir / log_name)
        if not fn.exists():
            print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
            print(f"--> full path of missing log file: {fn}")
            return
    # load log file(s) and plot; each log line is one JSON record per epoch
    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == "mAP":
                # column 1 of the stacked coco_eval array — presumably the AP
                # summary stat to plot; confirm against the log format.
                coco_eval = (
                    pd.DataFrame(np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1])
                    .ewm(com=ewm_col)
                    .mean()
                )
                axs[j].plot(coco_eval, c=color)
            else:
                # solid line = train curve, dashed line = test curve
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f"train_{field}", f"test_{field}"],
                    ax=axs[j],
                    color=[color] * 2,
                    style=["-", "--"],
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
def plot_precision_recall(files, naming_scheme="iter"):
    """Plot precision/recall and score/recall curves from saved COCO-eval dumps.

    :: Inputs - files = list of Path objects pointing to torch-saved eval files.
              - naming_scheme = 'exp_id' labels each curve by the grandparent
                directory name, 'iter' by the file stem.
    :: Outputs - the matplotlib (fig, axs) pair.
    """
    if naming_scheme == "exp_id":
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == "iter":
        names = [f.stem for f in files]
    else:
        raise ValueError(f"not supported {naming_scheme}")
    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # eval files from trusted sources.
        data = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        precision = data["precision"]
        recall = data["params"].recThrs
        scores = data["scores"]
        # take precision for all classes, all areas and 100 detections
        # (iou index 0, area index 0, last max_det column), averaged over classes
        precision = precision[0, :, :, 0, -1].mean(1)
        scores = scores[0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = data["recall"][0, :, 0, -1].mean()
        print(
            f"{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, "
            + f"score={scores.mean():0.3f}, "
            + f"f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}"
        )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)
    axs[0].set_title("Precision / Recall")
    axs[0].legend(names)
    axs[1].set_title("Scores / Recall")
    axs[1].legend(names)
    return fig, axs
| @@ -0,0 +1,282 @@ | |||
| import argparse | |||
| import csv | |||
| import os | |||
| import pickle | |||
| import sys | |||
| from datetime import datetime | |||
| from pathlib import Path | |||
| import time | |||
| import cv2 | |||
| import numpy as np | |||
| import torch | |||
| from einops import rearrange | |||
| from constants import TASK_CONFIGS | |||
| from policy import ACTPolicy | |||
| from utils import set_seed | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| from inference import ActInference | |||
class ActInferenceReal(ActInference):
    """Run a trained ACT policy on the real robot through the dora dataflow.

    Loads checkpoint/stats from ``--ckpt_dir``, runs ``num_rollout`` episodes of
    closed-loop inference (optionally with temporal aggregation over action
    chunks), and always saves per-rollout videos, frames and joint trajectories
    under ``data/output/<task_name>/<timestamp>``.
    """

    def __init__(self):
        super().__init__()
        self.parser.add_argument("--num_rollout", action="store", type=int, default=1, required=False)
        args = vars(self.parser.parse_args())
        # command line parameters
        ckpt_dir = args["ckpt_dir"]
        policy_class = args["policy_class"]
        task_name = args["task_name"]
        num_epochs = args["num_epochs"]
        set_seed(args["seed"])
        # get task parameters
        task_config = TASK_CONFIGS[task_name]
        episode_len = task_config["episode_len"]
        camera_names = task_config["camera_names"]
        state_dim = task_config["state_dim"]
        # fixed parameters
        lr_backbone = 1e-5
        backbone = "resnet18"
        if policy_class == "ACT":
            enc_layers = 4
            dec_layers = 7
            nheads = 8
            policy_config = {
                "state_dim": state_dim,
                "lr": args["lr"],
                "num_queries": args["chunk_size"],
                "kl_weight": args["kl_weight"],
                "hidden_dim": args["hidden_dim"],
                "dim_feedforward": args["dim_feedforward"],
                "lr_backbone": lr_backbone,
                "backbone": backbone,
                "enc_layers": enc_layers,
                "dec_layers": dec_layers,
                "nheads": nheads,
                "camera_names": camera_names,
            }
        else:
            raise NotImplementedError
        self.config = {
            "num_epochs": num_epochs,
            "ckpt_dir": ckpt_dir,
            "episode_len": episode_len,
            "state_dim": state_dim,
            "policy_class": policy_class,
            "policy_config": policy_config,
            "task_name": task_name,
            "seed": args["seed"],
            "temporal_agg": args["temporal_agg"],
            "camera_names": camera_names,
            # --num_rollout is parsed but deliberately pinned to 1 for now.
            # "num_rollout": args["num_rollout"],
            "num_rollout": 1,
        }
        # Camera resolution; overridable through the environment for other cameras.
        self.IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 640))
        self.IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 360))

    def make_policy(self, policy_class, policy_config):
        """Instantiate the policy network; only ACT is supported on the real robot."""
        if policy_class == "ACT":
            policy = ACTPolicy(policy_config)
        else:
            raise NotImplementedError
        return policy

    def make_optimizer(self, policy_class, policy):
        """Build the optimizer owned by the policy; only ACT is supported."""
        if policy_class == "ACT":
            optimizer = policy.configure_optimizers()
        else:
            raise NotImplementedError
        return optimizer

    def get_image(self, request=True):
        """Fetch one frame via the base class, then reshape the flat buffer to HWC."""
        super().get_image(request)
        self.image = self.image.reshape((self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3))

    def process_image(self):
        """Convert the current HWC frame into a (1, n_cam, c, h, w) float GPU tensor."""
        cur_images = []
        cur_image = rearrange(self.image, "h w c -> c h w")
        cur_images.append(cur_image)
        cur_image = np.stack(cur_images, axis=0)
        cur_image = torch.from_numpy(cur_image / 255.0).float().cuda().unsqueeze(0)
        return cur_image

    def inference(self):
        """Closed-loop evaluation: query the policy, publish actions, record logs.

        Whatever was recorded is saved in the ``finally`` block, so partial
        rollouts survive interruptions.
        """
        seed = self.config["seed"]
        ckpt_dir = self.config["ckpt_dir"]
        state_dim = self.config["state_dim"]
        policy_class = self.config["policy_class"]
        policy_config = self.config["policy_config"]
        camera_names = self.config["camera_names"]
        max_timesteps = self.config["episode_len"]
        temporal_agg = self.config["temporal_agg"]
        num_rollout = self.config["num_rollout"]
        task_name = self.config["task_name"]
        set_seed(seed)
        # load policy and stats
        ckpt_path = os.path.join(ckpt_dir, "policy_best.ckpt")
        policy = self.make_policy(policy_class, policy_config)
        loading_status = policy.load_state_dict(torch.load(ckpt_path))
        print(loading_status)
        policy.cuda()
        policy.eval()
        print(f"Loaded: {ckpt_path}")
        stats_path = os.path.join(ckpt_dir, "dataset_stats.pkl")
        with open(stats_path, "rb") as f:
            stats = pickle.load(f)
        # preprocess (normalize qpos) and postprocess (denormalize actions)
        pre_process = lambda s_qpos: (s_qpos - stats["qpos_mean"]) / stats["qpos_std"]
        post_process = lambda a: a * stats["action_std"] + stats["action_mean"]
        # initialized BEFORE the try so the finally block can always save them
        # (previously a failure before the loop raised NameError in finally)
        image_history = []
        qpos_history = []
        target_qpos_history = []
        try:
            query_frequency = policy_config["num_queries"]  # num_queries == chunk_size
            if temporal_agg:  # temporal aggregation
                query_frequency = 1
                num_queries = policy_config["num_queries"]
            max_timesteps = int(max_timesteps * 3)  # may increase for real-world tasks
            for rollout_id in range(num_rollout):
                # input(f"Rollout {rollout_id + 1}/{num_rollout} ready. Press Enter to start...")
                self.get_image()
                self.get_qpos()
                ### evaluation loop
                if temporal_agg:
                    # row t holds the chunk predicted at step t, aligned to its target steps
                    all_time_actions = torch.zeros(
                        [max_timesteps, max_timesteps + num_queries, state_dim]
                    ).cuda()
                image_list = []
                qpos_list = []
                target_qpos_list = []
                with torch.inference_mode():
                    for t in range(max_timesteps):
                        ### process previous timestep to get qpos and image_list
                        qpos_numpy = np.array(self.qpos)
                        qpos = pre_process(qpos_numpy)
                        qpos = torch.from_numpy(qpos).float().cuda().unsqueeze(0)
                        curr_image = self.process_image()
                        image_list.append(self.image)
                        qpos_list.append(self.qpos)
                        ### query policy
                        if policy_class == "ACT":
                            if t % query_frequency == 0:
                                all_actions = policy(qpos, curr_image)
                            if temporal_agg:
                                all_time_actions[[t], t : t + num_queries] = all_actions
                                actions_for_curr_step = all_time_actions[:, t]
                                # keep only rows whose chunk covers step t (non-zero)
                                actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
                                actions_for_curr_step = actions_for_curr_step[actions_populated]
                                # exponentially weighted blend across overlapping chunks
                                k = 0.01
                                exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step)))
                                exp_weights = exp_weights / exp_weights.sum()
                                exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
                                raw_action = (actions_for_curr_step * exp_weights).sum(
                                    dim=0, keepdim=True
                                )
                            else:
                                raw_action = all_actions[:, t % query_frequency]
                        else:
                            raise NotImplementedError
                        ### post-process actions
                        raw_action = raw_action.squeeze(0).cpu().numpy()
                        action = post_process(raw_action)
                        target_qpos = action.tolist()
                        ### step the environment
                        self.pub_action(action=target_qpos)
                        self.get_image()
                        self.get_qpos()
                        time.sleep(0.01)
                        target_qpos_list.append(target_qpos)
                print(f"Rollout {rollout_id + 1}/{num_rollout} finished")
                image_history.append(image_list)
                qpos_history.append(qpos_list)
                target_qpos_history.append(target_qpos_list)
        finally:
            # close environment
            print("Environment closed")
            self._save_rollouts(task_name, image_history, qpos_history, target_qpos_history)

    def _save_rollouts(self, task_name, image_history, qpos_history, target_qpos_history):
        """Dump recorded rollouts: per-rollout video + PNG frames, and qpos /
        target-qpos CSV trajectories under data/output/<task_name>/<timestamp>."""
        current_time = datetime.now().strftime("%Y_%m_%d_%H_%M")
        save_path = f"data/output/{task_name}/{current_time}"
        os.makedirs(save_path, exist_ok=True)
        joint_header = [
            "joint_0",
            "joint_1",
            "joint_2",
            "joint_3",
            "joint_4",
            "joint_5",
            "joint_6",
            "gripper width",
        ]
        for i in range(len(image_history)):
            images_path = os.path.join(save_path, f"image_list_{i}")
            os.makedirs(images_path, exist_ok=True)
            # BUGFIX: the frame size must match the actual camera resolution;
            # the previous hard-coded (640, 360) silently produced broken
            # videos whenever IMAGE_WIDTH/IMAGE_HEIGHT were overridden via env.
            video_writer = cv2.VideoWriter(
                os.path.join(save_path, f"video_{i}.mp4"),
                cv2.VideoWriter_fourcc(*"mp4v"),
                20,
                (self.IMAGE_WIDTH, self.IMAGE_HEIGHT),
            )
            for j, image_np in enumerate(image_history[i]):
                video_writer.write(image_np)
                image_path = os.path.join(images_path, f"image_{j}.png")
                cv2.imwrite(image_path, image_np)
            video_writer.release()
        for i in range(len(qpos_history)):
            qpos_path = os.path.join(save_path, f"qpos_{i}.csv")
            with open(qpos_path, "w", newline="") as file:
                writer = csv.writer(file)
                writer.writerow(joint_header)
                for row in qpos_history[i]:
                    writer.writerow(row)
        for i in range(len(target_qpos_history)):
            target_qpos_path = os.path.join(save_path, f"target_qpos_{i}.csv")
            with open(target_qpos_path, "w", newline="") as file:
                writer = csv.writer(file)
                writer.writerow(joint_header)
                for row in target_qpos_history[i]:
                    writer.writerow(row)
        print("Saved all images and qpos")
| @@ -0,0 +1,246 @@ | |||
| import argparse | |||
| import os | |||
| import pickle | |||
| import numpy as np | |||
| import torch | |||
| from einops import rearrange | |||
| from policy import ACTPolicy, CNNMLPPolicy | |||
| from inference import ActInference | |||
| import pyarrow as pa | |||
| class ActInferenceSim(ActInference): | |||
    def __init__(self):
        """Parse CLI args, resolve the task configuration, and build self.config.

        Relies on the base class having created ``self.parser`` and registered
        the shared arguments (ckpt_dir, policy_class, task_name, lr, seed, ...).
        """
        super().__init__()
        self.parser.add_argument("--eval", action="store_true")
        self.parser.add_argument("--ckpt_path", type=str, default="policy_best.ckpt")
        args = vars(self.parser.parse_args())
        self.STATE_DIM = 8  # presumably 7 arm joints + gripper width — TODO confirm
        self.exp_weight = 0.1  # exponential weight, presumably consumed by the base class — TODO confirm
        self.is_degree = False  # joint angles treated as radians, not degrees
        # command line parameters
        is_eval = args["eval"]  # NOTE(review): parsed but unused in this class
        ckpt_dir = args["ckpt_dir"]
        policy_class = args["policy_class"]
        onscreen_render = args["onscreen_render"]
        task_name = args["task_name"]
        batch_size_train = args["batch_size"]  # NOTE(review): unused here (training-only)
        batch_size_val = args["batch_size"]  # NOTE(review): unused here (training-only)
        num_epochs = args["num_epochs"]
        # get task parameters
        is_sim = task_name[:4] == "sim_"
        if is_sim:
            from constants import SIM_TASK_CONFIGS
            task_config = SIM_TASK_CONFIGS[task_name]
        else:
            from constants import TASK_CONFIGS  # TODO
            task_config = TASK_CONFIGS[task_name]
        dataset_dir = task_config["dataset_dir"]  # NOTE(review): unused here
        num_episodes = task_config["num_episodes"]  # NOTE(review): unused here
        episode_len = task_config["episode_len"]
        state_dim = task_config["state_dim"]
        camera_names = task_config["camera_names"]
        # fixed backbone hyperparameters
        lr_backbone = 1e-5
        backbone = "resnet18"
        if policy_class == "ACT":
            enc_layers = 4
            dec_layers = 7
            nheads = 8
            policy_config = {
                "state_dim": state_dim,
                "lr": args["lr"],
                "num_queries": args["chunk_size"],
                "kl_weight": args["kl_weight"],
                "hidden_dim": args["hidden_dim"],
                "dim_feedforward": args["dim_feedforward"],
                "lr_backbone": lr_backbone,
                "backbone": backbone,
                "enc_layers": enc_layers,
                "dec_layers": dec_layers,
                "nheads": nheads,
                "camera_names": camera_names,
            }
        elif policy_class == "CNNMLP":
            policy_config = {
                "lr": args["lr"],
                "lr_backbone": lr_backbone,
                "backbone": backbone,
                "num_queries": 1,
                "camera_names": camera_names,
            }
        else:
            raise NotImplementedError
        # everything downstream (inference, checkpoint loading) reads this dict
        self.config = {
            "num_epochs": num_epochs,
            "ckpt_dir": ckpt_dir,
            "ckpt_name": args["ckpt_path"],
            "episode_len": episode_len,
            "state_dim": state_dim,
            "lr": args["lr"],
            "policy_class": policy_class,
            "onscreen_render": onscreen_render,
            "policy_config": policy_config,
            "task_name": task_name,
            "seed": args["seed"],
            "temporal_agg": args["temporal_agg"],
            "camera_names": camera_names,
            "real_robot": not is_sim,
        }
| def make_policy(self, policy_class, policy_config): | |||
| if policy_class == "ACT": | |||
| policy = ACTPolicy(policy_config) | |||
| elif policy_class == "CNNMLP": | |||
| policy = CNNMLPPolicy(policy_config) | |||
| else: | |||
| raise NotImplementedError | |||
| return policy | |||
| def make_optimizer(self, policy_class, policy): | |||
| if policy_class == "ACT": | |||
| optimizer = policy.configure_optimizers() | |||
| elif policy_class == "CNNMLP": | |||
| optimizer = policy.configure_optimizers() | |||
| else: | |||
| raise NotImplementedError | |||
| return optimizer | |||
    def get_image(self, request=True):
        """Block until one image frame arrives from the dora dataflow.

        When `request` is True, emits a `request_image` output each attempt to
        ask the simulator for a frame, then polls the node (1s timeout per
        poll) until an `image` INPUT event shows up. Stores the flat pixel
        buffer in self.image and the resolution (from the event metadata's
        'w'/'h' keys) in self.IMAGE_WIDTH / self.IMAGE_HEIGHT.
        """
        got_image = False
        while not got_image:
            if request:
                self.node.send_output(output_id="request_image", data=pa.array([]), metadata={})
            # 1-second timeout; loop (re-requesting) until an image arrives
            event = self.node.next(1)
            if event is not None and event["type"] == "INPUT" and event["id"] == "image":
                self.image = event["value"].to_numpy()
                metadata = event["metadata"]
                self.IMAGE_WIDTH = metadata['w']
                self.IMAGE_HEIGHT = metadata['h']
                got_image = True
| def process_image(self, image): | |||
| image = rearrange(image, "h w c -> c h w") # 形状变为 (c, h, w) | |||
| # 转换为 PyTorch 张量,归一化,并移动到 GPU 上 | |||
| image = torch.from_numpy(image / 255.0).float().cuda().unsqueeze(0) # 添加批量维度 | |||
| return image | |||
    def inference(self):
        """Main inference loop: fetch observations from the dataflow, run the
        ACT/CNNMLP policy, and publish target joint positions until stopped.
        """
        ckpt_name = self.config["ckpt_name"]
        ckpt_dir = self.config["ckpt_dir"]
        state_dim = self.config["state_dim"]
        policy_class = self.config["policy_class"]
        policy_config = self.config["policy_config"]
        max_timesteps = self.config["episode_len"]
        temporal_agg = self.config["temporal_agg"]
        # load policy and stats
        ckpt_path = os.path.join(ckpt_dir, ckpt_name)
        self.policy = self.make_policy(policy_class, policy_config)
        loading_status = self.policy.load_state_dict(torch.load(ckpt_path))
        print(loading_status)
        self.policy.cuda()
        self.policy.eval()
        print(f"Loaded: {ckpt_path}")
        stats_path = os.path.join(ckpt_dir, f"dataset_stats.pkl")
        with open(stats_path, "rb") as f:
            stats = pickle.load(f)
        # Normalize observations / de-normalize actions using training stats.
        pre_process = lambda s_qpos: (s_qpos - stats["qpos_mean"]) / stats["qpos_std"]
        post_process = lambda a: a * stats["action_std"] + stats["action_mean"]
        # load environment
        query_frequency = policy_config["num_queries"]  # num_queries == chunk_size
        if temporal_agg:  # temporal aggregation
            query_frequency = 1
            num_queries = policy_config["num_queries"]
        max_timesteps = int(2 * max_timesteps * 1)  # may increase for real-world tasks
        if temporal_agg:
            all_time_actions = torch.zeros([max_timesteps, max_timesteps + num_queries, state_dim]).cuda()
        qpos_history = torch.zeros((1, max_timesteps, state_dim)).cuda()
        image_list = []  # for visualization
        qpos_list = []
        target_qpos_list = []
        all_actions = []
        t = 0
        # NOTE(review): the loop below never terminates while t keeps growing;
        # qpos_history / all_time_actions are sized for max_timesteps, so t will
        # eventually index out of bounds -- confirm the intended episode bound.
        while True:
            self.get_image()
            self.get_qpos()
            cur_joint_pos = np.array(self.qpos)
            img_np = self.image.astype(dtype=np.uint8)
            img_np = img_np.reshape((self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3))
            self.policy.eval()
            # Process the observation and query the policy.
            with torch.inference_mode():
                obs_image = img_np
                obs_qpos = cur_joint_pos[:self.STATE_DIM]
                qpos_numpy = np.array(obs_qpos)
                if self.is_degree:
                    # Joint radians -> degrees; gripper width rescaled by 0.04
                    # (presumably the max opening in meters -- TODO confirm).
                    qpos_numpy = qpos_numpy / np.pi * 180
                    qpos_numpy[-1] = obs_qpos[-1] / 0.04
                qpos = pre_process(qpos_numpy)
                qpos = torch.from_numpy(qpos).float().cuda().unsqueeze(0)
                qpos_history[:, t] = qpos
                curr_image = self.process_image(obs_image)
                curr_image = curr_image.unsqueeze(1)
                ### query policy
                if self.config["policy_class"] == "ACT":
                    if t % query_frequency == 0:
                        all_actions = self.policy(qpos, curr_image)
                    if temporal_agg:
                        # Exponentially weighted average over all chunks that
                        # produced an action for the current timestep.
                        all_time_actions[[t], t : t + num_queries] = all_actions[:, :num_queries]
                        actions_for_curr_step = all_time_actions[:, t]
                        actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
                        actions_for_curr_step = actions_for_curr_step[actions_populated]
                        exp_weights = np.exp(-self.exp_weight * np.arange(len(actions_for_curr_step)))
                        exp_weights = exp_weights / exp_weights.sum()
                        exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
                        raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True)
                    else:
                        raw_action = all_actions[:, t % query_frequency]
                elif self.config["policy_class"] == "CNNMLP":
                    raw_action = self.policy(qpos, curr_image)
                else:
                    raise NotImplementedError
                ### post-process actions
                raw_action = raw_action.squeeze(0).cpu().numpy()
                action = post_process(raw_action)
                target_qpos = action
                # TODO: apply target_qpos to the robot arm in the simulation
                t = t + 1
                ### for visualization
                qpos_list.append(qpos_numpy)
                target_qpos_list.append(target_qpos)
                if self.is_degree:
                    # Convert back: degrees -> radians; gripper scale restored.
                    temp = target_qpos[-1]
                    target_qpos = target_qpos / 180 * np.pi
                    target_qpos[-1] = temp * 0.04
                self.pub_action(target_qpos)
| @@ -0,0 +1,62 @@ | |||
| import argparse | |||
| from dora import Node | |||
| import pyarrow as pa | |||
class ActInference:
    """Base class for ACT inference nodes communicating over a dora dataflow.

    Owns the CLI argument parser and the dora ``Node`` used to request and
    receive images / joint positions and to publish actions.  Subclasses
    implement ``make_policy``, ``make_optimizer`` and ``inference``.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--ckpt_dir", action="store", type=str, help="ckpt_dir", required=True)
        self.parser.add_argument(
            "--policy_class", action="store", type=str, help="policy_class, capitalize", required=True
        )
        self.parser.add_argument("--task_name", action="store", type=str, help="task_name", required=True)
        self.parser.add_argument("--batch_size", action="store", type=int, help="batch_size", required=True)
        self.parser.add_argument("--seed", action="store", type=int, help="seed", required=True)
        self.parser.add_argument("--num_epochs", action="store", type=int, help="num_epochs", required=True)
        self.parser.add_argument("--lr", action="store", type=float, help="lr", required=True)
        # ACT-specific hyperparameters (optional for other policy classes).
        self.parser.add_argument("--kl_weight", action="store", type=int, help="KL Weight", required=False)
        self.parser.add_argument("--chunk_size", action="store", type=int, help="chunk_size", required=False)
        self.parser.add_argument("--hidden_dim", action="store", type=int, help="hidden_dim", required=False)
        self.parser.add_argument(
            "--dim_feedforward", action="store", type=int, help="dim_feedforward", required=False
        )
        self.parser.add_argument("--temporal_agg", action="store_true")
        self.node = Node()
        self.image = None  # last image received from the dataflow
        self.qpos = None  # last joint state received from the dataflow

    def get_image(self, request=True):
        """Block until an "image" input arrives and store it as a numpy array."""
        got_image = False
        while not got_image:
            if request:
                self.node.send_output(output_id="request_image", data=pa.array([]), metadata={})
            event = self.node.next(1)
            if event is not None and event["type"] == "INPUT" and event["id"] == "image":
                self.image = event["value"].to_numpy()
                got_image = True

    def get_qpos(self, request=True):
        """Block until a "qpos" input arrives and store it as a python list."""
        got_qpos = False
        while not got_qpos:
            if request:
                self.node.send_output(output_id="request_qpos", data=pa.array([]), metadata={})
            event = self.node.next(1)
            if event is not None and event["type"] == "INPUT" and event["id"] == "qpos":
                self.qpos = event["value"].to_pylist()
                got_qpos = True

    def pub_action(self, action):
        """Publish *action* on the "action" output."""
        self.node.send_output(output_id="action", data=pa.array(action), metadata={})

    def make_policy(self, policy_class, policy_config):
        """Build the policy network; implemented by subclasses.

        (Fixed: parameter was previously misspelled ``policy_calss``.)
        """
        pass

    def make_optimizer(self, policy_class, policy):
        """Build the optimizer for *policy*; implemented by subclasses."""
        pass

    def inference(self):
        """Run the inference loop; implemented by subclasses."""
        pass
| @@ -0,0 +1,20 @@ | |||
| import argparse | |||
| import os | |||
| from dora import Node | |||
| import pyarrow as pa | |||
| from infer_real import * | |||
| from infer_sim import * | |||
if __name__ == "__main__":
    # Select the real-robot or simulation inference backend via SCENARIO.
    scenario = os.getenv("SCENARIO", "real")
    if scenario == "real":
        act_inference_real = ActInferenceReal()
        act_inference_real.inference()
    elif scenario == "sim":
        act_inference_sim = ActInferenceSim()
        act_inference_sim.inference()
    else:
        # Fixed typo in the error message ("sceanrio" -> "scenario").
        print(f"do not have scenario {scenario}")
        raise NotImplementedError
| @@ -0,0 +1,81 @@ | |||
| import torch.nn as nn | |||
| import torchvision.transforms as transforms | |||
| from detr.main import build_ACT_model_and_optimizer, build_CNNMLP_model_and_optimizer | |||
| from torch.nn import functional as F | |||
class ACTPolicy(nn.Module):
    """Action Chunking Transformer policy wrapping the DETR-style CVAE model.

    Training call: ``policy(qpos, image, actions, is_pad)`` -> loss dict.
    Inference call: ``policy(qpos, image)`` -> predicted action chunk.
    """

    def __init__(self, args_override):
        super().__init__()
        model, optimizer = build_ACT_model_and_optimizer(args_override)
        self.model = model  # CVAE decoder
        self.optimizer = optimizer
        self.kl_weight = args_override["kl_weight"]  # weight of the KL loss term
        print(f"KL Weight {self.kl_weight}")

    def __call__(self, qpos, image, actions=None, is_pad=None):
        """Return the training loss dict, or predicted actions when *actions* is None."""
        env_state = None
        # ImageNet normalization; expects *image* already scaled to [0, 1].
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = normalize(image)
        if actions is not None:  # training time
            # Supervise only the first num_queries actions of the chunk.
            actions = actions[:, : self.model.num_queries]
            is_pad = is_pad[:, : self.model.num_queries]
            a_hat, is_pad_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad)
            total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)
            loss_dict = dict()
            all_l1 = F.l1_loss(actions, a_hat, reduction="none")
            # Mask padded timesteps out of the L1 term before averaging.
            l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean()
            loss_dict["l1"] = l1
            loss_dict["kl"] = total_kld[0]
            loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.kl_weight
            return loss_dict
        else:  # inference time
            a_hat, _, (_, _) = self.model(qpos, image, env_state)  # no action, sample from prior
            return a_hat

    def configure_optimizers(self):
        """Return the optimizer built alongside the model."""
        return self.optimizer
class CNNMLPPolicy(nn.Module):
    """Baseline CNN+MLP policy predicting a single action per observation."""

    def __init__(self, args_override):
        super().__init__()
        model, optimizer = build_CNNMLP_model_and_optimizer(args_override)
        self.model = model  # decoder
        self.optimizer = optimizer

    def __call__(self, qpos, image, actions=None, is_pad=None):
        """Return the MSE training loss dict, or a predicted action when *actions* is None."""
        env_state = None  # TODO
        # ImageNet normalization; expects *image* already scaled to [0, 1].
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = normalize(image)
        if actions is not None:  # training time
            actions = actions[:, 0]  # this baseline supervises only the first action
            a_hat = self.model(qpos, image, env_state, actions)
            mse = F.mse_loss(actions, a_hat)
            loss_dict = dict()
            loss_dict["mse"] = mse
            loss_dict["loss"] = loss_dict["mse"]
            return loss_dict
        else:  # inference time
            a_hat = self.model(qpos, image, env_state)  # no action, sample from prior
            return a_hat

    def configure_optimizers(self):
        """Return the optimizer built alongside the model."""
        return self.optimizer
def kl_divergence(mu, logvar):
    """KL(N(mu, exp(logvar)) || N(0, I)): total per-batch, per-dim, and mean."""
    assert mu.size(0) != 0
    # Collapse trailing singleton spatial dims of 4-D inputs down to (B, D).
    if mu.data.ndimension() == 4:
        mu = mu.view(mu.size(0), mu.size(1))
    if logvar.data.ndimension() == 4:
        logvar = logvar.view(logvar.size(0), logvar.size(1))
    klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
    return (
        klds.sum(1).mean(0, True),   # total KL, averaged over the batch
        klds.mean(0),                # dimension-wise KL
        klds.mean(1).mean(0, True),  # mean KL
    )
| @@ -0,0 +1,241 @@ | |||
| import argparse | |||
| import os | |||
| import pickle | |||
| import sys | |||
| from copy import deepcopy | |||
| from pathlib import Path | |||
| import matplotlib.pyplot as plt | |||
| import numpy as np | |||
| import torch | |||
| from tqdm import tqdm | |||
| sys.path.append(str(Path(__file__).parent.parent.parent.parent)) # add root to path | |||
| from constants import TASK_CONFIGS | |||
| from policy import ACTPolicy | |||
| from utils import compute_dict_mean, detach_dict, load_data, set_seed | |||
def main(args):
    """Train an ACT policy on the configured task and save the best checkpoint.

    *args* is the parsed-CLI dict; task geometry (dataset dir, episode length,
    cameras, state dim) comes from ``TASK_CONFIGS[task_name]``.
    """
    # command line parameters
    ckpt_dir = args["ckpt_dir"]
    policy_class = args["policy_class"]
    task_name = args["task_name"]
    batch_size_train = args["batch_size"]
    batch_size_val = args["batch_size"]
    num_epochs = args["num_epochs"]
    set_seed(args["seed"])
    # get task parameters
    task_config = TASK_CONFIGS[task_name]
    dataset_dir = task_config["dataset_dir"]
    num_episodes = task_config["num_episodes"]
    episode_len = task_config["episode_len"]
    camera_names = task_config["camera_names"]
    state_dim = task_config["state_dim"]
    # fixed parameters
    lr_backbone = 1e-5
    backbone = "resnet18"
    if policy_class == "ACT":
        enc_layers = 4
        dec_layers = 7
        nheads = 8
        policy_config = {
            "state_dim": state_dim,
            "lr": args["lr"],
            "num_queries": args["chunk_size"],
            "kl_weight": args["kl_weight"],
            "hidden_dim": args["hidden_dim"],
            "dim_feedforward": args["dim_feedforward"],
            "lr_backbone": lr_backbone,
            "backbone": backbone,
            "enc_layers": enc_layers,
            "dec_layers": dec_layers,
            "nheads": nheads,
            "camera_names": camera_names,
        }
    else:
        raise NotImplementedError
    config = {
        "num_epochs": num_epochs,
        "ckpt_dir": ckpt_dir,
        "episode_len": episode_len,
        "state_dim": state_dim,
        "lr": args["lr"],
        "policy_class": policy_class,
        "policy_config": policy_config,
        "task_name": task_name,
        "seed": args["seed"],
        "camera_names": camera_names,
    }
    train_dataloader, val_dataloader, stats, _ = load_data(
        dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val
    )
    # save dataset stats so inference can normalize with the same statistics
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    stats_path = os.path.join(ckpt_dir, f"dataset_stats.pkl")
    with open(stats_path, "wb") as f:
        pickle.dump(stats, f)
    best_ckpt_info = train_bc(train_dataloader, val_dataloader, config)
    best_epoch, min_val_loss, best_state_dict = best_ckpt_info
    # save best checkpoint
    ckpt_path = os.path.join(ckpt_dir, f"policy_best.ckpt")
    torch.save(best_state_dict, ckpt_path)
    print(f"Best ckpt, val loss {min_val_loss:.6f} @ epoch{best_epoch}")
def make_policy(policy_class, policy_config):
    """Instantiate the policy for *policy_class*; only "ACT" is supported here."""
    if policy_class != "ACT":
        raise NotImplementedError
    return ACTPolicy(policy_config)
def make_optimizer(policy_class, policy):
    """Return the optimizer configured by *policy*; only "ACT" is supported."""
    if policy_class != "ACT":
        raise NotImplementedError
    return policy.configure_optimizers()
def forward_pass(data, policy):
    """Move one (image, qpos, action, is_pad) batch to the GPU and run the policy."""
    image_data, qpos_data, action_data, is_pad = (t.cuda() for t in data)
    return policy(qpos_data, image_data, action_data, is_pad)  # TODO remove None
def train_bc(train_dataloader, val_dataloader, config):
    """Behavior-cloning training loop.

    Each epoch validates first, then trains; the best validation loss and its
    state dict are tracked.  Checkpoints are written every 100 epochs and at
    the end.  Returns (best_epoch, min_val_loss, best_state_dict).
    """
    num_epochs = config["num_epochs"]
    ckpt_dir = config["ckpt_dir"]
    seed = config["seed"]
    policy_class = config["policy_class"]
    policy_config = config["policy_config"]
    set_seed(seed)
    policy = make_policy(policy_class, policy_config)
    policy.cuda()
    optimizer = make_optimizer(policy_class, policy)
    train_history = []
    validation_history = []
    min_val_loss = np.inf
    best_ckpt_info = None
    for epoch in tqdm(range(num_epochs)):
        print(f"\nEpoch {epoch}")
        # validation
        with torch.inference_mode():
            policy.eval()
            epoch_dicts = []
            for batch_idx, data in enumerate(val_dataloader):
                forward_dict = forward_pass(data, policy)
                epoch_dicts.append(forward_dict)
            epoch_summary = compute_dict_mean(epoch_dicts)
            validation_history.append(epoch_summary)
            epoch_val_loss = epoch_summary["loss"]
            if epoch_val_loss < min_val_loss:
                min_val_loss = epoch_val_loss
                best_ckpt_info = (epoch, min_val_loss, deepcopy(policy.state_dict()))
        print(f"Val loss: {epoch_val_loss:.5f}")
        summary_string = ""
        for k, v in epoch_summary.items():
            summary_string += f"{k}: {v.item():.3f} "
        print(summary_string)
        # training
        policy.train()
        optimizer.zero_grad()
        for batch_idx, data in enumerate(train_dataloader):
            forward_dict = forward_pass(data, policy)
            # backward
            loss = forward_dict["loss"]
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            train_history.append(detach_dict(forward_dict))
        # NOTE(review): this slice assumes (batch_idx + 1) equals the number of
        # train batches per epoch, selecting the current epoch's entries of
        # train_history -- confirm against the upstream ACT implementation.
        epoch_summary = compute_dict_mean(
            train_history[(batch_idx + 1) * epoch : (batch_idx + 1) * (epoch + 1)]
        )
        epoch_train_loss = epoch_summary["loss"]
        print(f"Train loss: {epoch_train_loss:.5f}")
        summary_string = ""
        for k, v in epoch_summary.items():
            summary_string += f"{k}: {v.item():.3f} "
        print(summary_string)
        if epoch % 100 == 0:
            ckpt_path = os.path.join(ckpt_dir, f"policy_epoch_{epoch}_seed_{seed}.ckpt")
            torch.save(policy.state_dict(), ckpt_path)
            plot_history(train_history, validation_history, epoch, ckpt_dir, seed)
    ckpt_path = os.path.join(ckpt_dir, f"policy_last.ckpt")
    torch.save(policy.state_dict(), ckpt_path)
    best_epoch, min_val_loss, best_state_dict = best_ckpt_info
    ckpt_path = os.path.join(ckpt_dir, f"policy_epoch_{best_epoch}_seed_{seed}.ckpt")
    torch.save(best_state_dict, ckpt_path)
    print(f"Training finished:\nSeed {seed}, val loss {min_val_loss:.6f} at epoch {best_epoch}")
    # save training curves
    plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed)
    return best_ckpt_info
def plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed):
    """Save one train/validation curve PNG per logged metric under *ckpt_dir*."""
    for key in train_history[0]:
        save_path = os.path.join(ckpt_dir, f"train_val_{key}_seed_{seed}.png")
        plt.figure()
        # Spread the per-batch (train) and per-epoch (val) samples over the
        # same epoch axis so the two curves are comparable.
        xs_train = np.linspace(0, num_epochs - 1, len(train_history))
        xs_val = np.linspace(0, num_epochs - 1, len(validation_history))
        plt.plot(xs_train, [s[key].item() for s in train_history], label="train")
        plt.plot(xs_val, [s[key].item() for s in validation_history], label="validation")
        plt.tight_layout()
        plt.legend()
        plt.title(key)
        plt.savefig(save_path)
    print(f"Saved plots to {ckpt_dir}")
if __name__ == "__main__":
    # CLI mirrors the upstream ACT training script.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt_dir", action="store", type=str, help="ckpt_dir", required=True)
    parser.add_argument(
        "--policy_class", action="store", type=str, help="policy_class, capitalize", required=True
    )
    parser.add_argument("--task_name", action="store", type=str, help="task_name", required=True)
    parser.add_argument("--batch_size", action="store", type=int, help="batch_size", required=True)
    parser.add_argument("--seed", action="store", type=int, help="seed", required=True)
    parser.add_argument("--num_epochs", action="store", type=int, help="num_epochs", required=True)
    parser.add_argument("--lr", action="store", type=float, help="lr", required=True)
    # for ACT
    parser.add_argument("--kl_weight", action="store", type=int, help="KL Weight", required=False)
    parser.add_argument("--chunk_size", action="store", type=int, help="chunk_size", required=False)
    parser.add_argument("--hidden_dim", action="store", type=int, help="hidden_dim", required=False)
    parser.add_argument(
        "--dim_feedforward", action="store", type=int, help="dim_feedforward", required=False
    )
    main(vars(parser.parse_args()))
| @@ -0,0 +1,168 @@ | |||
| import os | |||
| import cv2 | |||
| import h5py | |||
| import numpy as np | |||
| import torch | |||
| from torch.utils.data import DataLoader | |||
| from constants import IMG_H, IMG_W | |||
class EpisodicDataset(torch.utils.data.Dataset):
    """Dataset of (image, qpos, action, is_pad) samples drawn from HDF5 episodes.

    Each ``__getitem__`` picks a random start timestep inside the episode and
    returns the observation at that step together with all subsequent actions,
    zero-padded back to the full episode length.
    """

    def __init__(self, episode_ids, dataset_dir, camera_names, norm_stats):
        # Fixed: was `super(EpisodicDataset).__init__()`, which builds an
        # unbound super object and never calls Dataset.__init__.
        super().__init__()
        self.episode_ids = episode_ids
        self.dataset_dir = dataset_dir
        self.camera_names = camera_names
        self.norm_stats = norm_stats
        self.is_sim = None
        self.__getitem__(0)  # initialize self.is_sim

    def __len__(self):
        return len(self.episode_ids)

    def __getitem__(self, index):
        """Return (image_data, qpos_data, action_data, is_pad) for one sample."""
        sample_full_episode = False  # hardcode
        episode_id = self.episode_ids[index]
        dataset_path = os.path.join(self.dataset_dir, f"episode_{episode_id}.hdf5")
        with h5py.File(dataset_path, "r") as root:
            is_sim = False  # hardcode
            original_action_shape = root["/action"].shape
            episode_len = original_action_shape[0]
            if sample_full_episode:
                start_ts = 0
            else:
                start_ts = np.random.choice(episode_len)
            # get observation at start_ts only
            qpos = root["/observations/qpos"][start_ts]
            image_dict = dict()
            for cam_name in self.camera_names:
                raw_img = root[f"/observations/images/{cam_name}"][start_ts]
                resize_img = cv2.resize(
                    np.array(raw_img), (IMG_W, IMG_H), interpolation=cv2.INTER_LINEAR
                )
                image_dict[cam_name] = resize_img
            # get all actions after and including start_ts
            action = root["/action"][start_ts:]
            action_len = episode_len - start_ts
        self.is_sim = is_sim
        # Pad actions back to episode length; is_pad flags the invalid tail.
        padded_action = np.zeros(original_action_shape, dtype=np.float32)
        padded_action[:action_len] = action
        is_pad = np.zeros(episode_len)
        is_pad[action_len:] = 1
        # new axis for different cameras
        all_cam_images = []
        for cam_name in self.camera_names:
            all_cam_images.append(image_dict[cam_name])
        all_cam_images = np.stack(all_cam_images, axis=0)
        # construct observations
        image_data = torch.from_numpy(all_cam_images)
        qpos_data = torch.tensor(qpos, dtype=torch.float32)
        action_data = torch.tensor(padded_action, dtype=torch.float32)
        is_pad = torch.from_numpy(is_pad).bool()
        # channel last -> channel first: (cam, H, W, C) -> (cam, C, H, W)
        image_data = torch.einsum("k h w c -> k c h w", image_data)
        # normalize image and change dtype to float
        image_data = image_data / 255.0
        action_data = (action_data - self.norm_stats["action_mean"]) / self.norm_stats["action_std"]
        qpos_data = (qpos_data - self.norm_stats["qpos_mean"]) / self.norm_stats["qpos_std"]
        return image_data, qpos_data, action_data, is_pad
def get_norm_stats(dataset_dir, num_episodes):
    """Compute mean/std normalization stats for qpos and action over all episodes.

    Returns a dict with "action_mean"/"action_std"/"qpos_mean"/"qpos_std"
    (stds clipped to >= 1e-2 to avoid division blow-ups) plus "example_qpos".
    """
    all_qpos_data = []
    all_action_data = []
    for episode_idx in range(num_episodes):
        dataset_path = os.path.join(dataset_dir, f"episode_{episode_idx}.hdf5")
        with h5py.File(dataset_path, "r") as root:
            qpos = root["/observations/qpos"][()]
            action = root["/action"][()]
        all_qpos_data.append(torch.from_numpy(qpos))
        all_action_data.append(torch.from_numpy(action))
    all_qpos_data = torch.stack(all_qpos_data)
    all_action_data = torch.stack(all_action_data)
    # normalize action data
    action_mean = all_action_data.mean(dim=[0, 1], keepdim=True)
    action_std = all_action_data.std(dim=[0, 1], keepdim=True)
    action_std = torch.clip(action_std, 1e-2, np.inf)  # clipping
    # normalize qpos data
    qpos_mean = all_qpos_data.mean(dim=[0, 1], keepdim=True)
    qpos_std = all_qpos_data.std(dim=[0, 1], keepdim=True)
    qpos_std = torch.clip(qpos_std, 1e-2, np.inf)  # clipping
    stats = {
        "action_mean": action_mean.numpy().squeeze(),
        "action_std": action_std.numpy().squeeze(),
        "qpos_mean": qpos_mean.numpy().squeeze(),
        "qpos_std": qpos_std.numpy().squeeze(),
        # NOTE(review): "qpos" here is the loop variable from the *last*
        # episode read above -- looks intentional (a sample), but confirm.
        "example_qpos": qpos,
    }
    return stats
def load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val):
    """Build train/val dataloaders over a random 80/20 episode split.

    Returns (train_dataloader, val_dataloader, norm_stats, is_sim).
    """
    print(f"\nData from: {dataset_dir}\n")
    # Random 80/20 train/validation split over episode indices.
    train_ratio = 0.8
    shuffled = np.random.permutation(num_episodes)
    n_train = int(train_ratio * num_episodes)
    train_indices, val_indices = shuffled[:n_train], shuffled[n_train:]
    # Normalization statistics are computed over *all* episodes.
    norm_stats = get_norm_stats(dataset_dir, num_episodes)
    # construct dataset and dataloader
    train_dataset = EpisodicDataset(train_indices, dataset_dir, camera_names, norm_stats)
    val_dataset = EpisodicDataset(val_indices, dataset_dir, camera_names, norm_stats)
    loader_kwargs = dict(shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size_train, **loader_kwargs)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size_val, **loader_kwargs)
    return train_dataloader, val_dataloader, norm_stats, train_dataset.is_sim
def compute_dict_mean(epoch_dicts):
    """Average each key across a list of dicts that all share the same keys."""
    num_items = len(epoch_dicts)
    return {k: sum(d[k] for d in epoch_dicts) / num_items for k in epoch_dicts[0]}
def detach_dict(d):
    """Return a copy of *d* with every tensor value detached from autograd."""
    return {k: v.detach() for k, v in d.items()}
def set_seed(seed):
    """Seed both the numpy and torch RNGs for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
| @@ -0,0 +1,22 @@ | |||
| [project] | |||
| name = "dora-act" | |||
| version = "0.0.0" | |||
| authors = [{ name = "Your Name", email = "email@email.com" }] | |||
| description = "dora-act" | |||
| license = { text = "MIT" } | |||
| readme = "README.md" | |||
| requires-python = ">=3.8" | |||
| dependencies = ["dora-rs >= 0.3.9"] | |||
| [dependency-groups] | |||
| dev = ["pytest >=8.1.1", "ruff >=0.9.1"] | |||
| [project.scripts] | |||
| dora-act = "dora_act.main:main" | |||
| [tool.ruff.lint] | |||
| extend-select = [ | |||
| "D", # pydocstyle | |||
| "UP" | |||
| ] | |||
| @@ -0,0 +1,13 @@ | |||
| """Test module for dora_act package.""" | |||
| import pytest | |||
def test_import_main():
    """Importing main and invoking it outside a dataflow should raise."""
    from dora_act.main import main

    # Outside a running Dora dataflow, constructing the node fails with a
    # RuntimeError; reaching that point proves the module imports cleanly.
    with pytest.raises(RuntimeError):
        main()
| @@ -0,0 +1,57 @@ | |||
| # dora-isaacsim | |||
| ## Getting started | |||
| - Install it with uv: | |||
| ```bash | |||
| uv venv -p 3.11 --seed | |||
| uv pip install -e . | |||
| ``` | |||
| ## Contribution Guide | |||
| - Format with [ruff](https://docs.astral.sh/ruff/): | |||
| ```bash | |||
| uv pip install ruff | |||
| uv run ruff check . --fix | |||
| ``` | |||
| - Lint with ruff: | |||
| ```bash | |||
| uv run ruff check . | |||
| ``` | |||
| - Test with [pytest](https://github.com/pytest-dev/pytest) | |||
| ```bash | |||
| uv pip install pytest | |||
| uv run pytest . # Test | |||
| ``` | |||
| ## YAML Specification | |||
| ```yaml | |||
| - id: dora-isaacsim | |||
| build: pip install -e ../../node-hub/dora-isaacsim | |||
| path: ../../node-hub/dora-isaacsim/dora_isaacsim/main.py | |||
| env: | |||
| # substitute to your own "<path of isaacsim>/python.sh" | |||
| ISAAC_PYTHON_PATH: "/home/lv/isaacsim/python.sh" | |||
| CONFIG_NAME: "stack_cube_act" | |||
| inputs: | |||
| request_camera: policy-act/request_camera | |||
| request_joint_pos: policy-act/request_joint_pos | |||
| action: policy-act/action | |||
| outputs: | |||
| - camera | |||
| - joint_pos | |||
| ``` | |||
| ## Examples | |||
| ## License | |||
| dora-isaacsim's code is released under the MIT License. | |||
| @@ -0,0 +1,13 @@ | |||
"""Expose the project README as the package docstring."""

import os

# Resolve README.md one directory above this package.
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")

# Fall back gracefully when the README is not shipped (e.g. in a wheel).
try:
    with open(readme_path, encoding="utf-8") as f:
        __doc__ = f.read()
except FileNotFoundError:
    __doc__ = "README file not found."
| @@ -0,0 +1,6 @@ | |||
"""Allow running the package as ``python -m dora_act``."""

from .main import main

if __name__ == "__main__":
    main()
| @@ -0,0 +1,58 @@ | |||
| # config.yaml | |||
| frequencies: | |||
| control: 20 | |||
| camera: 20 | |||
| sim: 60 | |||
| seed: 110 | |||
| task: | |||
| _target_: src.task.base.BaseTask | |||
| scenary: | |||
| _target_: src.scenary.stack_cube.StackCubeScenary | |||
| usd_file_path: "../../node-hub/dora-isaacsim/dora_isaacsim/assets/stack_cube_franka.usd" | |||
| random: False | |||
| src_cube_cfg: { | |||
| name: cube_red, | |||
| position: [0.511521,0.154694,0.120000], | |||
| orientation: [0.966067,0.000000,0.000000,0.258293], | |||
| prim_path: /World/Cube, | |||
| scale: [0.05, 0.05, 0.05], | |||
| size: 1.0, | |||
| color: [1, 0, 0] | |||
| } | |||
| target_cube_cfg: { | |||
| name: cube_blue, | |||
| position: [0.535,-0.122161,0.130000], | |||
| orientation: [0.966067,0.000000,0.000000,0.258293], | |||
| prim_path: /World/Cube2, | |||
| scale: [0.05, 0.05, 0.05], | |||
| size: 1.1, | |||
| color: [0, 0, 1] | |||
| } | |||
| robot: | |||
| _target_: src.robots.base.BaseRobot | |||
| name: franka | |||
| robot_prim_path: /World/franka | |||
| joints_name: [ | |||
| panda_joint1,panda_joint2,panda_joint3,panda_joint4,panda_joint5,panda_joint6,panda_joint7,panda_finger_joint1 | |||
| ] | |||
| ee_prim_path: /World/franka/panda_hand/tool_center | |||
| gripper_joint_name: panda_finger_joint1 | |||
| controller: | |||
| _target_: src.controller.dora_controller.DoraSubscriberController | |||
| sync: False | |||
| sim_freq: ${frequencies.sim} | |||
| control_freq: ${frequencies.control} | |||
| control_mode: joint | |||
| sensors: | |||
| camera_rgb_wrist: | |||
| _target_: src.sensor.camera.Camera | |||
| camera_prim_path: /World/franka/panda_hand/owl/camera | |||
| sim_freq: ${frequencies.sim} | |||
| sensor_freq: ${frequencies.camera} | |||
| resolution: [640,480] | |||
| joint_sensor0: | |||
| _target_: src.sensor.joint_sensor.JointSensor | |||
| robot_name: franka | |||
| @@ -0,0 +1,9 @@ | |||
"""Launch the Isaac Sim entry point with Isaac's own Python interpreter."""

import os
import subprocess

# Isaac Sim ships its own python.sh; fall back to plain "python" when unset.
python_path = os.getenv("ISAAC_PYTHON_PATH", "python")
isaac_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "start.py")
config_name = os.getenv("CONFIG_NAME", "stack_cube_act")
# Use an argument list (no shell) instead of os.system so paths containing
# spaces or shell metacharacters in the env vars cannot break or inject the
# command line.  check=False mirrors os.system's ignore-exit-code behavior.
subprocess.run([python_path, isaac_path, "--config-name", config_name], check=False)
| @@ -0,0 +1,18 @@ | |||
class BaseController:
    """Skeleton controller that down-samples simulation ticks to control ticks.

    ``forward`` is expected once per simulation step; it accumulates simulated
    time and a control period elapses every ``1 / control_freq`` seconds.
    """

    def __init__(self, sim_freq=60, control_freq=60):
        self.control_dt = 1 / control_freq  # seconds between control updates
        self.sim_dt = 1 / sim_freq  # seconds advanced per simulation step
        self.sim_time = 0  # time accumulated since the last control update

    def spawn(self, *args, **kwargs):
        """Hook for placing the controller in the scene; no-op by default."""
        pass

    def reset(self):
        """Restart the control-time accumulator."""
        self.sim_time = 0

    def forward(self, *args, **kwargs):
        """Advance one sim step; subclasses act when a control period elapses."""
        self.sim_time += self.sim_dt
        if self.sim_time < self.control_dt:
            return
        # do something here (subclass responsibility)
        self.sim_time -= self.control_dt
| @@ -0,0 +1,119 @@ | |||
| import queue | |||
| # import rospy | |||
| # from act_dp_service.msg import DualArmState, SingleArmState | |||
| # from act_dp_service.srv import get_action, get_action_bimanual | |||
| # from std_msgs.msg import ( | |||
| # Float64, | |||
| # Float64MultiArray, | |||
| # MultiArrayDimension, | |||
| # MultiArrayLayout, | |||
| # UInt8MultiArray, | |||
| # ) | |||
| from .base import BaseController | |||
| from dora import Node | |||
class DoraSubscriberController(BaseController):
    """Controller that receives actions through a dora dataflow.

    Each ``forward`` call polls the dora node once for an ``"action"`` input
    and buffers it in a bounded queue; once a full control period has
    elapsed it pops the next action (blocking when ``sync`` is True,
    returning ``None`` on an empty queue otherwise).
    """

    def __init__(self, sync=False, queue_size=1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue_size = queue_size
        self.action_queue = queue.Queue(maxsize=queue_size)
        self.new_available = False
        # When True, forward() blocks until an action arrives.
        self.sync = sync
        self.node = Node()

    def _action_callback(self):
        """Poll the dora node once and enqueue any incoming "action" input."""
        event = self.node.next(0.01)
        if event is None:
            return
        if event["type"] != "INPUT" or event["id"] != "action":
            return
        try:
            self.action_queue.put_nowait(event["value"].to_pylist())
        except queue.Full:
            # Queue already holds an unconsumed action: drop the new one.
            pass

    def forward(self):
        """Advance one sim tick; return the next action or None.

        Returns ``None`` both while waiting for the control period to elapse
        and (in async mode) when no action has arrived yet.
        """
        self._action_callback()
        self.sim_time += self.sim_dt
        if self.sim_time < self.control_dt:
            return None
        self.sim_time -= self.control_dt
        if self.sync:
            # Block until the upstream policy publishes the next action.
            return self.action_queue.get(block=True)
        try:
            return self.action_queue.get_nowait()
        except queue.Empty:
            return None
| # class DualArmROSController(ROSSubscriberController): | |||
| # def process_proprioception(self, observation): | |||
| # # 处理机器人的本体数据 | |||
| # ee_length = observation["ee_pose"].shape[0] // 2 | |||
| # joint_length = observation["joint_pos"].shape[0] // 2 | |||
| # states = {} | |||
| # for arm_side in ["left", "right"]: | |||
| # if arm_side == "left": | |||
| # s = slice(None, ee_length) | |||
| # s2 = slice(None, joint_length) | |||
| # else: | |||
| # s = slice(ee_length, None) | |||
| # s2 = slice(joint_length, None) | |||
| # ee_pose = observation["ee_pose"][s] | |||
| # ee_pose = Float64MultiArray(data=list(ee_pose)) | |||
| # joint_pos = observation["joint_pos"][s2] | |||
| # joint_pos = Float64MultiArray(data=list(joint_pos)) | |||
| # gripper_width = Float64(data=observation["joint_pos"][s2][-1]) | |||
| # arm_state = SingleArmState( | |||
| # ee_pose, | |||
| # joint_pos, | |||
| # gripper_width, | |||
| # ) | |||
| # states[arm_side] = arm_state | |||
| # return states | |||
| # def get_action_from_ros(self, states, rgb_data, reset): | |||
| # rospy.wait_for_service(self.ros_service_name) | |||
| # get_control_action = rospy.ServiceProxy(self.ros_service_name, get_action_bimanual) | |||
| # target_action = get_control_action(states=states, font_camera=rgb_data, reset=reset) | |||
| # return target_action | |||
| # def forward(self, observation): | |||
| # rgb_data = self.process_rgb_data(observation["env_sensors"]["camera_rgb_front"]) | |||
| # arm_states_dict = self.process_proprioception(observation["robot"]["states"]) | |||
| # left_arm_state = arm_states_dict["left"] | |||
| # right_arm_states = arm_states_dict["right"] | |||
| # arm_states = DualArmState(left_arm_state, right_arm_states) | |||
| # reset = Float64(data=observation["reset"]) | |||
| # target_action = self.get_action_from_ros(arm_states, rgb_data, reset) | |||
| # return np.array(target_action.actions.data) | |||
| @@ -0,0 +1,417 @@ | |||
| <?xml version="1.0" encoding="utf-8"?> | |||
| <!-- =================================================================================== --> | |||
| <!-- | This document was autogenerated by xacro from lula_franka.urdf.xacro | --> | |||
| <!-- | EDITING THIS FILE BY HAND IS NOT RECOMMENDED | --> | |||
| <!-- =================================================================================== --> | |||
| <robot name="franka" xmlns:xacro="http://www.ros.org/wiki/xacro"> | |||
| <material name="aluminum"> | |||
| <color rgba="0.5 0.5 0.5 1"/> | |||
| </material> | |||
| <!-- camera body, with origin at bottom screw mount --> | |||
| <joint name="camera_joint" type="fixed"> | |||
| <origin rpy="0 -1.57079632679 3.14159265359" xyz="0.0358116 0 0.0360562"/> | |||
| <parent link="panda_hand"/> | |||
| <child link="camera_bottom_screw_frame"/> | |||
| </joint> | |||
| <link name="camera_bottom_screw_frame"/> | |||
| <joint name="camera_link_joint" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 0.02 0.0115"/> | |||
| <parent link="camera_bottom_screw_frame"/> | |||
| <child link="camera_link"/> | |||
| </joint> | |||
| <link name="camera_link"> | |||
| <visual> | |||
| <origin rpy="1.57079632679 0 1.57079632679" xyz="0.00987 -0.02 0"/> | |||
| <geometry> | |||
| <box size="0.099 0.023 0.02005"/> | |||
| <!--<mesh filename="package://realsense2_camera/meshes/d415.stl" />--> | |||
| </geometry> | |||
| <material name="aluminum"/> | |||
| </visual> | |||
| <collision> | |||
| <origin rpy="0 0 0" xyz="0 -0.02 0"/> | |||
| <geometry> | |||
| <box size="0.02005 0.099 0.023"/> | |||
| </geometry> | |||
| </collision> | |||
| <inertial> | |||
| <!-- The following are not reliable values, and should not be used for modeling --> | |||
| <mass value="0.564"/> | |||
| <origin xyz="0 0 0"/> | |||
| <inertia ixx="0.003881243" ixy="0.0" ixz="0.0" iyy="0.000498940" iyz="0.0" izz="0.003879257"/> | |||
| </inertial> | |||
| </link> | |||
| <!-- camera depth joints and links --> | |||
| <joint name="camera_depth_joint" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 0 0"/> | |||
| <parent link="camera_link"/> | |||
| <child link="camera_depth_frame"/> | |||
| </joint> | |||
| <link name="camera_depth_frame"/> | |||
| <joint name="camera_depth_optical_joint" type="fixed"> | |||
| <origin rpy="-1.57079632679 0 -1.57079632679" xyz="0 0 0"/> | |||
| <parent link="camera_depth_frame"/> | |||
| <child link="camera_depth_optical_frame"/> | |||
| </joint> | |||
| <link name="camera_depth_optical_frame"/> | |||
| <!-- camera left IR joints and links --> | |||
| <joint name="camera_left_ir_joint" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 0.0 0"/> | |||
| <parent link="camera_depth_frame"/> | |||
| <child link="camera_left_ir_frame"/> | |||
| </joint> | |||
| <link name="camera_left_ir_frame"/> | |||
| <joint name="camera_left_ir_optical_joint" type="fixed"> | |||
| <origin rpy="-1.57079632679 0 -1.57079632679" xyz="0 0 0"/> | |||
| <parent link="camera_left_ir_frame"/> | |||
| <child link="camera_left_ir_optical_frame"/> | |||
| </joint> | |||
| <link name="camera_left_ir_optical_frame"/> | |||
| <!-- camera right IR joints and links --> | |||
| <joint name="camera_right_ir_joint" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 -0.055 0"/> | |||
| <parent link="camera_depth_frame"/> | |||
| <child link="camera_right_ir_frame"/> | |||
| </joint> | |||
| <link name="camera_right_ir_frame"/> | |||
| <joint name="camera_right_ir_optical_joint" type="fixed"> | |||
| <origin rpy="-1.57079632679 0 -1.57079632679" xyz="0 0 0"/> | |||
| <parent link="camera_right_ir_frame"/> | |||
| <child link="camera_right_ir_optical_frame"/> | |||
| </joint> | |||
| <link name="camera_right_ir_optical_frame"/> | |||
| <!-- camera color joints and links --> | |||
| <joint name="camera_color_joint" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 0.015 0"/> | |||
| <parent link="camera_depth_frame"/> | |||
| <child link="camera_color_frame"/> | |||
| </joint> | |||
| <link name="camera_color_frame"/> | |||
| <joint name="camera_color_optical_joint" type="fixed"> | |||
| <origin rpy="-1.57079632679 0 -1.57079632679" xyz="0 0 0"/> | |||
| <parent link="camera_color_frame"/> | |||
| <child link="camera_color_optical_frame"/> | |||
| </joint> | |||
| <link name="camera_color_optical_frame"/> | |||
| <link name="panda_link0"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link0.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link0.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <link name="panda_link1"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link1.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link1.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint1" type="revolute"> | |||
| <origin rpy="0 0 0" xyz="0 0 0.333"/> | |||
| <parent link="panda_link0"/> | |||
| <child link="panda_link1"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="87" lower="-2.8973" upper="2.8973" velocity="2.1750"/> | |||
| </joint> | |||
| <link name="panda_link2"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link2.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link2.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint2" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-1.7628" soft_upper_limit="1.7628"/> | |||
| <origin rpy="-1.57079632679 0 0" xyz="0 0 0"/> | |||
| <parent link="panda_link1"/> | |||
| <child link="panda_link2"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="87" lower="-1.7628" upper="1.7628" velocity="2.1750"/> | |||
| </joint> | |||
| <link name="panda_link3"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link3.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link3.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint3" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/> | |||
| <origin rpy="1.57079632679 0 0" xyz="0 -0.316 0"/> | |||
| <parent link="panda_link2"/> | |||
| <child link="panda_link3"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="87" lower="-2.8973" upper="2.8973" velocity="2.1750"/> | |||
| </joint> | |||
| <link name="panda_link4"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link4.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link4.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint4" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-3.0718" soft_upper_limit="-0.0698"/> | |||
| <origin rpy="1.57079632679 0 0" xyz="0.0825 0 0"/> | |||
| <parent link="panda_link3"/> | |||
| <child link="panda_link4"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="87" lower="-3.0718" upper="-0.0698" velocity="2.1750"/> | |||
| </joint> | |||
| <link name="panda_link5"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link5.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link5.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint5" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/> | |||
| <origin rpy="-1.57079632679 0 0" xyz="-0.0825 0.384 0"/> | |||
| <parent link="panda_link4"/> | |||
| <child link="panda_link5"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="12" lower="-2.8973" upper="2.8973" velocity="2.6100"/> | |||
| </joint> | |||
| <link name="panda_link6"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link6.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link6.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint6" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-0.0175" soft_upper_limit="3.7525"/> | |||
| <origin rpy="1.57079632679 0 0" xyz="0 0 0"/> | |||
| <parent link="panda_link5"/> | |||
| <child link="panda_link6"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="12" lower="-0.0175" upper="3.7525" velocity="2.6100"/> | |||
| </joint> | |||
| <link name="panda_link7"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/link7.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/link7.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_joint7" type="revolute"> | |||
| <safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/> | |||
| <origin rpy="1.57079632679 0 0" xyz="0.088 0 0"/> | |||
| <parent link="panda_link6"/> | |||
| <child link="panda_link7"/> | |||
| <axis xyz="0 0 1"/> | |||
| <limit effort="12" lower="-2.8973" upper="2.8973" velocity="2.6100"/> | |||
| </joint> | |||
| <link name="panda_link8"/> | |||
| <joint name="panda_joint8" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0 0 0.107"/> | |||
| <parent link="panda_link7"/> | |||
| <child link="panda_link8"/> | |||
| <axis xyz="0 0 0"/> | |||
| </joint> | |||
| <joint name="panda_hand_joint" type="fixed"> | |||
| <parent link="panda_link8"/> | |||
| <child link="panda_hand"/> | |||
| <origin rpy="0 0 -0.785398163397" xyz="0 0 0"/> | |||
| </joint> | |||
| <link name="panda_hand"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/hand.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/hand.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <link name="panda_leftfinger"> | |||
| <visual> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/finger.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/finger.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <link name="panda_rightfinger"> | |||
| <visual> | |||
| <origin rpy="0 0 3.14159265359" xyz="0 0 0"/> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/visual/finger.dae"/> | |||
| </geometry> | |||
| </visual> | |||
| <collision> | |||
| <origin rpy="0 0 3.14159265359" xyz="0 0 0"/> | |||
| <geometry> | |||
| <mesh filename="package://franka_description/meshes/collision/finger.stl"/> | |||
| </geometry> | |||
| </collision> | |||
| </link> | |||
| <joint name="panda_finger_joint1" type="prismatic"> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_leftfinger"/> | |||
| <origin rpy="0 0 0" xyz="0 0 0.0584"/> | |||
| <axis xyz="0 1 0"/> | |||
| <limit effort="20" lower="0.0" upper="0.04" velocity="0.2"/> | |||
| </joint> | |||
| <joint name="panda_finger_joint2" type="prismatic"> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_rightfinger"/> | |||
| <origin rpy="0 0 0" xyz="0 0 0.0584"/> | |||
| <axis xyz="0 -1 0"/> | |||
| <limit effort="20" lower="0.0" upper="0.04" velocity="0.2"/> | |||
| <mimic joint="panda_finger_joint1"/> | |||
| </joint> | |||
| <link name="base_link"/> | |||
| <joint name="base_link_robot" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.0 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="base_link"/> | |||
| <child link="panda_link0"/> | |||
| </joint> | |||
| <link name="right_gripper"/> | |||
| <joint name="right_gripper" type="fixed"> | |||
| <origin rpy="0 0 2.35619449019" xyz="0.0 0 0.1"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_link8"/> | |||
| <child link="right_gripper"/> | |||
| </joint> | |||
| <link name="panda_rightfingertip"/> | |||
| <joint name="panda_rightfingertip" type="fixed"> | |||
| <origin rpy="0 0 3.14159265359" xyz="0.0 0 0.045"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_rightfinger"/> | |||
| <child link="panda_rightfingertip"/> | |||
| </joint> | |||
| <link name="panda_leftfingertip"/> | |||
| <joint name="panda_leftfingertip" type="fixed"> | |||
| <origin rpy="0 0 3.14159265359" xyz="0.0 0 0.045"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_leftfinger"/> | |||
| <child link="panda_leftfingertip"/> | |||
| </joint> | |||
| <link name="panda_forearm_end_pt"/> | |||
| <joint name="panda_forearm_end_pt" type="fixed"> | |||
| <origin rpy="0 0 0.0" xyz="-0.09 0.02 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_link4"/> | |||
| <child link="panda_forearm_end_pt"/> | |||
| </joint> | |||
| <link name="panda_forearm_mid_pt"/> | |||
| <joint name="panda_forearm_mid_pt" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.175 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_forearm_end_pt"/> | |||
| <child link="panda_forearm_mid_pt"/> | |||
| </joint> | |||
| <link name="panda_forearm_mid_pt_shifted"/> | |||
| <joint name="panda_forearm_mid_pt_shifted" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.0 -0.05"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_forearm_mid_pt"/> | |||
| <child link="panda_forearm_mid_pt_shifted"/> | |||
| </joint> | |||
| <link name="panda_forearm_distal"/> | |||
| <joint name="panda_forearm_distal" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.1 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_link5"/> | |||
| <child link="panda_forearm_distal"/> | |||
| </joint> | |||
| <link name="panda_wrist_end_pt"/> | |||
| <joint name="panda_wrist_end_pt" type="fixed"> | |||
| <origin rpy="0 0 0.0" xyz="0.0 0.0 -0.07"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_link7"/> | |||
| <child link="panda_wrist_end_pt"/> | |||
| </joint> | |||
| <link name="panda_face_left"/> | |||
| <joint name="panda_face_left" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.07 0.04"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_face_left"/> | |||
| </joint> | |||
| <link name="panda_face_right"/> | |||
| <joint name="panda_face_right" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 -0.07 0.04"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_face_right"/> | |||
| </joint> | |||
| <link name="panda_face_back_left"/> | |||
| <joint name="panda_face_back_left" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 0.07 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_face_back_left"/> | |||
| </joint> | |||
| <link name="panda_face_back_right"/> | |||
| <joint name="panda_face_back_right" type="fixed"> | |||
| <origin rpy="0 0 0" xyz="0.0 -0.07 0.0"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_face_back_right"/> | |||
| </joint> | |||
| <!--<link name="left_gripper"/> | |||
| <joint name="left_gripper" type="fixed"> | |||
| <origin rpy="0 0 ${pi/2}" xyz="0 0 0.13"/> | |||
| <axis xyz="0 0 1"/> | |||
| <parent link="gripper_l_base"/> | |||
| <child link="left_gripper"/> | |||
| </joint> | |||
| --> | |||
| <link name="panda_ee"/> | |||
| <joint name="end_effector" type="fixed"> | |||
| <parent link="panda_hand"/> | |||
| <child link="panda_ee"/> | |||
<origin xyz="0 0 0.1018" rpy="0 0 0"/> <!-- offset of the end effector relative to panda_hand -->
| </joint> | |||
| </robot> | |||
| @@ -0,0 +1,165 @@ | |||
| # Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. | |||
| # | |||
| # NVIDIA CORPORATION and its licensors retain all intellectual property | |||
| # and proprietary rights in and to this software, related documentation | |||
| # and any modifications thereto. Any use, reproduction, disclosure or | |||
| # distribution of this software and related documentation without an express | |||
| # license agreement from NVIDIA CORPORATION is strictly prohibited. | |||
| # The robot descriptor defines the generalized coordinates and how to map those | |||
| # to the underlying URDF dofs. | |||
| api_version: 1.0 | |||
| # Defines the generalized coordinates. Each generalized coordinate is assumed | |||
| # to have an entry in the URDF, except when otherwise specified below under | |||
| # cspace_urdf_bridge | |||
| cspace: | |||
| - panda_joint1 | |||
| - panda_joint2 | |||
| - panda_joint3 | |||
| - panda_joint4 | |||
| - panda_joint5 | |||
| - panda_joint6 | |||
| - panda_joint7 | |||
| root_link: base_link | |||
| default_q: [ | |||
| # Original version | |||
| # 0.00, 0.00, 0.00, -1.57, 0.00, 1.50, 0.75 | |||
| # New config | |||
| 0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75 | |||
| ] | |||
| acceleration_limits: [15.0, 7.5, 10.0, 12.5, 15.0, 20.0, 20.0] | |||
| jerk_limits: [7500.0, 3750.0, 5000.0, 6250.0, 7500.0, 10000.0, 10000.0] | |||
| # Most dimensions of the cspace have a direct corresponding element | |||
| # in the URDF. This list of rules defines how unspecified coordinates | |||
| # should be extracted. | |||
| # cspace_to_urdf_rules: | |||
| # - {name: panda_finger_joint1, rule: fixed, value: 0.04} | |||
| # - {name: panda_finger_joint2, rule: fixed, value: 0.04} | |||
| collision_spheres: | |||
| - panda_link0: | |||
| - "center": [0.0, 0.0, 0.05] | |||
| "radius": 0.045 | |||
| - panda_link1: | |||
| - "center": [0.0, -0.08, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.0, -0.03, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.0, -0.12] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.0, -0.17] | |||
| "radius": 0.06 | |||
| - panda_link2: | |||
| - "center": [0.0, 0.0, 0.03] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.0, 0.08] | |||
| "radius": 0.06 | |||
| - "center": [0.0, -0.12, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.0, -0.17, 0.0] | |||
| "radius": 0.06 | |||
| - panda_link3: | |||
| - "center": [0.0, 0.0, -0.06] | |||
| "radius": 0.05 | |||
| - "center": [0.0, 0.0, -0.1] | |||
| "radius": 0.06 | |||
| - "center": [0.08, 0.06, 0.0] | |||
| "radius": 0.055 | |||
| - "center": [0.08, 0.02, 0.0] | |||
| "radius": 0.055 | |||
| - panda_link4: | |||
| - "center": [0.0, 0.0, 0.02] | |||
| "radius": 0.055 | |||
| - "center": [0.0, 0.0, 0.06] | |||
| "radius": 0.055 | |||
| - "center": [-0.08, 0.095, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [-0.08, 0.06, 0.0] | |||
| "radius": 0.055 | |||
| - panda_link5: | |||
| - "center": [0.0, 0.055, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.075, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.000, -0.22] | |||
| "radius": 0.06 | |||
| - "center": [0.0, 0.05, -0.18] | |||
| "radius": 0.05 | |||
| - "center": [0.01, 0.08, -0.14] | |||
| "radius": 0.025 | |||
| - "center": [0.01, 0.085, -0.11] | |||
| "radius": 0.025 | |||
| - "center": [0.01, 0.09, -0.08] | |||
| "radius": 0.025 | |||
| - "center": [0.01, 0.095, -0.05] | |||
| "radius": 0.025 | |||
| - "center": [-0.01, 0.08, -0.14] | |||
| "radius": 0.025 | |||
| - "center": [-0.01, 0.085, -0.11] | |||
| "radius": 0.025 | |||
| - "center": [-0.01, 0.09, -0.08] | |||
| "radius": 0.025 | |||
| - "center": [-0.01, 0.095, -0.05] | |||
| "radius": 0.025 | |||
| - panda_link6: | |||
| - "center": [0.0, 0.0, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.08, 0.03, 0.0] | |||
| "radius": 0.06 | |||
| - "center": [0.08, -0.01, 0.0] | |||
| "radius": 0.06 | |||
| - panda_link7: | |||
| - "center": [0.0, 0.0, 0.07] | |||
| "radius": 0.05 | |||
| - "center": [0.02, 0.04, 0.08] | |||
| "radius": 0.025 | |||
| - "center": [0.04, 0.02, 0.08] | |||
| "radius": 0.025 | |||
| - "center": [0.04, 0.06, 0.085] | |||
| "radius": 0.02 | |||
| - "center": [0.06, 0.04, 0.085] | |||
| "radius": 0.02 | |||
| - panda_hand: | |||
| - "center": [0.0, -0.075, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, -0.045, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, -0.015, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, 0.015, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, 0.045, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, 0.075, 0.01] | |||
| "radius": 0.028 | |||
| - "center": [0.0, -0.075, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, -0.045, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, -0.015, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, 0.015, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, 0.045, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, 0.075, 0.03] | |||
| "radius": 0.026 | |||
| - "center": [0.0, -0.075, 0.05] | |||
| "radius": 0.024 | |||
| - "center": [0.0, -0.045, 0.05] | |||
| "radius": 0.024 | |||
| - "center": [0.0, -0.015, 0.05] | |||
| "radius": 0.024 | |||
| - "center": [0.0, 0.015, 0.05] | |||
| "radius": 0.024 | |||
| - "center": [0.0, 0.045, 0.05] | |||
| "radius": 0.024 | |||
| - "center": [0.0, 0.075, 0.05] | |||
| "radius": 0.024 | |||
| @@ -0,0 +1,165 @@ | |||
| import os | |||
| from PIL import Image | |||
class BaseDataCollect:
    """Base harness that runs an Isaac Sim task and records demonstration data.

    Wires together a scene (``scenary``), a robot, and named sensors; steps
    the simulation, stores per-step observations/actions, and saves camera
    frames under a fresh ``epo_<n>`` directory inside ``save_dir``.
    """

    def __init__(
        self,
        scenary,
        robot,
        sensors=None,
        save_dir="./output_data",
        save_trajectory=True,
        *args,
        **kwargs,
    ):
        """Create the collector and immediately build (load + reset) the task.

        Args:
            scenary: scene wrapper exposing ``load_stage``/``world``/``reset``/``step``.
            robot: robot wrapper exposing ``spawn``/``reset``/``compute_action``/
                ``apply_action``/``get_states``/``get_trajectory``.
            sensors: mapping of name -> sensor, each exposing
                ``spawn``/``reset``/``get_data``. Defaults to an empty dict.
            save_dir: base output directory; data goes into a new ``epo_<n>`` child.
            save_trajectory: if True, also record the expert end-effector
                trajectory as the action label.
        """
        self.scenary = scenary
        self.robot = robot
        # None sentinel instead of a mutable default ``{}`` (which would be
        # shared across all instances).
        self.sensors = {} if sensors is None else sensors
        self.save_trajectory = save_trajectory
        self.img_count = 0
        self.save_dir = self.get_next_episode_dir(save_dir)
        os.makedirs(self.save_dir, exist_ok=True)
        self.reset_needed = False  # set to true if you restart the simulator
        self.done_flag = False
        # Build the task right away so the object is ready to run.
        self.build()
        self.collected_data = []

    def build(self):
        """Load the stage, spawn robot and sensors, then reset everything."""
        # 1. load the scene
        self.scenary.load_stage()
        # 2. create the robot
        self.robot.spawn(self.scenary.world)
        # 3. create the sensors
        for _, sensor in self.sensors.items():
            sensor.spawn(self.scenary.world)
        # 4. reset the environment
        self.reset()

    def get_raw_data(self):
        """Collect one frame of sensor readings and robot proprioception."""
        data = {"sensors": {}, "robot": {}, "action": {}}
        # Sensor observations.
        for name, sensor in self.sensors.items():
            data["sensors"][name] = sensor.get_data()
        # Robot proprioceptive state.
        data["robot"]["states"] = self.robot.get_states()
        return data

    def get_next_episode_dir(self, base_dir):
        """Create and return a new ``epo_<n>`` directory under *base_dir*.

        Episode numbers continue from the highest existing ``epo_<n>``,
        starting at 0 on the first run.
        """
        os.makedirs(base_dir, exist_ok=True)  # ensure the base directory exists
        episode_ids = [
            int(d[4:]) for d in os.listdir(base_dir) if d.startswith("epo_") and d[4:].isdigit()
        ]
        new_index = max(episode_ids) + 1 if episode_ids else 0
        new_episode_dir = os.path.join(base_dir, f"epo_{new_index}")
        os.makedirs(new_episode_dir)
        return new_episode_dir

    def reset(self):
        """Reset scene, robot and every sensor to their initial state."""
        self.scenary.reset()
        self.robot.reset()
        for _, sensor in self.sensors.items():
            sensor.reset()

    def save_data(self, *args, **kwargs):
        """Persist the collected episode; subclasses implement the format."""
        pass

    def step(self, reset):
        """Advance one control step: observe, act, and record the transition."""
        # Grab sensor + proprioceptive data for this frame.
        data = self.get_raw_data()
        # Compute the next action.
        next_action = self.robot.compute_action()
        if self.save_trajectory:
            # Store the expert trajectory directly as the action label.
            trajectory = self.robot.get_trajectory()
            data["action"]["ee_pose"] = trajectory
        data["action"]["joint_pos"] = next_action
        self.collected_data.append(data)
        if self.robot.controller.is_done():
            print("done picking and placing")
            self.save_data(
                self.save_dir,
                self.collected_data,
            )
            self.done_flag = True
        # Drive the robot.
        self.robot.apply_action(next_action)
        # Dump camera-like sensor frames to disk.
        for name, sensor_data in data["sensors"].items():
            if "camera" in name or "img" in name or "image" in name:
                self.save_img(sensor_data, name)
        self.img_count += 1

    def run(self, simulation_app):
        """Main loop: step the sim until the task reports done, then close."""
        world = self.scenary.world
        warm_up = 100  # frames to let physics settle before collecting
        i = 0
        step_count = 0
        while simulation_app.is_running():
            # Advance the simulation and render.
            self.scenary.step(render=True)
            if world.is_stopped() and not self.reset_needed:
                self.reset_needed = True
            if i < warm_up:
                i += 1
                continue
            if world.is_playing():
                reset = self.reset_needed
                if self.reset_needed:
                    self.reset()
                    self.replay_count = 0
                    i = 0
                    self.reset_needed = False
                self.step(reset)
                step_count += 1
                i = i + 1
                if self.done_flag:
                    break
        simulation_app.close()

    def save_img(self, rgb_data, sensor_name="camera"):
        """Save one RGB frame as ``<save_dir>/camera/<sensor>_<n>.png``.

        Existing frames are never overwritten; a notice is printed instead.
        """
        camera_dir = os.path.join(self.save_dir, "camera")
        os.makedirs(camera_dir, exist_ok=True)
        img = Image.fromarray(rgb_data)
        file_path = os.path.join(camera_dir, f"{sensor_name}_{self.img_count}.png")
        if os.path.exists(file_path):
            print(f"Frame {file_path} already recorded. Skipping save.")
        else:
            img.save(file_path)
| @@ -0,0 +1,164 @@ | |||
| import os | |||
| import numpy as np | |||
| from isaacsim.core.utils.rotations import euler_angles_to_quat | |||
| from .base import BaseDataCollect | |||
| class StackCubeDataCollect(BaseDataCollect): | |||
    def __init__(self, *args, **kwargs):
        """Randomize the two cubes' poses and script the expert pick-and-place trajectory."""
        super().__init__(*args, **kwargs)
        # Randomization ranges for cube placement (world frame).
        x_range = (0.5, 0.6)  # X-axis range
        x_range_shifted = (x_range[0] + 0.05, x_range[1] + 0.05)
        y_range = (-0.15, 0.15)  # Y-axis range
        # Random yaw angle (radians).
        yaw = np.random.uniform(-np.pi / 4, np.pi / 4)
        # Euler angles (roll, pitch, yaw) -> quaternion [w, x, y, z]
        quat = euler_angles_to_quat([0, 0, yaw])  # rotation about Z only
        pos_red = np.array([np.random.uniform(*x_range), np.random.uniform(*y_range), 0.12])
        pos_green = np.array(
            [np.random.uniform(*x_range_shifted), np.random.uniform(*y_range), 0.12]
        )
        # Re-sample until the two cubes are at least 0.1 apart.
        # NOTE(review): the re-sample draws from x_range, not x_range_shifted — confirm intended.
        while np.linalg.norm(pos_red - pos_green) < 0.1:
            pos_green = np.array([np.random.uniform(*x_range), np.random.uniform(*y_range), 0.12])
        self.cube_red_cfg = {
            "name": "cube",
            "position": pos_red,
            "orientation": quat,
            "prim_path": "/World/Cube",
            "scale": np.array([0.05, 0.05, 0.05]),
            "size": 1.0,
            "color": np.array([1, 0, 0]),
        }
        self.cube_green_cfg = {
            "name": "cube2",
            "position": pos_green,
            "orientation": quat,
            "prim_path": "/World/Cube2",
            "scale": np.array([0.05, 0.05, 0.05]),
            "size": 1.0,
            # NOTE(review): named "green" but RGB [0, 0, 1] is blue — confirm which is intended.
            "color": np.array([0, 0, 1]),
        }
        # Current end-effector pose; assumes layout [x, y, z, q0, q1, q2, q3] — TODO confirm.
        states = self.robot.get_ee_pose()
        init_pos = states[:3]
        init_orientation = states[3:7]
        # NOTE(review): component shuffle of `quat`; presumably re-expresses the cube yaw in the
        # gripper's grasp orientation — confirm against the quaternion convention in use.
        gripper_quat = np.array(
            [
                -quat[1],
                quat[0],
                quat[3],
                -quat[2],
            ]
        )
        # Keyframed expert trajectory: t is the step index, gripper 1.0 = open, 0.1 = closed.
        trajectory = [
            {"t": 0, "xyz": init_pos, "quat": init_orientation, "gripper": 1.0},  # Start
            {
                "t": 40,
                "xyz": [pos_red[0], pos_red[1], pos_red[2] + 0.20],
                "quat": init_orientation,
                "gripper": 1.0,
            },  # approach
            {
                "t": 60,
                "xyz": [pos_red[0], pos_red[1], pos_red[2] - 0.02],
                "quat": gripper_quat,
                "gripper": 1.0,
            },  # descend
            {
                "t": 68,
                "xyz": [pos_red[0], pos_red[1], pos_red[2] - 0.02],
                "quat": gripper_quat,
                "gripper": 0.1,
            },  # grasp
            {
                "t": 93,
                "xyz": [pos_green[0], pos_green[1], pos_green[2] + 0.20],
                "quat": gripper_quat,
                "gripper": 0.1,
            },  # move
            {
                "t": 112,
                "xyz": [pos_green[0], pos_green[1], pos_green[2] + 0.10],
                "quat": gripper_quat,
                "gripper": 0.1,
            },  # descend
            {
                "t": 125,
                "xyz": [pos_green[0], pos_green[1], pos_green[2] + 0.10],
                "quat": gripper_quat,
                "gripper": 1.0,
            },  # release
        ]
        self.scenary.add_cube(self.cube_red_cfg)
        self.scenary.add_cube(self.cube_green_cfg)
        self.robot.set_trajectory(trajectory)
        self.reset()
| def save_data(self, output_dir, data_list, cube_pose1=None, cube_pose2=None): | |||
| cube_pose1 = np.concatenate( | |||
| [self.cube_red_cfg["position"], self.cube_red_cfg["orientation"]] | |||
| ) | |||
| cube_pose2 = np.concatenate( | |||
| [self.cube_green_cfg["position"], self.cube_green_cfg["orientation"]] | |||
| ) | |||
| # 创建目录结构 | |||
| meta_dir = os.path.join(output_dir, "meta") | |||
| data_dir = os.path.join(output_dir, "data") | |||
| os.makedirs(meta_dir, exist_ok=True) | |||
| os.makedirs(data_dir, exist_ok=True) | |||
| ee_pose_list = [] | |||
| joint_pos_list = [] | |||
| gripper_list = [] | |||
| action_ee_pose_list = [] | |||
| action_joint_pose_list = [] | |||
| action_gripper_list = [] | |||
| for t in range(len(data_list)): | |||
| ee_pose = data_list[t]["robot"]["states"]["ee_pose"] | |||
| joint_pos = data_list[t]["robot"]["states"]["joint_pos"] | |||
| action_ee_pose = data_list[t]["action"]["ee_pose"] | |||
| action_joint_pose = data_list[t]["action"]["joint_pos"] | |||
| ee_pose_list.append(ee_pose) | |||
| joint_pos_list.append(joint_pos) | |||
| gripper_list.append(joint_pos[-1]) | |||
| action_ee_pose_list.append(action_ee_pose) | |||
| action_joint_pose_list.append(action_joint_pose) | |||
| action_gripper_list.append(action_joint_pose[-1]) | |||
| # 存储机器人初始关节pos数据 | |||
| np.savetxt(os.path.join(data_dir, "ee_pose.txt"), ee_pose_list, fmt="%.6f") | |||
| np.savetxt(os.path.join(data_dir, "joint_pos.txt"), joint_pos_list, fmt="%.6f") | |||
| np.savetxt(os.path.join(data_dir, "gripper_width.txt"), gripper_list, fmt="%.6f") | |||
| # 存储动作 | |||
| np.savetxt(os.path.join(data_dir, "action_ee_pose.txt"), action_ee_pose_list, fmt="%.6f") | |||
| np.savetxt( | |||
| os.path.join(data_dir, "action_joint_pos.txt"), action_joint_pose_list, fmt="%.6f" | |||
| ) | |||
| np.savetxt( | |||
| os.path.join(data_dir, "action_gripper_width.txt"), action_gripper_list, fmt="%.6f" | |||
| ) | |||
| if cube_pose1 is not None: | |||
| np.savetxt(os.path.join(data_dir, "cube_red"), cube_pose1.reshape(1, -1), fmt="%.6f") | |||
| if cube_pose2 is not None: | |||
| np.savetxt(os.path.join(data_dir, "cube_blue"), cube_pose2.reshape(1, -1), fmt="%.6f") | |||
| print(f"Data saved successfully in {output_dir}") | |||
| @@ -0,0 +1,188 @@ | |||
| import os | |||
| import numpy as np | |||
| from isaacsim.core.utils.types import ArticulationAction | |||
| from isaacsim.robot_motion.motion_generation import ( | |||
| ArticulationKinematicsSolver, | |||
| LulaKinematicsSolver, | |||
| ) | |||
| from omni.isaac.core.prims import RigidPrim, XFormPrim | |||
| from omni.isaac.core.robots import Robot | |||
class BaseRobot:
    """Wrapper around an Isaac Sim articulation.

    Handles spawning the robot into a World, inverse kinematics via Lula,
    joint-space control, and state queries (joints, end effector, gripper).
    """

    def __init__(
        self,
        robot_prim_path,
        name="franka",
        joints_name=None,
        ee_prim_path=None,
        gripper_joint_name=None,
        controller=None,
        control_mode="joint",
        kinematics_solver_cfg=None,
    ):
        """Store configuration; the articulation itself is created in spawn().

        Args:
            robot_prim_path: USD prim path of the robot articulation.
            name: scene-registry name for the robot.
            joints_name: names of the controlled joints. Bug fix: was a mutable
                default list shared across all instances; now defaults to a
                fresh [] per instance.
            ee_prim_path: USD prim path of the end effector.
            gripper_joint_name: DOF name of the gripper joint.
            controller: optional trajectory controller providing
                spawn/reset/forward/set_trajectory/get_trajectory.
            control_mode: "joint" or "end_effector_control".
            kinematics_solver_cfg: Lula IK config with keys end_effector_name,
                kinematics_config_dir, robot_description_path, urdf_path.
                Bug fix: was a mutable default dict; now defaults to {}.
        """
        self.robot_prim_path = robot_prim_path
        self.robot = None
        self.name = name
        self.joints_name = [] if joints_name is None else joints_name
        self.joint_indices = []
        self.ee_prim_path = ee_prim_path  # prim path of the EE in the USD stage
        self.init_ee_pose = None  # EE pose at the initial time step
        self.init_joint_pos = None  # joint positions at the initial time step
        self.gripper_joint_name = gripper_joint_name
        self.controller = controller
        self.control_mode = control_mode
        self.kinematics_solver_cfg = (
            {} if kinematics_solver_cfg is None else kinematics_solver_cfg
        )

    def spawn(self, world):
        """Add the robot to the world's scene and set up controller/IK solver."""
        self.robot = world.scene.add(Robot(prim_path=self.robot_prim_path, name=self.name))
        if self.controller is not None:
            self.controller.spawn(self)
        if self.kinematics_solver_cfg:
            self.kinematics_solver, self.articulation_kinematics_solver = self.init_IK_controller(
                self.robot,
                self.kinematics_solver_cfg["end_effector_name"],
                self.kinematics_solver_cfg["kinematics_config_dir"],
                self.kinematics_solver_cfg["robot_description_path"],
                self.kinematics_solver_cfg["urdf_path"],
            )

    def init_IK_controller(
        self,
        articulation,
        end_effector_name,
        kinematics_config_dir,
        robot_description_path,
        urdf_path,
    ):
        """Build the Lula IK solver pair for the given articulation.

        Returns:
            (LulaKinematicsSolver, ArticulationKinematicsSolver) tuple.
        """
        kinematics_solver = LulaKinematicsSolver(
            robot_description_path=os.path.join(kinematics_config_dir, robot_description_path),
            urdf_path=os.path.join(kinematics_config_dir, urdf_path),
        )
        articulation_kinematics_solver = ArticulationKinematicsSolver(
            articulation, kinematics_solver, end_effector_name
        )
        return kinematics_solver, articulation_kinematics_solver

    def compute_IK(
        self,
        target_position,
        target_orientation=None,
        position_tolerance=None,
        orientation_tolerance=None,
        frame="world",
    ):
        """
        Compute inverse kinematics for the end effector frame using the current robot position as a warm start. The result is returned
        in an articulation action that can be directly applied to the robot.

        Args:
            target_position (np.array): target translation of the target frame (in stage units) relative to the USD stage origin
            target_orientation (np.array): target orientation of the target frame relative to the USD stage global frame. Defaults to None.
            position_tolerance (float): l-2 norm of acceptable position error (in stage units) between the target and achieved translations. Defaults to None.
            orientation_tolerance (float): magnitude of rotation (in radians) separating the target orientation from the achieved orientation.
                orientation_tolerance is well defined for values between 0 and pi. Defaults to None.
            frame (str): "world" to interpret targets in the stage global frame (robot base pose is set on the solver first).

        Returns:
            Tuple[np.ndarray, bool]:
                joint_positions: joint targets that move the EE frame toward the desired pose.
                success: solver converged successfully.
        """
        if frame == "world":
            # Tell the solver where the robot base sits in the world frame.
            robot_base_translation, robot_base_orientation = self.robot.get_world_pose()
            self.kinematics_solver.set_robot_base_pose(
                robot_base_translation, robot_base_orientation
            )
        action, success = self.articulation_kinematics_solver.compute_inverse_kinematics(
            target_position, target_orientation, position_tolerance, orientation_tolerance
        )
        if not success:
            print("IK did not converge to a solution. No action is being taken")
        return action.joint_positions, success

    def reset(self):
        """Resolve DOF indices (must run after spawn) and reset the controller."""
        self.gripper_index = self.robot.get_dof_index(self.gripper_joint_name)
        if len(self.joints_name) > 0:
            self.joint_indices = [
                self.robot.get_dof_index(joint_name) for joint_name in self.joints_name
            ]
        if self.controller is not None:
            self.controller.reset()

    def apply_action(self, joint_positions, joint_indices=None):
        """Send joint position targets to the articulation controller."""
        if joint_indices is None:
            joint_indices = self.joint_indices
        target_joint_action = ArticulationAction(
            joint_positions=joint_positions, joint_indices=joint_indices
        )
        articulation_controller = self.robot.get_articulation_controller()
        articulation_controller.apply_action(target_joint_action)

    def apply_gripper_width(self, gripper_width):
        """Command only the gripper joint to the given width."""
        gripper_index = self.gripper_index
        self.apply_action(joint_positions=[gripper_width], joint_indices=[gripper_index])

    def get_joint_position(self, joint_indices=None):
        """Return current positions of the controlled joints as a 1-D array."""
        if joint_indices is None:
            joint_indices = self.joint_indices
        joint_pos = [self.robot.get_joint_positions(joint_idx) for joint_idx in joint_indices]
        return np.column_stack(joint_pos)[0]

    def get_ee_pose(self):
        """Return the EE pose as [x, y, z, qw, qx, qy, qz] read from the stage."""
        end_effector_path = self.ee_prim_path
        try:
            hand_prim = XFormPrim(end_effector_path)
            # Pose in the global frame.
            # NOTE(review): get_default_state() returns the default (reset)
            # pose, not necessarily the live one — confirm intended.
            state = hand_prim.get_default_state()
            return np.concatenate([state.position, state.orientation], axis=-1)
        except Exception:
            # Broad catch kept intentionally: fall back to treating the prim as
            # a RigidPrim when it is not a plain XForm.
            hand_prim = RigidPrim(end_effector_path)
            return hand_prim.get_world_pose()

    def get_gripper_width(self):
        """Return the current gripper joint position."""
        gripper_width = self.robot.get_joint_positions(self.gripper_index)
        return gripper_width

    def get_states(self):
        """Return a dict with joint_pos, ee_pose and gripper_width."""
        joint_pos = self.get_joint_position()
        ee_pose = self.get_ee_pose()
        gripper_width = self.get_gripper_width()
        states = {
            "joint_pos": joint_pos,
            "ee_pose": ee_pose,
            "gripper_width": gripper_width,
        }
        return states

    def compute_action(self):
        """Ask the controller for the next action; in EE mode, convert the
        Cartesian target to joint targets via IK (keeping the gripper command)."""
        action = self.controller.forward()
        if self.control_mode == "end_effector_control":
            if action is not None:
                next_action, success = self.compute_IK(
                    target_position=action[:3], target_orientation=action[3:7]
                )
                return np.append(next_action, action[-1])
        return action

    def set_trajectory(self, trajectory):
        """Delegate trajectory assignment to the controller."""
        self.controller.set_trajectory(trajectory)

    def get_trajectory(self):
        """Delegate trajectory retrieval to the controller."""
        return self.controller.get_trajectory()
| @@ -0,0 +1,30 @@ | |||
| import os | |||
| import omni.usd | |||
| from omni.isaac.core import World | |||
| from omni.isaac.core.utils.stage import add_reference_to_stage | |||
class BaseScenary:
    """Owns the Isaac Sim World and a USD stage referenced from a file on disk."""

    def __init__(self, usd_file_path, sim_freq=60):
        # Resolve relative paths against the process working directory.
        self.usd_file_path = os.path.join(os.getcwd(), usd_file_path)
        self.world = None
        self.stage = None
        self.loaded = False
        self.sim_freq = sim_freq  # physics steps per second

    def load_stage(self):
        """Create the World and reference the USD file under /World (idempotent)."""
        if self.loaded:
            return
        if not os.path.isfile(self.usd_file_path):
            raise FileNotFoundError(f"USD file not found: {self.usd_file_path}")
        self.world = World(physics_dt=1 / self.sim_freq)
        add_reference_to_stage(self.usd_file_path, "/World")
        self.stage = omni.usd.get_context().get_stage()
        self.loaded = True

    def step(self, render=True):
        """Advance the simulation by one physics step, rendering if requested."""
        self.world.step(render=render)

    def reset(self):
        """Reset the world to its initial state."""
        self.world.reset()
| @@ -0,0 +1,69 @@ | |||
| import numpy as np | |||
| from isaacsim.core.utils.rotations import euler_angles_to_quat | |||
| from omni.isaac.core.objects import DynamicCuboid | |||
| from src.utils import to_numpy_recursive | |||
| from .base import BaseScenary | |||
class StackCubeScenary(BaseScenary):
    """Scenary with a source and a target cube for the stack-cube task; cube
    placement is either taken from configs or sampled randomly."""

    def __init__(self, src_cube_cfg=None, target_cube_cfg=None, random=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if random:
            src_cube_cfg, target_cube_cfg = self.random_gen()
        if src_cube_cfg is not None:
            self.source_cube = self.add_cube(src_cube_cfg)
        if target_cube_cfg is not None:
            self.target_cube = self.add_cube(target_cube_cfg)

    def add_cube(self, cube_cfg):
        """Spawn a DynamicCuboid from a config dict (loads the stage on demand)."""
        self.load_stage()
        return self.world.scene.add(DynamicCuboid(**to_numpy_recursive(cube_cfg)))

    def random_gen(self):
        """Sample two non-overlapping cube configs sharing one random yaw.

        Returns:
            (src_cfg, target_cfg): dicts consumable by add_cube().
        """
        # Position sampling ranges (stage units).
        x_bounds = (0.5, 0.6)
        x_bounds_shifted = (x_bounds[0] + 0.05, x_bounds[1] + 0.05)
        y_bounds = (-0.15, 0.15)
        # Random yaw (radians) about Z only; Euler -> quaternion [w, x, y, z].
        yaw = np.random.uniform(-np.pi / 4, np.pi / 4)
        quat = euler_angles_to_quat([0, 0, yaw])
        pos_red = np.array([np.random.uniform(*x_bounds), np.random.uniform(*y_bounds), 0.12])
        pos_green = np.array(
            [np.random.uniform(*x_bounds_shifted), np.random.uniform(*y_bounds), 0.12]
        )
        # Keep the cubes at least 0.1 apart so they never overlap.
        # NOTE(review): the re-sample uses the unshifted X range — confirm intended.
        while np.linalg.norm(pos_red - pos_green) < 0.1:
            pos_green = np.array([np.random.uniform(*x_bounds), np.random.uniform(*y_bounds), 0.12])
        cube_red_cfg = {
            "name": "cube",
            "position": pos_red,
            "orientation": quat,
            "prim_path": "/World/Cube",
            "scale": np.array([0.05, 0.05, 0.05]),
            "size": 1.0,
            "color": np.array([1, 0, 0]),
        }
        cube_green_cfg = {
            "name": "cube2",
            "position": pos_green,
            "orientation": quat,
            "prim_path": "/World/Cube2",
            "scale": np.array([0.05, 0.05, 0.05]),
            "size": 1.0,
            "color": np.array([0, 0, 1]),
        }
        return cube_red_cfg, cube_green_cfg
| @@ -0,0 +1,12 @@ | |||
class BaseSensor:
    """Minimal sensor interface; concrete sensors override these hooks."""

    def __init__(self):
        """No shared state; exists so subclasses can chain super().__init__()."""

    def reset(self):
        """Hook: clear per-episode state. No-op by default."""

    def spawn(self):
        """Hook: attach the sensor to the simulation world. No-op by default."""

    def get_data(self):
        """Hook: return the latest measurement. Returns None by default."""
| @@ -0,0 +1,56 @@ | |||
| # import rospy | |||
| from omni.isaac.sensor import Camera as IsaacCamera | |||
| # from sensor_msgs.msg import Image | |||
| # from std_msgs.msg import Header | |||
| import pyarrow as pa | |||
| # from .base import BaseSensor | |||
class Camera(IsaacCamera):
    """Isaac Sim camera that decimates the simulation rate down to the sensor
    rate and publishes RGB frames over dora."""

    def __init__(
        self, camera_prim_path, sim_freq, sensor_freq, resolution, queue_size=10, *args, **kwargs
    ):
        # Reuse the camera prim already defined in the USD stage.
        super().__init__(
            prim_path=camera_prim_path,
            frequency=sensor_freq,
            resolution=(resolution[0], resolution[1]),
            *args,
            **kwargs,
        )
        self.sim_dt = 1 / sim_freq
        self.sensor_dt = 1 / sensor_freq
        self.sim_time = 0  # sim time accumulated since the last published frame

    def reset(self):
        self.sim_time = 0

    def spawn(self, *args):
        self.initialize()

    def get_data(self):
        # Latest RGB frame from the render product.
        return self.get_rgb()

    def send_dora_message(self, node, seq=None, time_stamp=None, frame_id="sim_camera", encoding="rgb8"):
        """Publish one frame on output "camera" whenever enough simulated time
        has elapsed to reach the sensor rate."""
        self.sim_time += self.sim_dt
        if self.sim_time < self.sensor_dt:
            return
        self.sim_time -= self.sensor_dt
        image_np = self.get_data()
        rank = len(image_np.shape)
        if rank == 3:
            height, width, num_channels = image_np.shape
        elif rank == 2:  # grayscale
            height, width = image_np.shape
            num_channels = 1
        else:
            raise ValueError("Image NumPy array must be 2D (grayscale) or 3D (color).")
        metadata = {
            "w": width,
            "h": height,
            "seq": seq,
            "stamp": time_stamp,
            "frame_id": frame_id,
            "encoding": encoding,
            "is_bigendian": 0,
            "step": width * num_channels * image_np.itemsize,
        }
        node.send_output(
            output_id="camera",
            data=pa.array(image_np.flatten()),
            metadata=metadata,
        )
| @@ -0,0 +1,56 @@ | |||
| import numpy as np | |||
| import rospy | |||
| from geometry_msgs.msg import PoseStamped | |||
| from omni.isaac.core.prims import XFormPrim | |||
| import pyarrow as pa | |||
| from .base import BaseSensor | |||
class EEPoseSensor(BaseSensor):
    """Publishes the end-effector world pose as a ROS PoseStamped and as a
    dora output."""

    def __init__(self, robot_name, ee_prim_path, queue_size=10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.robot_name = robot_name
        self.ee_prim_path = ee_prim_path
        # rospy.init_node(name, anonymous=True)
        topic_name = "/" + self.robot_name + "_ee_pose"  # per-robot topic name
        self.ee_pose_pub = rospy.Publisher(topic_name, PoseStamped, queue_size=queue_size)

    def reset(self):
        pass

    def spawn(self, world):
        pass

    def get_data(self):
        """Return the EE pose as [x, y, z, qw, qx, qy, qz].

        NOTE(review): get_default_state() returns the default (reset) pose, not
        necessarily the live one — confirm intended.
        """
        ee_prim = XFormPrim(self.ee_prim_path)
        state = ee_prim.get_default_state()
        return np.concatenate([state.position, state.orientation], axis=-1)

    def send_ros_message(self, seq=None, time_stamp=None):
        """Publish the current EE pose as a PoseStamped in the "world" frame."""
        ee_pose = self.get_data()
        msg = PoseStamped()
        msg.header.seq = seq
        msg.header.stamp = rospy.Time.from_sec(time_stamp)
        msg.header.frame_id = "world"
        # Position.
        msg.pose.position.x = ee_pose[0]
        msg.pose.position.y = ee_pose[1]
        msg.pose.position.z = ee_pose[2]
        # Orientation (w, x, y, z quaternion).
        msg.pose.orientation.w = ee_pose[3]
        msg.pose.orientation.x = ee_pose[4]
        msg.pose.orientation.y = ee_pose[5]
        msg.pose.orientation.z = ee_pose[6]
        self.ee_pose_pub.publish(msg)

    def send_dora_message(self, node, seq=None, time_stamp=None):
        """Publish the current EE pose on dora output "ee_pose"."""
        node.send_output(
            output_id="ee_pose",
            data=pa.array(self.get_data()),
            metadata={"seq": seq, "stamp": time_stamp, "frame_id": "world"},
        )
| @@ -0,0 +1,31 @@ | |||
| # import rospy | |||
| import pyarrow as pa | |||
| from .base import BaseSensor | |||
class JointSensor(BaseSensor):
    """Reads an articulation's joint state and publishes positions over dora."""

    def __init__(self, robot_name, articulation=None, queue_size=10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = robot_name + "_joint"
        self.robot_name = robot_name
        self.articulation = articulation

    def reset(self):
        pass

    def spawn(self, world):
        # Bind to the robot registered in the scene under robot_name.
        self.articulation = world.scene.get_object(self.robot_name)

    def get_data(self):
        """Return the articulation's full joints state object."""
        return self.articulation.get_joints_state()

    def send_dora_message(self, node, seq=None, time_stamp=None):
        """Publish joint positions on dora output "joint_pos", with the DOF
        names carried in the metadata."""
        joint_state = self.get_data()
        node.send_output(
            output_id="joint_pos",
            data=pa.array(joint_state.positions),
            metadata={"seq": seq, "stamp": time_stamp, "name": self.articulation.dof_names},
        )
| @@ -0,0 +1,98 @@ | |||
| from src.utils import draw_single_point | |||
class BaseTask:
    """Glue between a scenary, a robot and its sensors: builds the scene,
    runs the control loop, and streams sensor data over dora."""

    def __init__(
        self,
        scenary,
        robot,  # a single robot wrapper (original comment said "dictionary of robots"; the code uses one)
        sensors=None,
        dataset=None,
        replay_horizon=0,
        replay_trajectory_index=0,
        visualize_trajectory=False,
    ):
        """Wire up the task and build it immediately.

        Args:
            scenary: scene wrapper providing load_stage/reset/step/world.
            robot: robot wrapper providing spawn/reset/compute_action/apply_action.
            sensors: dict of name -> sensor. Bug fix: was a mutable default {}
                shared across instances; now defaults to a fresh dict.
            dataset: optional replay dataset; indexed when replay_horizon == -1.
            replay_horizon: steps to replay; -1 means "length of first episode"
                (NOTE(review): -1 requires a non-None dataset).
            replay_trajectory_index: which trajectory to replay.
            visualize_trajectory: draw the EE position each step.
                NOTE(review): this calls src.utils.draw_single_point, which is
                currently commented out there — restore it before enabling.
        """
        self.scenary = scenary
        self.robot = robot
        self.sensors = {} if sensors is None else sensors
        self.reset_needed = False  # set to True when the simulator is restarted
        self.dataset = dataset
        self.replay_horizon = (
            replay_horizon if replay_horizon != -1 else self.dataset[0]["action"].shape[0]
        )
        self.replay_trajectory_index = replay_trajectory_index
        self.done_flag = False
        # Build the task (loads the scene, spawns robot/sensors, resets).
        self.build()
        self.replay_count = 0
        self.visualize_trajectory = visualize_trajectory

    def reset(self):
        """Reset scene, robot and every sensor to the episode start."""
        self.scenary.reset()
        self.robot.reset()
        for sensor in self.sensors.values():
            sensor.reset()

    def build(self):
        """One-time construction: load the stage, spawn robot and sensors, reset."""
        # 1. Load the scene.
        self.scenary.load_stage()
        # 2. Create the robot.
        self.robot.spawn(self.scenary.world)
        # 3. Create the sensors.
        for sensor in self.sensors.values():
            sensor.spawn(self.scenary.world)
        # 4. Reset the environment.
        self.reset()

    def step(self, reset):
        """Compute and apply one control action.

        `reset` is currently unused but kept for interface compatibility.
        """
        next_action = self.robot.compute_action()
        self.robot.apply_action(next_action)

    def send_ros_data(self, seq, time_stamp):
        """Publish every sensor's reading over ROS."""
        for sensor in self.sensors.values():
            sensor.send_ros_message(seq=seq, time_stamp=time_stamp)

    def send_dora_data(self, seq, time_stamp):
        """Publish every sensor's reading via the robot controller's dora node."""
        for sensor in self.sensors.values():
            sensor.send_dora_message(self.robot.controller.node, seq=seq, time_stamp=time_stamp)

    def run(self, simulation_app):
        """Main loop: step physics, warm up, stream sensor data and control the
        robot until the app stops or done_flag is set."""
        world = self.scenary.world
        warm_up = 100  # steps to let physics settle before controlling
        i = 0
        while simulation_app.is_running():
            # Advance the simulation and render.
            self.scenary.step(render=True)
            if world.is_stopped() and not self.reset_needed:
                self.reset_needed = True
            if i < warm_up:
                i += 1
                continue
            if world.is_playing():
                reset = self.reset_needed
                if self.reset_needed:
                    self.reset()
                    self.replay_count = 0
                    i = 0
                    self.reset_needed = False
                self.send_dora_data(seq=i, time_stamp=world.current_time)
                if self.visualize_trajectory:
                    ee_pose = self.robot.get_ee_pose()
                    draw_single_point(ee_pose[:3], color="model")
                self.step(reset)
                i = i + 1
                if self.done_flag:
                    break
        simulation_app.close()
| @@ -0,0 +1,102 @@ | |||
| import numpy as np | |||
| from omegaconf.dictconfig import DictConfig | |||
| from omegaconf.listconfig import ListConfig | |||
| from omni.isaac.dynamic_control import _dynamic_control | |||
| from scipy.spatial.transform import Rotation | |||
| from omni.isaac.core.prims import XFormPrim | |||
| import random | |||
| import torch | |||
# Acquire the dynamic-control interface once at import time.
# NOTE(review): `dc` is not referenced anywhere in this module's visible code —
# confirm importers rely on it before removing.
dc = _dynamic_control.acquire_dynamic_control_interface()
def debug():
    """Development helper: block until a debugpy client attaches on port 5679."""
    import debugpy

    # Start the debug server on the given port.
    debugpy.listen(5679)
    print("Waiting for debugger to attach...")
    # Set breakpoints before/after this point; execution resumes once attached.
    debugpy.wait_for_client()
def to_numpy_recursive(cfg):
    """Recursively convert OmegaConf containers for consumption by Isaac APIs:
    ListConfig values become np.ndarray, dict/DictConfig values are rebuilt as
    plain dicts with converted entries, and any other value passes through."""
    if isinstance(cfg, (dict, DictConfig)):
        return {key: to_numpy_recursive(value) for key, value in cfg.items()}
    if isinstance(cfg, ListConfig):
        return np.array(cfg)
    return cfg
def set_seed(seed, deterministic=False):
    """Seed Python, NumPy and all torch RNGs for reproducibility.

    Args:
        seed: integer seed applied to every RNG.
        deterministic: when True, also force cuDNN into deterministic mode
            (disables benchmark autotuning).
    """
    random.seed(seed)  # Python's built-in RNG
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # current GPU
    torch.cuda.manual_seed_all(seed)  # all GPUs, when several are present
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def quaternion_to_rotvec(quat):
    """Convert a quaternion in (w, x, y, z) order to a rotation vector.

    Args:
        quat: length-4 array-like holding (w, x, y, z).

    Returns:
        np.ndarray: 3-D rotation vector (axis * angle, Rodrigues form).
    """
    # scipy expects scalar-last [x, y, z, w], so reorder before converting.
    rotation = Rotation.from_quat([quat[1], quat[2], quat[3], quat[0]])
    return rotation.as_rotvec()
def set_prim_transform(stage, prim_path, translate=None, rotate=None, scale=None):
    """Set the world pose of a USD prim, or warn and bail out if it is missing.

    Args:
        stage: the open Usd.Stage.
        prim_path: prim path, e.g. "/World/Cube".
        translate: optional (x, y, z) translation.
        rotate: optional rotation passed straight to XFormPrim.set_world_pose.
            NOTE(review): the original docs said degrees, but set_world_pose
            conventionally takes an orientation quaternion — confirm the format.
        scale: optional (x, y, z) scale. NOTE(review): currently ignored.
    """
    prim = stage.GetPrimAtPath(prim_path)
    if not prim or not prim.IsValid():
        # Runtime message intentionally left untranslated (Chinese:
        # "prim not found, please check the path").
        print(f"未找到 {prim_path},请检查路径是否正确!")
        return
    xprim = XFormPrim(prim_path=prim_path)
    xprim.set_world_pose(translate, rotate)
# NOTE(review): draw_single_point is commented out here, yet task code does
# `from src.utils import draw_single_point` — restore this function (and its
# `_draw` backend) before enabling trajectory visualization, or that import fails.
# def draw_single_point(pos, color="expert", size=15):
| # """ | |||
| # 实时绘制单个轨迹点。 | |||
| # Args: | |||
| # pos: (x, y, z) 的三元组坐标 | |||
| # color: "expert"(绿色)或 "model"(蓝色),也支持自定义 RGB 元组 | |||
| # size: 点的大小 | |||
| # """ | |||
| # if color == "expert": | |||
| # rgba = (0, 1, 0, 1) # 绿色 | |||
| # elif color == "model": | |||
| # rgba = (0, 0, 1, 1) # 蓝色 | |||
| # elif isinstance(color, tuple) and len(color) in (3, 4): | |||
| # if len(color) == 3: | |||
| # rgba = (*color, 1.0) | |||
| # else: | |||
| # rgba = color | |||
| # else: | |||
| # raise ValueError("Unsupported color type") | |||
| # _draw.draw_points([pos], [rgba], [size]) | |||
| @@ -0,0 +1,27 @@ | |||
| from omni.isaac.kit import SimulationApp | |||
| import hydra | |||
| from omegaconf import DictConfig | |||
| from src.utils import set_seed | |||
# Launch the Isaac Sim application with a visible window (headless disabled).
# NOTE(review): SimulationApp is conventionally created before any Isaac modules
# are imported, but `src.utils` (which imports omni.isaac.*) is imported above —
# confirm this import order works in this setup.
simulation_app = SimulationApp({
    "headless": False,
})
@hydra.main(config_path="./configs", config_name="stack_cube_dp_franka", version_base="1.3.2")
def main(cfg: DictConfig):
    """Hydra entry point: seed RNGs when configured, instantiate the task from
    the config, and run it inside the already-created SimulationApp."""
    if "seed" in cfg.keys():
        set_seed(cfg.seed)
    task = hydra.utils.instantiate(cfg.task)
    print(f"Task '{type(task).__name__}' initialized.")
    task.run(simulation_app)
if __name__ == "__main__":
    # debug()  # NOTE(review): `debug` is not imported in this module — import it before uncommenting.
    main()
| @@ -0,0 +1,22 @@ | |||
[project]
name = "dora-isaacsim"
version = "0.0.0"
authors = [{ name = "berrylvz", email = "berrylvz@163.com" }]
description = "dora-isaacsim"
license = { text = "MIT" }
readme = "README.md"
requires-python = ">=3.8"
dependencies = ["dora-rs >= 0.3.9"]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]

[project.scripts]
dora-isaacsim = "dora_isaacsim.main:main"

[tool.ruff]
exclude = ["dora_isaacsim/Isaacsim"]

[tool.black]
# Fix: Black's option is `extend-exclude` (a regex string); the previous
# `extend.exclude` key is not recognized by Black and was silently ignored.
extend-exclude = "dora_isaacsim/Isaacsim"
| @@ -0,0 +1,9 @@ | |||
| import pytest | |||
def test_import_main():
    """Smoke test: dora_isaacsim.main must be importable, and calling main()
    outside a running dora dataflow must fail fast with a RuntimeError."""
    from dora_isaacsim.main import main

    # Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
    with pytest.raises(RuntimeError):
        main()
| @@ -0,0 +1,405 @@ | |||
| version = 1 | |||
| revision = 1 | |||
| requires-python = ">=3.8" | |||
| resolution-markers = [ | |||
| "python_full_version >= '3.9'", | |||
| "python_full_version < '3.9'", | |||
| ] | |||
| [[package]] | |||
| name = "colorama" | |||
| version = "0.4.6" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, | |||
| ] | |||
| [[package]] | |||
| name = "dora-isaacsim" | |||
| version = "0.0.0" | |||
| source = { virtual = "." } | |||
| dependencies = [ | |||
| { name = "dora-rs" }, | |||
| ] | |||
| [package.dev-dependencies] | |||
| dev = [ | |||
| { name = "pytest", version = "8.3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" }, | |||
| { name = "pytest", version = "8.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" }, | |||
| { name = "ruff" }, | |||
| ] | |||
| [package.metadata] | |||
| requires-dist = [{ name = "dora-rs", specifier = ">=0.3.9" }] | |||
| [package.metadata.requires-dev] | |||
| dev = [ | |||
| { name = "pytest", specifier = ">=8.1.1" }, | |||
| { name = "ruff", specifier = ">=0.9.1" }, | |||
| ] | |||
| [[package]] | |||
| name = "dora-rs" | |||
| version = "0.3.11" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| dependencies = [ | |||
| { name = "pyarrow", version = "17.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" }, | |||
| { name = "pyarrow", version = "20.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" }, | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/ce/24/7de862dbd34d454d698e7df684b6c2bc67e178c741e48e84f41d63feaeca/dora_rs-0.3.11.tar.gz", hash = "sha256:a588ef542fdf0aad93fb0c3e60bd7fa5ad02bf993d8c0cb604c16692a70af69c", size = 245536 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/e6/14/00f13ace6fa0844a2c659e26b7c46ee714707e8bcbedc33c62933f2212b1/dora_rs-0.3.11-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:6ba036506343b15a7f4d155bed49d11b45568f2aa443dafcc856f901429141d0", size = 13998494 }, | |||
| { url = "https://files.pythonhosted.org/packages/a9/47/b6c2e858c36ac69d65f9f8b9ae52562087ae14f1373e91437976d3b4cb79/dora_rs-0.3.11-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:155f6e5ba9fc06a782b9e503cd154d53f69b5229c9853e1b52e47411fda67b51", size = 13454072 }, | |||
| { url = "https://files.pythonhosted.org/packages/9b/58/1218eeacdab586291d3b85017c9f9092dea97169e9c690cd33f0a28fdce6/dora_rs-0.3.11-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:47cf3ad2739731e9b274fc84ba1713ab3e4cf60f339f3905d877ae58a9f49053", size = 12056963 }, | |||
| { url = "https://files.pythonhosted.org/packages/4c/cb/79c453904525cd3f397179d82ba20d0bdfea4f727f6d62ac78b14b5b1509/dora_rs-0.3.11-cp37-abi3-manylinux_2_28_armv7l.whl", hash = "sha256:181e2af568c2e21977a40973def2f664ec16a70f46a9b552397e99c21889054a", size = 11578280 }, | |||
| { url = "https://files.pythonhosted.org/packages/6e/ae/c35be0cc46f93186299e96a336cf22515a33d8340a4e55b9c1b806290a16/dora_rs-0.3.11-cp37-abi3-manylinux_2_28_i686.whl", hash = "sha256:488bc68a1d75449fd5ab6d079e5738663a2a18b618ac8d3fca55c2b3e7f1d0bb", size = 13819951 }, | |||
| { url = "https://files.pythonhosted.org/packages/4e/66/59e837d5fc432e44a6c429f86ed6c76d7702c8c1564b2bfa8284c005111c/dora_rs-0.3.11-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a8a2c01efc9c21124b920b22cf0c56ee4bb1eb37b3d848c581dd349eab6be5e3", size = 12738760 }, | |||
| { url = "https://files.pythonhosted.org/packages/8e/9b/ccdc02c1e1ac2df78245473e97c24ab71ad013c503d70a90b240df1f6a63/dora_rs-0.3.11-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:17265019ecaa82d9a4d1674d01a7d26219b24341e44e5c8749dfe00007e4a2f7", size = 15062967 }, | |||
| { url = "https://files.pythonhosted.org/packages/81/8f/b61982a6d9eac4cd682d23327d3d977b262f7b8782cff4f5b56dd7b02af5/dora_rs-0.3.11-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:8ecd51b4c57caece840a67a6d27a10bd63898216b07527010f066979654da794", size = 14938034 }, | |||
| { url = "https://files.pythonhosted.org/packages/43/13/788c2f1921381c9643f9e8be736086d62809fc21ebb8405a287963b40d1c/dora_rs-0.3.11-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:c96fe58a3b380158d76a147376825c555eab63e68507838a5dfd951004b9d46d", size = 15495103 }, | |||
| { url = "https://files.pythonhosted.org/packages/26/23/395ecea6b7d54b38be29d7af51aa7b02ac78af53cbfaf9a5983fd058a1c0/dora_rs-0.3.11-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:65fd77f51689b2153a070ab1f32e901a5f4dd937045dc8130ea92ebd98eee654", size = 15469995 }, | |||
| { url = "https://files.pythonhosted.org/packages/d1/90/43594c2e58d580e2e7244ddeeb3cbcfaf690eb70309982c4a4a792b2b4a8/dora_rs-0.3.11-cp37-abi3-win_amd64.whl", hash = "sha256:dfebb56b2e429147786febfc295f510c3b302af7954ace4ffd1328e0fc882b22", size = 12185781 }, | |||
| ] | |||
| [[package]] | |||
| name = "exceptiongroup" | |||
| version = "1.3.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| dependencies = [ | |||
| { name = "typing-extensions", version = "4.13.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" }, | |||
| { name = "typing-extensions", version = "4.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9' and python_full_version < '3.13'" }, | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674 }, | |||
| ] | |||
| [[package]] | |||
| name = "iniconfig" | |||
| version = "2.1.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, | |||
| ] | |||
| [[package]] | |||
| name = "numpy" | |||
| version = "1.24.4" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/a4/9b/027bec52c633f6556dba6b722d9a0befb40498b9ceddd29cbe67a45a127c/numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463", size = 10911229 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/6b/80/6cdfb3e275d95155a34659163b83c09e3a3ff9f1456880bec6cc63d71083/numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64", size = 19789140 }, | |||
| { url = "https://files.pythonhosted.org/packages/64/5f/3f01d753e2175cfade1013eea08db99ba1ee4bdb147ebcf3623b75d12aa7/numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1", size = 13854297 }, | |||
| { url = "https://files.pythonhosted.org/packages/5a/b3/2f9c21d799fa07053ffa151faccdceeb69beec5a010576b8991f614021f7/numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4", size = 13995611 }, | |||
| { url = "https://files.pythonhosted.org/packages/10/be/ae5bf4737cb79ba437879915791f6f26d92583c738d7d960ad94e5c36adf/numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6", size = 17282357 }, | |||
| { url = "https://files.pythonhosted.org/packages/c0/64/908c1087be6285f40e4b3e79454552a701664a079321cff519d8c7051d06/numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc", size = 12429222 }, | |||
| { url = "https://files.pythonhosted.org/packages/22/55/3d5a7c1142e0d9329ad27cece17933b0e2ab4e54ddc5c1861fbfeb3f7693/numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e", size = 14841514 }, | |||
| { url = "https://files.pythonhosted.org/packages/a9/cc/5ed2280a27e5dab12994c884f1f4d8c3bd4d885d02ae9e52a9d213a6a5e2/numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810", size = 19775508 }, | |||
| { url = "https://files.pythonhosted.org/packages/c0/bc/77635c657a3668cf652806210b8662e1aff84b818a55ba88257abf6637a8/numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254", size = 13840033 }, | |||
| { url = "https://files.pythonhosted.org/packages/a7/4c/96cdaa34f54c05e97c1c50f39f98d608f96f0677a6589e64e53104e22904/numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7", size = 13991951 }, | |||
| { url = "https://files.pythonhosted.org/packages/22/97/dfb1a31bb46686f09e68ea6ac5c63fdee0d22d7b23b8f3f7ea07712869ef/numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5", size = 17278923 }, | |||
| { url = "https://files.pythonhosted.org/packages/35/e2/76a11e54139654a324d107da1d98f99e7aa2a7ef97cfd7c631fba7dbde71/numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d", size = 12422446 }, | |||
| { url = "https://files.pythonhosted.org/packages/d8/ec/ebef2f7d7c28503f958f0f8b992e7ce606fb74f9e891199329d5f5f87404/numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694", size = 14834466 }, | |||
| { url = "https://files.pythonhosted.org/packages/11/10/943cfb579f1a02909ff96464c69893b1d25be3731b5d3652c2e0cf1281ea/numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61", size = 19780722 }, | |||
| { url = "https://files.pythonhosted.org/packages/a7/ae/f53b7b265fdc701e663fbb322a8e9d4b14d9cb7b2385f45ddfabfc4327e4/numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f", size = 13843102 }, | |||
| { url = "https://files.pythonhosted.org/packages/25/6f/2586a50ad72e8dbb1d8381f837008a0321a3516dfd7cb57fc8cf7e4bb06b/numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e", size = 14039616 }, | |||
| { url = "https://files.pythonhosted.org/packages/98/5d/5738903efe0ecb73e51eb44feafba32bdba2081263d40c5043568ff60faf/numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc", size = 17316263 }, | |||
| { url = "https://files.pythonhosted.org/packages/d1/57/8d328f0b91c733aa9aa7ee540dbc49b58796c862b4fbcb1146c701e888da/numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2", size = 12455660 }, | |||
| { url = "https://files.pythonhosted.org/packages/69/65/0d47953afa0ad569d12de5f65d964321c208492064c38fe3b0b9744f8d44/numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706", size = 14868112 }, | |||
| { url = "https://files.pythonhosted.org/packages/9a/cd/d5b0402b801c8a8b56b04c1e85c6165efab298d2f0ab741c2406516ede3a/numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400", size = 19816549 }, | |||
| { url = "https://files.pythonhosted.org/packages/14/27/638aaa446f39113a3ed38b37a66243e21b38110d021bfcb940c383e120f2/numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f", size = 13879950 }, | |||
| { url = "https://files.pythonhosted.org/packages/8f/27/91894916e50627476cff1a4e4363ab6179d01077d71b9afed41d9e1f18bf/numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9", size = 14030228 }, | |||
| { url = "https://files.pythonhosted.org/packages/7a/7c/d7b2a0417af6428440c0ad7cb9799073e507b1a465f827d058b826236964/numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d", size = 17311170 }, | |||
| { url = "https://files.pythonhosted.org/packages/18/9d/e02ace5d7dfccee796c37b995c63322674daf88ae2f4a4724c5dd0afcc91/numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835", size = 12454918 }, | |||
| { url = "https://files.pythonhosted.org/packages/63/38/6cc19d6b8bfa1d1a459daf2b3fe325453153ca7019976274b6f33d8b5663/numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8", size = 14867441 }, | |||
| { url = "https://files.pythonhosted.org/packages/a4/fd/8dff40e25e937c94257455c237b9b6bf5a30d42dd1cc11555533be099492/numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef", size = 19156590 }, | |||
| { url = "https://files.pythonhosted.org/packages/42/e7/4bf953c6e05df90c6d351af69966384fed8e988d0e8c54dad7103b59f3ba/numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a", size = 16705744 }, | |||
| { url = "https://files.pythonhosted.org/packages/fc/dd/9106005eb477d022b60b3817ed5937a43dad8fd1f20b0610ea8a32fcb407/numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2", size = 14734290 }, | |||
| ] | |||
| [[package]] | |||
| name = "packaging" | |||
| version = "25.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, | |||
| ] | |||
| [[package]] | |||
| name = "pluggy" | |||
| version = "1.5.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version < '3.9'", | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, | |||
| ] | |||
| [[package]] | |||
| name = "pluggy" | |||
| version = "1.6.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version >= '3.9'", | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, | |||
| ] | |||
| [[package]] | |||
| name = "pyarrow" | |||
| version = "17.0.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version < '3.9'", | |||
| ] | |||
| dependencies = [ | |||
| { name = "numpy", marker = "python_full_version < '3.9'" }, | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/27/4e/ea6d43f324169f8aec0e57569443a38bab4b398d09769ca64f7b4d467de3/pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28", size = 1112479 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/39/5d/78d4b040bc5ff2fc6c3d03e80fca396b742f6c125b8af06bcf7427f931bc/pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07", size = 28994846 }, | |||
| { url = "https://files.pythonhosted.org/packages/3b/73/8ed168db7642e91180330e4ea9f3ff8bab404678f00d32d7df0871a4933b/pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655", size = 27165908 }, | |||
| { url = "https://files.pythonhosted.org/packages/81/36/e78c24be99242063f6d0590ef68c857ea07bdea470242c361e9a15bd57a4/pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545", size = 39264209 }, | |||
| { url = "https://files.pythonhosted.org/packages/18/4c/3db637d7578f683b0a8fb8999b436bdbedd6e3517bd4f90c70853cf3ad20/pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2", size = 39862883 }, | |||
| { url = "https://files.pythonhosted.org/packages/81/3c/0580626896c842614a523e66b351181ed5bb14e5dfc263cd68cea2c46d90/pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8", size = 38723009 }, | |||
| { url = "https://files.pythonhosted.org/packages/ee/fb/c1b47f0ada36d856a352da261a44d7344d8f22e2f7db3945f8c3b81be5dd/pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047", size = 39855626 }, | |||
| { url = "https://files.pythonhosted.org/packages/19/09/b0a02908180a25d57312ab5919069c39fddf30602568980419f4b02393f6/pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087", size = 25147242 }, | |||
| { url = "https://files.pythonhosted.org/packages/f9/46/ce89f87c2936f5bb9d879473b9663ce7a4b1f4359acc2f0eb39865eaa1af/pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977", size = 29028748 }, | |||
| { url = "https://files.pythonhosted.org/packages/8d/8e/ce2e9b2146de422f6638333c01903140e9ada244a2a477918a368306c64c/pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3", size = 27190965 }, | |||
| { url = "https://files.pythonhosted.org/packages/3b/c8/5675719570eb1acd809481c6d64e2136ffb340bc387f4ca62dce79516cea/pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15", size = 39269081 }, | |||
| { url = "https://files.pythonhosted.org/packages/5e/78/3931194f16ab681ebb87ad252e7b8d2c8b23dad49706cadc865dff4a1dd3/pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597", size = 39864921 }, | |||
| { url = "https://files.pythonhosted.org/packages/d8/81/69b6606093363f55a2a574c018901c40952d4e902e670656d18213c71ad7/pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420", size = 38740798 }, | |||
| { url = "https://files.pythonhosted.org/packages/4c/21/9ca93b84b92ef927814cb7ba37f0774a484c849d58f0b692b16af8eebcfb/pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4", size = 39871877 }, | |||
| { url = "https://files.pythonhosted.org/packages/30/d1/63a7c248432c71c7d3ee803e706590a0b81ce1a8d2b2ae49677774b813bb/pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03", size = 25151089 }, | |||
| { url = "https://files.pythonhosted.org/packages/d4/62/ce6ac1275a432b4a27c55fe96c58147f111d8ba1ad800a112d31859fae2f/pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22", size = 29019418 }, | |||
| { url = "https://files.pythonhosted.org/packages/8e/0a/dbd0c134e7a0c30bea439675cc120012337202e5fac7163ba839aa3691d2/pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053", size = 27152197 }, | |||
| { url = "https://files.pythonhosted.org/packages/cb/05/3f4a16498349db79090767620d6dc23c1ec0c658a668d61d76b87706c65d/pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a", size = 39263026 }, | |||
| { url = "https://files.pythonhosted.org/packages/c2/0c/ea2107236740be8fa0e0d4a293a095c9f43546a2465bb7df34eee9126b09/pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc", size = 39880798 }, | |||
| { url = "https://files.pythonhosted.org/packages/f6/b0/b9164a8bc495083c10c281cc65064553ec87b7537d6f742a89d5953a2a3e/pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a", size = 38715172 }, | |||
| { url = "https://files.pythonhosted.org/packages/f1/c4/9625418a1413005e486c006e56675334929fad864347c5ae7c1b2e7fe639/pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b", size = 39874508 }, | |||
| { url = "https://files.pythonhosted.org/packages/ae/49/baafe2a964f663413be3bd1cf5c45ed98c5e42e804e2328e18f4570027c1/pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7", size = 25099235 }, | |||
| { url = "https://files.pythonhosted.org/packages/8d/bd/8f52c1d7b430260f80a349cffa2df351750a737b5336313d56dcadeb9ae1/pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204", size = 28999345 }, | |||
| { url = "https://files.pythonhosted.org/packages/64/d9/51e35550f2f18b8815a2ab25948f735434db32000c0e91eba3a32634782a/pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8", size = 27168441 }, | |||
| { url = "https://files.pythonhosted.org/packages/18/d8/7161d87d07ea51be70c49f615004c1446d5723622a18b2681f7e4b71bf6e/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155", size = 39363163 }, | |||
| { url = "https://files.pythonhosted.org/packages/3f/08/bc497130789833de09e345e3ce4647e3ce86517c4f70f2144f0367ca378b/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145", size = 39965253 }, | |||
| { url = "https://files.pythonhosted.org/packages/d3/2e/493dd7db889402b4c7871ca7dfdd20f2c5deedbff802d3eb8576359930f9/pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c", size = 38805378 }, | |||
| { url = "https://files.pythonhosted.org/packages/e6/c1/4c6bcdf7a820034aa91a8b4d25fef38809be79b42ca7aaa16d4680b0bbac/pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c", size = 39958364 }, | |||
| { url = "https://files.pythonhosted.org/packages/d1/db/42ac644453cfdfc60fe002b46d647fe7a6dfad753ef7b28e99b4c936ad5d/pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca", size = 25229211 }, | |||
| { url = "https://files.pythonhosted.org/packages/43/e0/a898096d35be240aa61fb2d54db58b86d664b10e1e51256f9300f47565e8/pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb", size = 29007881 }, | |||
| { url = "https://files.pythonhosted.org/packages/59/22/f7d14907ed0697b5dd488d393129f2738629fa5bcba863e00931b7975946/pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df", size = 27178117 }, | |||
| { url = "https://files.pythonhosted.org/packages/bf/ee/661211feac0ed48467b1d5c57298c91403809ec3ab78b1d175e1d6ad03cf/pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687", size = 39273896 }, | |||
| { url = "https://files.pythonhosted.org/packages/af/61/bcd9b58e38ead6ad42b9ed00da33a3f862bc1d445e3d3164799c25550ac2/pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b", size = 39875438 }, | |||
| { url = "https://files.pythonhosted.org/packages/75/63/29d1bfcc57af73cde3fc3baccab2f37548de512dbe0ab294b033cd203516/pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5", size = 38735092 }, | |||
| { url = "https://files.pythonhosted.org/packages/39/f4/90258b4de753df7cc61cefb0312f8abcf226672e96cc64996e66afce817a/pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda", size = 39867610 }, | |||
| { url = "https://files.pythonhosted.org/packages/e7/f6/b75d4816c32f1618ed31a005ee635dd1d91d8164495d94f2ea092f594661/pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204", size = 25148611 }, | |||
| ] | |||
| [[package]] | |||
| name = "pyarrow" | |||
| version = "20.0.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version >= '3.9'", | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/a2/ee/a7810cb9f3d6e9238e61d312076a9859bf3668fd21c69744de9532383912/pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1", size = 1125187 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/5b/23/77094eb8ee0dbe88441689cb6afc40ac312a1e15d3a7acc0586999518222/pyarrow-20.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c7dd06fd7d7b410ca5dc839cc9d485d2bc4ae5240851bcd45d85105cc90a47d7", size = 30832591 }, | |||
| { url = "https://files.pythonhosted.org/packages/c3/d5/48cc573aff00d62913701d9fac478518f693b30c25f2c157550b0b2565cb/pyarrow-20.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d5382de8dc34c943249b01c19110783d0d64b207167c728461add1ecc2db88e4", size = 32273686 }, | |||
| { url = "https://files.pythonhosted.org/packages/37/df/4099b69a432b5cb412dd18adc2629975544d656df3d7fda6d73c5dba935d/pyarrow-20.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6415a0d0174487456ddc9beaead703d0ded5966129fa4fd3114d76b5d1c5ceae", size = 41337051 }, | |||
| { url = "https://files.pythonhosted.org/packages/4c/27/99922a9ac1c9226f346e3a1e15e63dee6f623ed757ff2893f9d6994a69d3/pyarrow-20.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15aa1b3b2587e74328a730457068dc6c89e6dcbf438d4369f572af9d320a25ee", size = 42404659 }, | |||
| { url = "https://files.pythonhosted.org/packages/21/d1/71d91b2791b829c9e98f1e0d85be66ed93aff399f80abb99678511847eaa/pyarrow-20.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5605919fbe67a7948c1f03b9f3727d82846c053cd2ce9303ace791855923fd20", size = 40695446 }, | |||
| { url = "https://files.pythonhosted.org/packages/f1/ca/ae10fba419a6e94329707487835ec721f5a95f3ac9168500bcf7aa3813c7/pyarrow-20.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a5704f29a74b81673d266e5ec1fe376f060627c2e42c5c7651288ed4b0db29e9", size = 42278528 }, | |||
| { url = "https://files.pythonhosted.org/packages/7a/a6/aba40a2bf01b5d00cf9cd16d427a5da1fad0fb69b514ce8c8292ab80e968/pyarrow-20.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:00138f79ee1b5aca81e2bdedb91e3739b987245e11fa3c826f9e57c5d102fb75", size = 42918162 }, | |||
| { url = "https://files.pythonhosted.org/packages/93/6b/98b39650cd64f32bf2ec6d627a9bd24fcb3e4e6ea1873c5e1ea8a83b1a18/pyarrow-20.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f2d67ac28f57a362f1a2c1e6fa98bfe2f03230f7e15927aecd067433b1e70ce8", size = 44550319 }, | |||
| { url = "https://files.pythonhosted.org/packages/ab/32/340238be1eb5037e7b5de7e640ee22334417239bc347eadefaf8c373936d/pyarrow-20.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:4a8b029a07956b8d7bd742ffca25374dd3f634b35e46cc7a7c3fa4c75b297191", size = 25770759 }, | |||
| { url = "https://files.pythonhosted.org/packages/47/a2/b7930824181ceadd0c63c1042d01fa4ef63eee233934826a7a2a9af6e463/pyarrow-20.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:24ca380585444cb2a31324c546a9a56abbe87e26069189e14bdba19c86c049f0", size = 30856035 }, | |||
| { url = "https://files.pythonhosted.org/packages/9b/18/c765770227d7f5bdfa8a69f64b49194352325c66a5c3bb5e332dfd5867d9/pyarrow-20.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:95b330059ddfdc591a3225f2d272123be26c8fa76e8c9ee1a77aad507361cfdb", size = 32309552 }, | |||
| { url = "https://files.pythonhosted.org/packages/44/fb/dfb2dfdd3e488bb14f822d7335653092dde150cffc2da97de6e7500681f9/pyarrow-20.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f0fb1041267e9968c6d0d2ce3ff92e3928b243e2b6d11eeb84d9ac547308232", size = 41334704 }, | |||
| { url = "https://files.pythonhosted.org/packages/58/0d/08a95878d38808051a953e887332d4a76bc06c6ee04351918ee1155407eb/pyarrow-20.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ff87cc837601532cc8242d2f7e09b4e02404de1b797aee747dd4ba4bd6313f", size = 42399836 }, | |||
| { url = "https://files.pythonhosted.org/packages/f3/cd/efa271234dfe38f0271561086eedcad7bc0f2ddd1efba423916ff0883684/pyarrow-20.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:7a3a5dcf54286e6141d5114522cf31dd67a9e7c9133d150799f30ee302a7a1ab", size = 40711789 }, | |||
| { url = "https://files.pythonhosted.org/packages/46/1f/7f02009bc7fc8955c391defee5348f510e589a020e4b40ca05edcb847854/pyarrow-20.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a6ad3e7758ecf559900261a4df985662df54fb7fdb55e8e3b3aa99b23d526b62", size = 42301124 }, | |||
| { url = "https://files.pythonhosted.org/packages/4f/92/692c562be4504c262089e86757a9048739fe1acb4024f92d39615e7bab3f/pyarrow-20.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6bb830757103a6cb300a04610e08d9636f0cd223d32f388418ea893a3e655f1c", size = 42916060 }, | |||
| { url = "https://files.pythonhosted.org/packages/a4/ec/9f5c7e7c828d8e0a3c7ef50ee62eca38a7de2fa6eb1b8fa43685c9414fef/pyarrow-20.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:96e37f0766ecb4514a899d9a3554fadda770fb57ddf42b63d80f14bc20aa7db3", size = 44547640 }, | |||
| { url = "https://files.pythonhosted.org/packages/54/96/46613131b4727f10fd2ffa6d0d6f02efcc09a0e7374eff3b5771548aa95b/pyarrow-20.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3346babb516f4b6fd790da99b98bed9708e3f02e734c84971faccb20736848dc", size = 25781491 }, | |||
| { url = "https://files.pythonhosted.org/packages/a1/d6/0c10e0d54f6c13eb464ee9b67a68b8c71bcf2f67760ef5b6fbcddd2ab05f/pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba", size = 30815067 }, | |||
| { url = "https://files.pythonhosted.org/packages/7e/e2/04e9874abe4094a06fd8b0cbb0f1312d8dd7d707f144c2ec1e5e8f452ffa/pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781", size = 32297128 }, | |||
| { url = "https://files.pythonhosted.org/packages/31/fd/c565e5dcc906a3b471a83273039cb75cb79aad4a2d4a12f76cc5ae90a4b8/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199", size = 41334890 }, | |||
| { url = "https://files.pythonhosted.org/packages/af/a9/3bdd799e2c9b20c1ea6dc6fa8e83f29480a97711cf806e823f808c2316ac/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd", size = 42421775 }, | |||
| { url = "https://files.pythonhosted.org/packages/10/f7/da98ccd86354c332f593218101ae56568d5dcedb460e342000bd89c49cc1/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28", size = 40687231 }, | |||
| { url = "https://files.pythonhosted.org/packages/bb/1b/2168d6050e52ff1e6cefc61d600723870bf569cbf41d13db939c8cf97a16/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8", size = 42295639 }, | |||
| { url = "https://files.pythonhosted.org/packages/b2/66/2d976c0c7158fd25591c8ca55aee026e6d5745a021915a1835578707feb3/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e", size = 42908549 }, | |||
| { url = "https://files.pythonhosted.org/packages/31/a9/dfb999c2fc6911201dcbf348247f9cc382a8990f9ab45c12eabfd7243a38/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a", size = 44557216 }, | |||
| { url = "https://files.pythonhosted.org/packages/a0/8e/9adee63dfa3911be2382fb4d92e4b2e7d82610f9d9f668493bebaa2af50f/pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b", size = 25660496 }, | |||
| { url = "https://files.pythonhosted.org/packages/9b/aa/daa413b81446d20d4dad2944110dcf4cf4f4179ef7f685dd5a6d7570dc8e/pyarrow-20.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a15532e77b94c61efadde86d10957950392999503b3616b2ffcef7621a002893", size = 30798501 }, | |||
| { url = "https://files.pythonhosted.org/packages/ff/75/2303d1caa410925de902d32ac215dc80a7ce7dd8dfe95358c165f2adf107/pyarrow-20.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:dd43f58037443af715f34f1322c782ec463a3c8a94a85fdb2d987ceb5658e061", size = 32277895 }, | |||
| { url = "https://files.pythonhosted.org/packages/92/41/fe18c7c0b38b20811b73d1bdd54b1fccba0dab0e51d2048878042d84afa8/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0d288143a8585806e3cc7c39566407aab646fb9ece164609dac1cfff45f6ae", size = 41327322 }, | |||
| { url = "https://files.pythonhosted.org/packages/da/ab/7dbf3d11db67c72dbf36ae63dcbc9f30b866c153b3a22ef728523943eee6/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6953f0114f8d6f3d905d98e987d0924dabce59c3cda380bdfaa25a6201563b4", size = 42411441 }, | |||
| { url = "https://files.pythonhosted.org/packages/90/c3/0c7da7b6dac863af75b64e2f827e4742161128c350bfe7955b426484e226/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:991f85b48a8a5e839b2128590ce07611fae48a904cae6cab1f089c5955b57eb5", size = 40677027 }, | |||
| { url = "https://files.pythonhosted.org/packages/be/27/43a47fa0ff9053ab5203bb3faeec435d43c0d8bfa40179bfd076cdbd4e1c/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:97c8dc984ed09cb07d618d57d8d4b67a5100a30c3818c2fb0b04599f0da2de7b", size = 42281473 }, | |||
| { url = "https://files.pythonhosted.org/packages/bc/0b/d56c63b078876da81bbb9ba695a596eabee9b085555ed12bf6eb3b7cab0e/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b71daf534f4745818f96c214dbc1e6124d7daf059167330b610fc69b6f3d3e3", size = 42893897 }, | |||
| { url = "https://files.pythonhosted.org/packages/92/ac/7d4bd020ba9145f354012838692d48300c1b8fe5634bfda886abcada67ed/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8b88758f9303fa5a83d6c90e176714b2fd3852e776fc2d7e42a22dd6c2fb368", size = 44543847 }, | |||
| { url = "https://files.pythonhosted.org/packages/9d/07/290f4abf9ca702c5df7b47739c1b2c83588641ddfa2cc75e34a301d42e55/pyarrow-20.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:30b3051b7975801c1e1d387e17c588d8ab05ced9b1e14eec57915f79869b5031", size = 25653219 }, | |||
| { url = "https://files.pythonhosted.org/packages/95/df/720bb17704b10bd69dde086e1400b8eefb8f58df3f8ac9cff6c425bf57f1/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:ca151afa4f9b7bc45bcc791eb9a89e90a9eb2772767d0b1e5389609c7d03db63", size = 30853957 }, | |||
| { url = "https://files.pythonhosted.org/packages/d9/72/0d5f875efc31baef742ba55a00a25213a19ea64d7176e0fe001c5d8b6e9a/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:4680f01ecd86e0dd63e39eb5cd59ef9ff24a9d166db328679e36c108dc993d4c", size = 32247972 }, | |||
| { url = "https://files.pythonhosted.org/packages/d5/bc/e48b4fa544d2eea72f7844180eb77f83f2030b84c8dad860f199f94307ed/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4c8534e2ff059765647aa69b75d6543f9fef59e2cd4c6d18015192565d2b70", size = 41256434 }, | |||
| { url = "https://files.pythonhosted.org/packages/c3/01/974043a29874aa2cf4f87fb07fd108828fc7362300265a2a64a94965e35b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1f8a47f4b4ae4c69c4d702cfbdfe4d41e18e5c7ef6f1bb1c50918c1e81c57b", size = 42353648 }, | |||
| { url = "https://files.pythonhosted.org/packages/68/95/cc0d3634cde9ca69b0e51cbe830d8915ea32dda2157560dda27ff3b3337b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:a1f60dc14658efaa927f8214734f6a01a806d7690be4b3232ba526836d216122", size = 40619853 }, | |||
| { url = "https://files.pythonhosted.org/packages/29/c2/3ad40e07e96a3e74e7ed7cc8285aadfa84eb848a798c98ec0ad009eb6bcc/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:204a846dca751428991346976b914d6d2a82ae5b8316a6ed99789ebf976551e6", size = 42241743 }, | |||
| { url = "https://files.pythonhosted.org/packages/eb/cb/65fa110b483339add6a9bc7b6373614166b14e20375d4daa73483755f830/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f3b117b922af5e4c6b9a9115825726cac7d8b1421c37c2b5e24fbacc8930612c", size = 42839441 }, | |||
| { url = "https://files.pythonhosted.org/packages/98/7b/f30b1954589243207d7a0fbc9997401044bf9a033eec78f6cb50da3f304a/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e724a3fd23ae5b9c010e7be857f4405ed5e679db5c93e66204db1a69f733936a", size = 44503279 }, | |||
| { url = "https://files.pythonhosted.org/packages/37/40/ad395740cd641869a13bcf60851296c89624662575621968dcfafabaa7f6/pyarrow-20.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:82f1ee5133bd8f49d31be1299dc07f585136679666b502540db854968576faf9", size = 25944982 }, | |||
| { url = "https://files.pythonhosted.org/packages/10/53/421820fa125138c868729b930d4bc487af2c4b01b1c6104818aab7e98f13/pyarrow-20.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:1bcbe471ef3349be7714261dea28fe280db574f9d0f77eeccc195a2d161fd861", size = 30844702 }, | |||
| { url = "https://files.pythonhosted.org/packages/2e/70/fd75e03312b715e90d928fb91ed8d45c9b0520346e5231b1c69293afd4c7/pyarrow-20.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:a18a14baef7d7ae49247e75641fd8bcbb39f44ed49a9fc4ec2f65d5031aa3b96", size = 32287180 }, | |||
| { url = "https://files.pythonhosted.org/packages/c4/e3/21e5758e46219fdedf5e6c800574dd9d17e962e80014cfe08d6d475be863/pyarrow-20.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb497649e505dc36542d0e68eca1a3c94ecbe9799cb67b578b55f2441a247fbc", size = 41351968 }, | |||
| { url = "https://files.pythonhosted.org/packages/ac/f5/ed6a4c4b11f9215092a35097a985485bb7d879cb79d93d203494e8604f4e/pyarrow-20.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11529a2283cb1f6271d7c23e4a8f9f8b7fd173f7360776b668e509d712a02eec", size = 42415208 }, | |||
| { url = "https://files.pythonhosted.org/packages/44/e5/466a63668ba25788ee8d38d55f853a60469ae7ad1cda343db9f3f45e0b0a/pyarrow-20.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fc1499ed3b4b57ee4e090e1cea6eb3584793fe3d1b4297bbf53f09b434991a5", size = 40708556 }, | |||
| { url = "https://files.pythonhosted.org/packages/e8/d7/4c4d4e4cf6e53e16a519366dfe9223ee4a7a38e6e28c1c0d372b38ba3fe7/pyarrow-20.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:db53390eaf8a4dab4dbd6d93c85c5cf002db24902dbff0ca7d988beb5c9dd15b", size = 42291754 }, | |||
| { url = "https://files.pythonhosted.org/packages/07/d5/79effb32585b7c18897d3047a2163034f3f9c944d12f7b2fd8df6a2edc70/pyarrow-20.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:851c6a8260ad387caf82d2bbf54759130534723e37083111d4ed481cb253cc0d", size = 42936483 }, | |||
| { url = "https://files.pythonhosted.org/packages/09/5c/f707603552c058b2e9129732de99a67befb1f13f008cc58856304a62c38b/pyarrow-20.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e22f80b97a271f0a7d9cd07394a7d348f80d3ac63ed7cc38b6d1b696ab3b2619", size = 44558895 }, | |||
| { url = "https://files.pythonhosted.org/packages/26/cc/1eb6a01c1bbc787f596c270c46bcd2273e35154a84afcb1d0cb4cc72457e/pyarrow-20.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:9965a050048ab02409fb7cbbefeedba04d3d67f2cc899eff505cc084345959ca", size = 25785667 }, | |||
| ] | |||
| [[package]] | |||
| name = "pygments" | |||
| version = "2.19.2" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 }, | |||
| ] | |||
| [[package]] | |||
| name = "pytest" | |||
| version = "8.3.5" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version < '3.9'", | |||
| ] | |||
| dependencies = [ | |||
| { name = "colorama", marker = "python_full_version < '3.9' and sys_platform == 'win32'" }, | |||
| { name = "exceptiongroup", marker = "python_full_version < '3.9'" }, | |||
| { name = "iniconfig", marker = "python_full_version < '3.9'" }, | |||
| { name = "packaging", marker = "python_full_version < '3.9'" }, | |||
| { name = "pluggy", version = "1.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.9'" }, | |||
| { name = "tomli", marker = "python_full_version < '3.9'" }, | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 }, | |||
| ] | |||
| [[package]] | |||
| name = "pytest" | |||
| version = "8.4.1" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version >= '3.9'", | |||
| ] | |||
| dependencies = [ | |||
| { name = "colorama", marker = "python_full_version >= '3.9' and sys_platform == 'win32'" }, | |||
| { name = "exceptiongroup", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" }, | |||
| { name = "iniconfig", marker = "python_full_version >= '3.9'" }, | |||
| { name = "packaging", marker = "python_full_version >= '3.9'" }, | |||
| { name = "pluggy", version = "1.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.9'" }, | |||
| { name = "pygments", marker = "python_full_version >= '3.9'" }, | |||
| { name = "tomli", marker = "python_full_version >= '3.9' and python_full_version < '3.11'" }, | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474 }, | |||
| ] | |||
| [[package]] | |||
| name = "ruff" | |||
| version = "0.12.1" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/97/38/796a101608a90494440856ccfb52b1edae90de0b817e76bfade66b12d320/ruff-0.12.1.tar.gz", hash = "sha256:806bbc17f1104fd57451a98a58df35388ee3ab422e029e8f5cf30aa4af2c138c", size = 4413426 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/06/bf/3dba52c1d12ab5e78d75bd78ad52fb85a6a1f29cc447c2423037b82bed0d/ruff-0.12.1-py3-none-linux_armv6l.whl", hash = "sha256:6013a46d865111e2edb71ad692fbb8262e6c172587a57c0669332a449384a36b", size = 10305649 }, | |||
| { url = "https://files.pythonhosted.org/packages/8c/65/dab1ba90269bc8c81ce1d499a6517e28fe6f87b2119ec449257d0983cceb/ruff-0.12.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b3f75a19e03a4b0757d1412edb7f27cffb0c700365e9d6b60bc1b68d35bc89e0", size = 11120201 }, | |||
| { url = "https://files.pythonhosted.org/packages/3f/3e/2d819ffda01defe857fa2dd4cba4d19109713df4034cc36f06bbf582d62a/ruff-0.12.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9a256522893cb7e92bb1e1153283927f842dea2e48619c803243dccc8437b8be", size = 10466769 }, | |||
| { url = "https://files.pythonhosted.org/packages/63/37/bde4cf84dbd7821c8de56ec4ccc2816bce8125684f7b9e22fe4ad92364de/ruff-0.12.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:069052605fe74c765a5b4272eb89880e0ff7a31e6c0dbf8767203c1fbd31c7ff", size = 10660902 }, | |||
| { url = "https://files.pythonhosted.org/packages/0e/3a/390782a9ed1358c95e78ccc745eed1a9d657a537e5c4c4812fce06c8d1a0/ruff-0.12.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a684f125a4fec2d5a6501a466be3841113ba6847827be4573fddf8308b83477d", size = 10167002 }, | |||
| { url = "https://files.pythonhosted.org/packages/6d/05/f2d4c965009634830e97ffe733201ec59e4addc5b1c0efa035645baa9e5f/ruff-0.12.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdecdef753bf1e95797593007569d8e1697a54fca843d78f6862f7dc279e23bd", size = 11751522 }, | |||
| { url = "https://files.pythonhosted.org/packages/35/4e/4bfc519b5fcd462233f82fc20ef8b1e5ecce476c283b355af92c0935d5d9/ruff-0.12.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:70d52a058c0e7b88b602f575d23596e89bd7d8196437a4148381a3f73fcd5010", size = 12520264 }, | |||
| { url = "https://files.pythonhosted.org/packages/85/b2/7756a6925da236b3a31f234b4167397c3e5f91edb861028a631546bad719/ruff-0.12.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84d0a69d1e8d716dfeab22d8d5e7c786b73f2106429a933cee51d7b09f861d4e", size = 12133882 }, | |||
| { url = "https://files.pythonhosted.org/packages/dd/00/40da9c66d4a4d51291e619be6757fa65c91b92456ff4f01101593f3a1170/ruff-0.12.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cc32e863adcf9e71690248607ccdf25252eeeab5193768e6873b901fd441fed", size = 11608941 }, | |||
| { url = "https://files.pythonhosted.org/packages/91/e7/f898391cc026a77fbe68dfea5940f8213622474cb848eb30215538a2dadf/ruff-0.12.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fd49a4619f90d5afc65cf42e07b6ae98bb454fd5029d03b306bd9e2273d44cc", size = 11602887 }, | |||
| { url = "https://files.pythonhosted.org/packages/f6/02/0891872fc6aab8678084f4cf8826f85c5d2d24aa9114092139a38123f94b/ruff-0.12.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ed5af6aaaea20710e77698e2055b9ff9b3494891e1b24d26c07055459bb717e9", size = 10521742 }, | |||
| { url = "https://files.pythonhosted.org/packages/2a/98/d6534322c74a7d47b0f33b036b2498ccac99d8d8c40edadb552c038cecf1/ruff-0.12.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:801d626de15e6bf988fbe7ce59b303a914ff9c616d5866f8c79eb5012720ae13", size = 10149909 }, | |||
| { url = "https://files.pythonhosted.org/packages/34/5c/9b7ba8c19a31e2b6bd5e31aa1e65b533208a30512f118805371dbbbdf6a9/ruff-0.12.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2be9d32a147f98a1972c1e4df9a6956d612ca5f5578536814372113d09a27a6c", size = 11136005 }, | |||
| { url = "https://files.pythonhosted.org/packages/dc/34/9bbefa4d0ff2c000e4e533f591499f6b834346025e11da97f4ded21cb23e/ruff-0.12.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:49b7ce354eed2a322fbaea80168c902de9504e6e174fd501e9447cad0232f9e6", size = 11648579 }, | |||
| { url = "https://files.pythonhosted.org/packages/6f/1c/20cdb593783f8f411839ce749ec9ae9e4298c2b2079b40295c3e6e2089e1/ruff-0.12.1-py3-none-win32.whl", hash = "sha256:d973fa626d4c8267848755bd0414211a456e99e125dcab147f24daa9e991a245", size = 10519495 }, | |||
| { url = "https://files.pythonhosted.org/packages/cf/56/7158bd8d3cf16394928f47c637d39a7d532268cd45220bdb6cd622985760/ruff-0.12.1-py3-none-win_amd64.whl", hash = "sha256:9e1123b1c033f77bd2590e4c1fe7e8ea72ef990a85d2484351d408224d603013", size = 11547485 }, | |||
| { url = "https://files.pythonhosted.org/packages/91/d0/6902c0d017259439d6fd2fd9393cea1cfe30169940118b007d5e0ea7e954/ruff-0.12.1-py3-none-win_arm64.whl", hash = "sha256:78ad09a022c64c13cc6077707f036bab0fac8cd7088772dcd1e5be21c5002efc", size = 10691209 }, | |||
| ] | |||
| [[package]] | |||
| name = "tomli" | |||
| version = "2.2.1" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, | |||
| { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, | |||
| { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, | |||
| { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, | |||
| { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, | |||
| { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, | |||
| { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, | |||
| { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, | |||
| { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, | |||
| { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, | |||
| { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, | |||
| { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, | |||
| { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, | |||
| { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, | |||
| { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, | |||
| { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, | |||
| { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, | |||
| { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, | |||
| { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, | |||
| { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, | |||
| { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, | |||
| { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, | |||
| { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, | |||
| { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, | |||
| { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, | |||
| { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, | |||
| { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, | |||
| { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, | |||
| { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, | |||
| { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, | |||
| { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, | |||
| ] | |||
| [[package]] | |||
| name = "typing-extensions" | |||
| version = "4.13.2" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version < '3.9'", | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 }, | |||
| ] | |||
| [[package]] | |||
| name = "typing-extensions" | |||
| version = "4.14.0" | |||
| source = { registry = "https://pypi.org/simple" } | |||
| resolution-markers = [ | |||
| "python_full_version >= '3.9'", | |||
| ] | |||
| sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423 } | |||
| wheels = [ | |||
| { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839 }, | |||
| ] | |||