diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..c04d1ddf2a06abb579dc1685c94a3fea2a92b178 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,25 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text +handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text +intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl filter=lfs diff=lfs merge=lfs -text +intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl filter=lfs diff=lfs merge=lfs -text +torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl filter=lfs diff=lfs merge=lfs -text +xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text diff --git a/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl b/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..7350a2b24bdc218c6c13ea59d8fb12e8352c37e6 Binary files /dev/null and b/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl differ diff --git a/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl.metadata b/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..941cb2704994ddffc8c842040e2192b483750c66 --- /dev/null +++ b/depth-anything/depth_anything-2024.1.22.0-py2.py3-none-any.whl.metadata @@ -0,0 +1,171 @@ +Metadata-Version: 2.1 +Name: depth_anything +Version: 2024.1.22.0 +Project-URL: Documentation, https://github.com/LiheYoung/Depth-Anything +Project-URL: Issues, 
https://github.com/LiheYoung/Depth-Anything/issues +Project-URL: Source, https://github.com/LiheYoung/Depth-Anything +License-File: LICENSE +Requires-Dist: opencv-python +Requires-Dist: torch +Requires-Dist: torchvision +Description-Content-Type: text/markdown + +
+# Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data
+ +[**Lihe Yang**](https://liheyoung.github.io/)1 · [**Bingyi Kang**](https://scholar.google.com/citations?user=NmHgX-wAAAAJ)2+ · [**Zilong Huang**](http://speedinghzl.github.io/)2 · [**Xiaogang Xu**](https://xiaogang00.github.io/)3,4 · [**Jiashi Feng**](https://sites.google.com/site/jshfeng/)2 · [**Hengshuang Zhao**](https://hszhao.github.io/)1+ + +1The University of Hong Kong · 2TikTok · 3Zhejiang Lab · 4Zhejiang University + ++corresponding authors + +Paper PDF +Project Page + +
+This work presents Depth Anything, a highly practical solution for robust monocular depth estimation by training on a combination of 1.5M labeled images and **62M+ unlabeled images**.
+
+![teaser](assets/teaser.png)
+
+## News
+
+* **2024-01-22:** Paper, project page, code, models, and demo are released.
+
+
+## Features of Depth Anything
+
+- **Relative depth estimation**:
+
+  Our foundation models listed [here](https://huggingface.co/spaces/LiheYoung/Depth-Anything/tree/main/checkpoints) can provide relative depth estimation for any given image robustly. Please refer [here](#running) for details.
+
+- **Metric depth estimation**
+
+  We fine-tune our Depth Anything model with metric depth information from NYUv2 or KITTI. It offers strong capabilities of both in-domain and zero-shot metric depth estimation. Please refer [here](./metric_depth) for details.
+
+- **Better depth-conditioned ControlNet**
+
+  We re-train **a better depth-conditioned ControlNet** based on Depth Anything. It offers more precise synthesis than the previous MiDaS-based ControlNet. Please refer [here](./controlnet/) for details.
+
+- **Downstream high-level scene understanding**
+
+  The Depth Anything encoder can be fine-tuned to downstream high-level perception tasks, *e.g.*, semantic segmentation, 86.2 mIoU on Cityscapes and 59.4 mIoU on ADE20K. Please refer [here](./semseg/) for details.
+
+
+## Performance
+
+Here we compare our Depth Anything with the previously best MiDaS v3.1 BEiTL-512 model.
+
+Please note that the latest MiDaS is also trained on KITTI and NYUv2, while our models are not.
+
+| Method | Params | KITTI || NYUv2 || Sintel || DDAD || ETH3D || DIODE ||
+|-|-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+| | | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ |
+| MiDaS | 345.0M | 0.127 | 0.850 | 0.048 | *0.980* | 0.587 | 0.699 | 0.251 | 0.766 | 0.139 | 0.867 | 0.075 | 0.942 |
+| **Ours-S** | 24.8M | 0.080 | 0.936 | 0.053 | 0.972 | 0.464 | 0.739 | 0.247 | 0.768 | 0.127 | **0.885** | 0.076 | 0.939 |
+| **Ours-B** | 97.5M | *0.080* | *0.939* | *0.046* | 0.979 | **0.432** | *0.756* | *0.232* | *0.786* | **0.126** | *0.884* | *0.069* | *0.946* |
+| **Ours-L** | 335.3M | **0.076** | **0.947** | **0.043** | **0.981** | *0.458* | **0.760** | **0.230** | **0.789** | *0.127* | 0.882 | **0.066** | **0.952** |
+
+We highlight the **best** and *second best* results in **bold** and *italic* respectively (**better results**: AbsRel $\downarrow$ , $\delta_1 \uparrow$).
+
+## Pre-trained models
+
+We provide three models of varying scales for robust relative depth estimation:
+
+- Depth-Anything-ViT-Small (24.8M)
+
+- Depth-Anything-ViT-Base (97.5M)
+
+- Depth-Anything-ViT-Large (335.3M)
+
+Download our pre-trained models [here](https://huggingface.co/spaces/LiheYoung/Depth-Anything/tree/main/checkpoints), and put them under the ``checkpoints`` directory.
+
+## Usage
+
+### Installation
+
+The setup is very simple. Just make sure ``torch``, ``torchvision``, and ``cv2`` are available in your environment.
+
+```bash
+git clone https://github.com/LiheYoung/Depth-Anything
+cd Depth-Anything
+pip install -r requirements.txt
+```
+
+### Running
+
+```bash
+python run.py --encoder <vits | vitb | vitl> --load-from <checkpoint-path> --img-path <img-path> --outdir <outdir> --localhub
+```
+For the ``img-path``, you can either 1) point it to an image directory containing all images of interest, 2) point it to a single image, or 3) point it to a text file storing all image paths.
+ +For example: +```bash +python run.py --encoder vitl --load-from checkpoints/depth_anything_vitl14.pth --img-path demo_images --outdir depth_visualization --localhub +``` + + +### Gradio demo + +To use our gradio demo locally: + +```bash +python app.py +``` + +You can also try our [online demo](https://huggingface.co/spaces/LiheYoung/Depth-Anything). + +### Import Depth Anything to your project + +If you want to use Depth Anything in your own project, you can simply follow [``run.py``](run.py) to load our models and define data pre-processing. + +
+Code snippet (note the difference between our data pre-processing and that of MiDaS):
+
+```python
+from depth_anything.dpt import DPT_DINOv2
+from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
+
+import cv2
+import torch
+from torchvision.transforms import Compose
+
+depth_anything = DPT_DINOv2(encoder='vitl', features=256, out_channels=[256, 512, 1024, 1024], localhub=True)
+depth_anything.load_state_dict(torch.load('checkpoints/depth_anything_vitl14.pth'))
+
+transform = Compose([
+    Resize(
+        width=518,
+        height=518,
+        resize_target=False,
+        keep_aspect_ratio=True,
+        ensure_multiple_of=14,
+        resize_method='lower_bound',
+        image_interpolation_method=cv2.INTER_CUBIC,
+    ),
+    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+    PrepareForNet(),
+])
+
+image = cv2.cvtColor(cv2.imread('your image path'), cv2.COLOR_BGR2RGB) / 255.0
+image = transform({'image': image})['image']
+image = torch.from_numpy(image).unsqueeze(0)
+
+# depth shape: 1xHxW
+depth = depth_anything(image)
+```
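+If it helps, here is a minimal, hypothetical post-processing sketch (an addition, not part of the original snippet) for turning the raw ``1xHxW`` prediction into a saveable 8-bit visualization, similar in spirit to what ``run.py`` does; ``raw_image`` is assumed to be the unnormalized BGR array returned by ``cv2.imread`` above.
+
+```python
+import numpy as np
+import torch.nn.functional as F
+
+# Resize the prediction back to the original resolution, then min-max
+# normalize it to [0, 255] for saving. `raw_image` is an assumed name for
+# the BGR array loaded with cv2.imread before pre-processing.
+h, w = raw_image.shape[:2]
+depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
+depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
+cv2.imwrite('depth_visualization.png', depth.detach().cpu().numpy().astype(np.uint8))
+```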
+ + +## Citation + +If you find this project useful, please consider citing: + +```bibtex +@article{depthanything, + title={Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + journal={arXiv:2401.10891}, + year={2024} +} +``` \ No newline at end of file diff --git a/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl b/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..b71377acc88bd3cf9925a0fb8c65ef42e9925ec8 Binary files /dev/null and b/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl differ diff --git a/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl.metadata b/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..2f174f3a44ac302ab00e8a9b8813c8a3b5b7d5c4 --- /dev/null +++ b/depth-anything/depth_anything-2024.6.15.0-py2.py3-none-any.whl.metadata @@ -0,0 +1,288 @@ +Metadata-Version: 2.3 +Name: depth_anything +Version: 2024.6.15.0 +Project-URL: Documentation, https://github.com/LiheYoung/Depth-Anything +Project-URL: Issues, https://github.com/LiheYoung/Depth-Anything/issues +Project-URL: Source, https://github.com/LiheYoung/Depth-Anything +License-File: LICENSE +Requires-Dist: opencv-python +Requires-Dist: torch +Requires-Dist: torchvision +Description-Content-Type: text/markdown + +
+# Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data
+ +[**Lihe Yang**](https://liheyoung.github.io/)1 · [**Bingyi Kang**](https://scholar.google.com/citations?user=NmHgX-wAAAAJ)2† · [**Zilong Huang**](http://speedinghzl.github.io/)2 · [**Xiaogang Xu**](https://xiaogang00.github.io/)3,4 · [**Jiashi Feng**](https://sites.google.com/site/jshfeng/)2 · [**Hengshuang Zhao**](https://hszhao.github.io/)1* + +1HKU    2TikTok    3CUHK    4ZJU + +†project lead *corresponding author + +**CVPR 2024** + +Paper PDF +Project Page + + +
+ +This work presents Depth Anything, a highly practical solution for robust monocular depth estimation by training on a combination of 1.5M labeled images and **62M+ unlabeled images**. + +![teaser](assets/teaser.png) + +
+ Try our latest Depth Anything V2 models!
+ (Due to an issue with our V2 GitHub repository, we have temporarily uploaded the content to a Hugging Face space)
+ +## News + +* **2024-06-14:** [Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2) is released. +* **2024-02-27:** Depth Anything is accepted by CVPR 2024. +* **2024-02-05:** [Depth Anything Gallery](./gallery.md) is released. Thank all the users! +* **2024-02-02:** Depth Anything serves as the default depth processor for [InstantID](https://github.com/InstantID/InstantID) and [InvokeAI](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.6.1). +* **2024-01-25:** Support [video depth visualization](./run_video.py). An [online demo for video](https://huggingface.co/spaces/JohanDL/Depth-Anything-Video) is also available. +* **2024-01-23:** The new ControlNet based on Depth Anything is integrated into [ControlNet WebUI](https://github.com/Mikubill/sd-webui-controlnet) and [ComfyUI's ControlNet](https://github.com/Fannovel16/comfyui_controlnet_aux). +* **2024-01-23:** Depth Anything [ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX) and [TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt) versions are supported. +* **2024-01-22:** Paper, project page, code, models, and demo ([HuggingFace](https://huggingface.co/spaces/LiheYoung/Depth-Anything), [OpenXLab](https://openxlab.org.cn/apps/detail/yyfan/depth_anything)) are released. + + +## Features of Depth Anything + +***If you need other features, please first check [existing community supports](#community-support).*** + +- **Relative depth estimation**: + + Our foundation models listed [here](https://huggingface.co/spaces/LiheYoung/Depth-Anything/tree/main/checkpoints) can provide relative depth estimation for any given image robustly. Please refer [here](#running) for details. + +- **Metric depth estimation** + + We fine-tune our Depth Anything model with metric depth information from NYUv2 or KITTI. It offers strong capabilities of both in-domain and zero-shot metric depth estimation. Please refer [here](./metric_depth) for details. + + +- **Better depth-conditioned ControlNet** + + We re-train **a better depth-conditioned ControlNet** based on Depth Anything. It offers more precise synthesis than the previous MiDaS-based ControlNet. Please refer [here](./controlnet/) for details. You can also use our new ControlNet based on Depth Anything in [ControlNet WebUI](https://github.com/Mikubill/sd-webui-controlnet) or [ComfyUI's ControlNet](https://github.com/Fannovel16/comfyui_controlnet_aux). + +- **Downstream high-level scene understanding** + + The Depth Anything encoder can be fine-tuned to downstream high-level perception tasks, *e.g.*, semantic segmentation, 86.2 mIoU on Cityscapes and 59.4 mIoU on ADE20K. Please refer [here](./semseg/) for details. + + +## Performance + +Here we compare our Depth Anything with the previously best MiDaS v3.1 BEiTL-512 model. + +Please note that the latest MiDaS is also trained on KITTI and NYUv2, while we do not. 
+ +| Method | Params | KITTI || NYUv2 || Sintel || DDAD || ETH3D || DIODE || +|-|-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| | | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | AbsRel | $\delta_1$ | +| MiDaS | 345.0M | 0.127 | 0.850 | 0.048 | *0.980* | 0.587 | 0.699 | 0.251 | 0.766 | 0.139 | 0.867 | 0.075 | 0.942 | +| **Ours-S** | 24.8M | 0.080 | 0.936 | 0.053 | 0.972 | 0.464 | 0.739 | 0.247 | 0.768 | 0.127 | **0.885** | 0.076 | 0.939 | +| **Ours-B** | 97.5M | *0.080* | *0.939* | *0.046* | 0.979 | **0.432** | *0.756* | *0.232* | *0.786* | **0.126** | *0.884* | *0.069* | *0.946* | +| **Ours-L** | 335.3M | **0.076** | **0.947** | **0.043** | **0.981** | *0.458* | **0.760** | **0.230** | **0.789** | *0.127* | 0.882 | **0.066** | **0.952** | + +We highlight the **best** and *second best* results in **bold** and *italic* respectively (**better results**: AbsRel $\downarrow$ , $\delta_1 \uparrow$). + +## Pre-trained models + +We provide three models of varying scales for robust relative depth estimation: + +| Model | Params | Inference Time on V100 (ms) | A100 | RTX4090 ([TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)) | +|:-|-:|:-:|:-:|:-:| +| Depth-Anything-Small | 24.8M | 12 | 8 | 3 | +| Depth-Anything-Base | 97.5M | 13 | 9 | 6 | +| Depth-Anything-Large | 335.3M | 20 | 13 | 12 | + +Note that the V100 and A100 inference time (*without TensorRT*) is computed by excluding the pre-processing and post-processing stages, whereas the last column RTX4090 (*with TensorRT*) is computed by including these two stages (please refer to [Depth-Anything-TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)). + +You can easily load our pre-trained models by: +```python +from depth_anything.dpt import DepthAnything + +encoder = 'vits' # can also be 'vitb' or 'vitl' +depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_{:}14'.format(encoder)) +``` + +Depth Anything is also supported in [``transformers``](https://github.com/huggingface/transformers). You can use it for depth prediction within [3 lines of code](https://huggingface.co/docs/transformers/main/model_doc/depth_anything) (credit to [@niels](https://huggingface.co/nielsr)). + +### *No network connection, cannot load these models?* + +
+Click here for solutions
+
+- First, manually download the three checkpoints: [depth-anything-large](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vitl14.pth), [depth-anything-base](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vitb14.pth), and [depth-anything-small](https://huggingface.co/spaces/LiheYoung/Depth-Anything/blob/main/checkpoints/depth_anything_vits14.pth).
+
+- Second, upload the folder containing the checkpoints to your remote server.
+
+- Lastly, load the model locally:
+```python
+import torch
+
+from depth_anything.dpt import DepthAnything
+
+model_configs = {
+    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
+    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
+    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}
+}
+
+encoder = 'vitl' # or 'vitb', 'vits'
+depth_anything = DepthAnything(model_configs[encoder])
+depth_anything.load_state_dict(torch.load(f'./checkpoints/depth_anything_{encoder}14.pth'))
+```
+Note that when loading the model locally this way, you do not need to install the ``huggingface_hub`` package, so feel free to delete this [line](https://github.com/LiheYoung/Depth-Anything/blob/e7ef4b4b7a0afd8a05ce9564f04c1e5b68268516/depth_anything/dpt.py#L5) and the ``PyTorchModelHubMixin`` in this [line](https://github.com/LiheYoung/Depth-Anything/blob/e7ef4b4b7a0afd8a05ce9564f04c1e5b68268516/depth_anything/dpt.py#L169).
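+As a quick, hypothetical sanity check (an addition, not from the original instructions), you can switch the locally loaded model to eval mode and run a dummy forward pass; input sides should be multiples of 14 to match the ViT-14 patch size used by the pre-processing transform:
+
+```python
+# Sanity-check sketch: confirm the checkpoint loaded and the forward pass runs.
+depth_anything.eval()
+with torch.no_grad():
+    dummy = torch.randn(1, 3, 518, 518)  # 518 is a multiple of 14, as in the transform
+    depth = depth_anything(dummy)
+print(depth.shape)  # expected to be 1xHxW, matching the snippet later in this README
+```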
+## Usage
+
+### Installation
+
+```bash
+git clone https://github.com/LiheYoung/Depth-Anything
+cd Depth-Anything
+pip install -r requirements.txt
+```
+
+### Running
+
+```bash
+python run.py --encoder <vits | vitb | vitl> --img-path <img-path> --outdir <outdir> [--pred-only] [--grayscale]
+```
+Arguments:
+- ``--img-path``: you can either 1) point it to an image directory containing all images of interest, 2) point it to a single image, or 3) point it to a text file storing all image paths.
+- ``--pred-only``: save only the predicted depth map. Without it, by default, we visualize the image and its depth map side by side.
+- ``--grayscale``: save the depth map in grayscale. Without it, by default, we apply a color palette to the depth map.
+
+For example:
+```bash
+python run.py --encoder vitl --img-path assets/examples --outdir depth_vis
+```
+
+**If you want to use Depth Anything on videos:**
+```bash
+python run_video.py --encoder vitl --video-path assets/examples_video --outdir video_depth_vis
+```
+
+### Gradio demo
+
+To use our gradio demo locally:
+
+```bash
+python app.py
+```
+
+You can also try our [online demo](https://huggingface.co/spaces/LiheYoung/Depth-Anything).
+
+### Import Depth Anything to your project
+
+If you want to use Depth Anything in your own project, you can simply follow [``run.py``](run.py) to load our models and define data pre-processing.
+
+Code snippet (note the difference between our data pre-processing and that of MiDaS) + +```python +from depth_anything.dpt import DepthAnything +from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet + +import cv2 +import torch +from torchvision.transforms import Compose + +encoder = 'vits' # can also be 'vitb' or 'vitl' +depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_{:}14'.format(encoder)).eval() + +transform = Compose([ + Resize( + width=518, + height=518, + resize_target=False, + keep_aspect_ratio=True, + ensure_multiple_of=14, + resize_method='lower_bound', + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), +]) + +image = cv2.cvtColor(cv2.imread('your image path'), cv2.COLOR_BGR2RGB) / 255.0 +image = transform({'image': image})['image'] +image = torch.from_numpy(image).unsqueeze(0) + +# depth shape: 1xHxW +depth = depth_anything(image) +``` +
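+As a small usage note (an assumption, not from the original snippet): inference does not need gradients, so wrapping the forward pass in ``torch.no_grad()`` and moving the model and input to a GPU, when one is available, is usually worthwhile:
+
+```python
+# Hypothetical GPU variant of the snippet above; the pre-processing is unchanged.
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+depth_anything = depth_anything.to(device)
+with torch.no_grad():
+    depth = depth_anything(image.to(device))  # image: the 1x3xHxW tensor from above
+```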
+ +### Do not want to define image pre-processing or download model definition files? + +Easily use Depth Anything through [``transformers``](https://github.com/huggingface/transformers) within 3 lines of code! Please refer to [these instructions](https://huggingface.co/docs/transformers/main/model_doc/depth_anything) (credit to [@niels](https://huggingface.co/nielsr)). + +**Note:** If you encounter ``KeyError: 'depth_anything'``, please install the latest [``transformers``](https://github.com/huggingface/transformers) from source: +```bash +pip install git+https://github.com/huggingface/transformers.git +``` +
+Click here for a brief demo: + +```python +from transformers import pipeline +from PIL import Image + +image = Image.open('Your-image-path') +pipe = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf") +depth = pipe(image)["depth"] +``` +
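+As a usage note (our reading of the ``transformers`` pipeline API, so treat it as an assumption): the pipeline returns a dict whose ``"depth"`` entry is a ``PIL.Image`` visualization and whose ``"predicted_depth"`` entry is the raw ``torch.Tensor``, e.g.:
+
+```python
+result = pipe(image)
+result["depth"].save('depth.png')  # PIL.Image visualization
+raw = result["predicted_depth"]    # raw torch.Tensor prediction
+```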
+ +## Community Support + +**We sincerely appreciate all the extensions built on our Depth Anything from the community. Thank you a lot!** + +Here we list the extensions we have found: +- Depth Anything TensorRT: + - https://github.com/spacewalk01/depth-anything-tensorrt + - https://github.com/thinvy/DepthAnythingTensorrtDeploy + - https://github.com/daniel89710/trt-depth-anything +- Depth Anything ONNX: https://github.com/fabio-sim/Depth-Anything-ONNX +- Depth Anything in Transformers.js (3D visualization): https://huggingface.co/spaces/Xenova/depth-anything-web +- Depth Anything for video (online demo): https://huggingface.co/spaces/JohanDL/Depth-Anything-Video +- Depth Anything in ControlNet WebUI: https://github.com/Mikubill/sd-webui-controlnet +- Depth Anything in ComfyUI's ControlNet: https://github.com/Fannovel16/comfyui_controlnet_aux +- Depth Anything in X-AnyLabeling: https://github.com/CVHub520/X-AnyLabeling +- Depth Anything in OpenXLab: https://openxlab.org.cn/apps/detail/yyfan/depth_anything +- Depth Anything in OpenVINO: https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/280-depth-anything +- Depth Anything ROS: + - https://github.com/scepter914/DepthAnything-ROS + - https://github.com/polatztrk/depth_anything_ros +- Depth Anything Android: + - https://github.com/FeiGeChuanShu/ncnn-android-depth_anything + - https://github.com/shubham0204/Depth-Anything-Android +- Depth Anything in TouchDesigner: https://github.com/olegchomp/TDDepthAnything +- LearnOpenCV research article on Depth Anything: https://learnopencv.com/depth-anything +- Learn more about the DPT architecture we used: https://github.com/heyoeyo/muggled_dpt + + +If you have your amazing projects supporting or improving (*e.g.*, speed) Depth Anything, please feel free to drop an issue. We will add them here. + + +## Acknowledgement + +We would like to express our deepest gratitude to [AK(@_akhaliq)](https://twitter.com/_akhaliq) and the awesome HuggingFace team ([@niels](https://huggingface.co/nielsr), [@hysts](https://huggingface.co/hysts), and [@yuvraj](https://huggingface.co/ysharma)) for helping improve the online demo and build the HF models. + +Besides, we thank the [MagicEdit](https://magic-edit.github.io/) team for providing some video examples for video depth estimation, and [Tiancheng Shen](https://scholar.google.com/citations?user=iRY1YVoAAAAJ) for evaluating the depth maps with MagicEdit. + +## Citation + +If you find this project useful, please consider citing: + +```bibtex +@inproceedings{depthanything, + title={Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + booktitle={CVPR}, + year={2024} +} +``` diff --git a/depth-anything/index.html b/depth-anything/index.html new file mode 100644 index 0000000000000000000000000000000000000000..b1a4495b6c614d1df27f4ad3edf551f98c56831b --- /dev/null +++ b/depth-anything/index.html @@ -0,0 +1,24 @@ + + + + + + + + Links for depth-anything + + + +
+ Links for depth-anything
+ + depth_anything-2024.6.15.0-py2.py3-none-any.whl + +
+ + depth_anything-2024.1.22.0-py2.py3-none-any.whl + +
+ + diff --git a/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl b/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..f5164a734dcc6bbb8bfdf9573e38f3baf04bc4aa --- /dev/null +++ b/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2181b6724669fb0147c9ebc326d72b94de41bced1812eff5df4b133b5d0b575 +size 2960298 diff --git a/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl.metadata b/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..a5d324ec40d0fb5575eee4dc9da1a54888eeed3a --- /dev/null +++ b/dlib/dlib-19.22.99-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.22.99 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development +License-File: LICENSE.txt + +See http://dlib.net for documentation. 
+ diff --git a/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl b/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..d73e8e175bdcb22ddfd13d3aae6df5be755ebf79 --- /dev/null +++ b/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf28f39a48d812870193e3bf2c44cacc92a7d2b94a17a7a1eb8d0ef3f3d02988 +size 2930358 diff --git a/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl.metadata b/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..a5d324ec40d0fb5575eee4dc9da1a54888eeed3a --- /dev/null +++ b/dlib/dlib-19.22.99-cp37-cp37m-win_amd64.whl.metadata @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.22.99 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development +License-File: LICENSE.txt + +See http://dlib.net for documentation. 
+ diff --git a/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl b/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..5456b03a5dd895c2495658f715a8f4901b1e13c4 --- /dev/null +++ b/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b02321413b8765159ae552967ab9781a981324b2a389924bdb8c4ccab227f160 +size 2959754 diff --git a/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl.metadata b/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..8a610eec7e3799c010e96e4669eb8ddb9b77737f --- /dev/null +++ b/dlib/dlib-19.22.99-cp38-cp38-win_amd64.whl.metadata @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.22.99 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development + +See http://dlib.net for documentation. 
+ + diff --git a/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl b/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..d1132b2ac3df82254faaf0ededcfab01006ae0b0 --- /dev/null +++ b/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fd828405c77af2df1ff6a09964a8ba7f14838c538d9017046804f353e9b4bc2 +size 2960600 diff --git a/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl.metadata b/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..8a610eec7e3799c010e96e4669eb8ddb9b77737f --- /dev/null +++ b/dlib/dlib-19.22.99-cp39-cp39-win_amd64.whl.metadata @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.22.99 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development + +See http://dlib.net for documentation. 
+ + diff --git a/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl b/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..4ed3e9ea839800f29b019642b931fbab999b1054 --- /dev/null +++ b/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f1a5ee167975d7952b28e0ce4495f1d9a77644761cf5720fb66d7c6188ae496 +size 2825619 diff --git a/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl.metadata b/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..d11ee1cf7934631f72184ec44b5c5693f5102837 --- /dev/null +++ b/dlib/dlib-19.24.1-cp311-cp311-win_amd64.whl.metadata @@ -0,0 +1,32 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.24.1 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development + +See http://dlib.net for documentation. 
diff --git a/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl b/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..5851adc0bc6223bd5f18848bac71e106d39e71bb --- /dev/null +++ b/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20c62e606ca4c9961305f7be3d03990380d3e6c17f8d27798996e97a73271862 +size 2869640 diff --git a/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl.metadata b/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..41829c4908767958623fdd1eca96b3e8436e8c36 --- /dev/null +++ b/dlib/dlib-19.24.99-cp312-cp312-win_amd64.whl.metadata @@ -0,0 +1,26 @@ +Metadata-Version: 2.1 +Name: dlib +Version: 19.24.99 +Summary: A toolkit for making real world machine learning and data analysis applications +Home-page: https://github.com/davisking/dlib +Author: Davis King +Author-email: davis@dlib.net +License: Boost Software License +Keywords: dlib,Computer Vision,Machine Learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Image Recognition +Classifier: Topic :: Software Development +License-File: LICENSE.txt + +See http://dlib.net for documentation. diff --git a/dlib/index.html b/dlib/index.html new file mode 100644 index 0000000000000000000000000000000000000000..3d3ac83ce98b6fca816650f666794a52a4d52c26 --- /dev/null +++ b/dlib/index.html @@ -0,0 +1,40 @@ + + + + + + + + Links for dlib + + + +
+ Links for dlib
+ + dlib-19.24.99-cp312-cp312-win_amd64.whl + +
+ + dlib-19.24.1-cp311-cp311-win_amd64.whl + +
+ + dlib-19.22.99-cp310-cp310-win_amd64.whl + +
+ + dlib-19.22.99-cp39-cp39-win_amd64.whl + +
+ + dlib-19.22.99-cp38-cp38-win_amd64.whl + +
+ + dlib-19.22.99-cp37-cp37m-win_amd64.whl + +
+ + diff --git a/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl b/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..d3a0e62d6e45c6a3632bbec92634485149efb789 --- /dev/null +++ b/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9425f4d59d149727f5b2467a8e0a7369caa1183cc5dbd855845d99462650cf4 +size 13084072 diff --git a/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl.metadata b/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..7cee80284f75b8a03e8cb0fd5782df88f1721dbf --- /dev/null +++ b/handrefinerportable/handrefinerportable-2024.1.18.0-py2.py3-none-any.whl.metadata @@ -0,0 +1,16 @@ +Metadata-Version: 2.1 +Name: handrefinerportable +Version: 2024.1.18.0 +Project-URL: Documentation, https://github.com/huchenlei/HandRefinerPortable +Project-URL: Issues, https://github.com/huchenlei/HandRefinerPortable/issues +Project-URL: Source, https://github.com/huchenlei/HandRefinerPortable +Requires-Dist: mediapipe +Requires-Dist: rtree +Requires-Dist: trimesh[easy] +Description-Content-Type: text/markdown + +# HandRefinerPortable + +This is a convenience package used by +[sd-webui-controlnet](https://github.com/Mikubill/sd-webui-controlnet) +to package the dependencies and model used by the hand refiner preprocessor. diff --git a/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl b/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..7b3272a6a08c5e34f23836ab48e89f210a2a4020 --- /dev/null +++ b/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e6c702905919f4c49bcb2db7b20d334e8458a7555cd57630600584ec38ca6a9 +size 13084081 diff --git a/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl.metadata b/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..66460ae90c1dd38f15d6568eb128b3af39341246 --- /dev/null +++ b/handrefinerportable/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl.metadata @@ -0,0 +1,16 @@ +Metadata-Version: 2.1 +Name: handrefinerportable +Version: 2024.2.12.0 +Project-URL: Documentation, https://github.com/huchenlei/HandRefinerPortable +Project-URL: Issues, https://github.com/huchenlei/HandRefinerPortable/issues +Project-URL: Source, https://github.com/huchenlei/HandRefinerPortable +Requires-Dist: mediapipe +Requires-Dist: rtree +Requires-Dist: trimesh[easy] +Description-Content-Type: text/markdown + +# HandRefinerPortable + +This is a convenience package used by +[sd-webui-controlnet](https://github.com/Mikubill/sd-webui-controlnet) +to package the dependencies and model used by the hand refiner preprocessor. diff --git a/handrefinerportable/index.html b/handrefinerportable/index.html new file mode 100644 index 0000000000000000000000000000000000000000..9ddee1feda93b9dfb2bc2ebd1a977bfa17621ba9 --- /dev/null +++ b/handrefinerportable/index.html @@ -0,0 +1,24 @@ + + + + + + + + Links for handrefinerportable + + + +
+ Links for handrefinerportable
+ + handrefinerportable-2024.2.12.0-py2.py3-none-any.whl + +
+ + handrefinerportable-2024.1.18.0-py2.py3-none-any.whl + +
+ + diff --git a/index.html b/index.html new file mode 100644 index 0000000000000000000000000000000000000000..deb6d6e0e91fe1c94cba1f5d98802ab1ea85214d --- /dev/null +++ b/index.html @@ -0,0 +1,49 @@ + + + + + + + + Simple Package Repository + + + + + depth_anything + +
+ + dlib + +
+ + handrefinerportable + +
+ + insightface + +
+ + intel-extension-for-pytorch + +
+ + torch + +
+ + torchaudio + +
+ + torchvision + +
+ + xformers + +
+ + diff --git a/insightface/index.html b/insightface/index.html new file mode 100644 index 0000000000000000000000000000000000000000..1c24c1ee508b8b808390fae5c23fd9131e715d4f --- /dev/null +++ b/insightface/index.html @@ -0,0 +1,32 @@ + + + + + + + + Links for insightface + + + +
+ Links for insightface
+ + insightface-0.7.3-cp312-cp312-win_amd64.whl + +
+ + insightface-0.7.3-cp311-cp311-win_amd64.whl + +
+ + insightface-0.7.3-cp310-cp310-win_amd64.whl + +
+ + insightface-0.7.3-cp39-cp39-win_amd64.whl + +
+ + diff --git a/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl b/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..4f71ef2ff2ea1ce366b3e3083b510b099882e0b1 Binary files /dev/null and b/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl differ diff --git a/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl.metadata b/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..df7d99d143c6b02ec1639c1258131f82b1430748 --- /dev/null +++ b/insightface/insightface-0.7.3-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,176 @@ +Metadata-Version: 2.1 +Name: insightface +Version: 0.7.3 +Summary: InsightFace Python Library +Home-page: https://github.com/deepinsight/insightface +Author: InsightFace Contributors +Author-email: contact@insightface.ai +License: MIT +Description-Content-Type: text/markdown +Requires-Dist: numpy +Requires-Dist: onnx +Requires-Dist: tqdm +Requires-Dist: requests +Requires-Dist: matplotlib +Requires-Dist: Pillow +Requires-Dist: scipy +Requires-Dist: scikit-learn +Requires-Dist: scikit-image +Requires-Dist: easydict +Requires-Dist: cython +Requires-Dist: albumentations +Requires-Dist: prettytable + +# InsightFace Python Library + +## License + +The code of InsightFace Python Library is released under the MIT License. There is no limitation for both academic and commercial usage. + +**The pretrained models we provided with this library are available for non-commercial research purposes only, including both auto-downloading models and manual-downloading models.** + +## Install + +### Install Inference Backend + +For ``insightface<=0.1.5``, we use MXNet as inference backend. + +Starting from insightface>=0.2, we use onnxruntime as inference backend. + +You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference. + +## Change Log + +### [0.7.1] - 2022-12-14 + +#### Changed + +- Change model downloading provider to cloudfront. + +### [0.7] - 2022-11-28 + +#### Added + +- Add face swapping model and example. + +#### Changed + +- Set default ORT provider to CUDA and CPU. + +### [0.6] - 2022-01-29 + +#### Added + +- Add pose estimation in face-analysis app. + +#### Changed + +- Change model automated downloading url, to ucloud. + + +## Quick Example + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +app.prepare(ctx_id=0, det_size=(640, 640)) +img = ins_get_image('t1') +faces = app.get(img) +rimg = app.draw_on(img, faces) +cv2.imwrite("./t1_output.jpg", rimg) +``` + +This quick example will detect faces from the ``t1.jpg`` image and draw detection results on it. + + + +## Model Zoo + +In the latest version of insightface library, we provide following model packs: + +Name in **bold** is the default model pack. **Auto** means we can download the model pack through the python library directly. + +Once you manually downloaded the zip model pack, unzip it under `~/.insightface/models/` first before you call the program. 
+ +| Name | Detection Model | Recognition Model | Alignment | Attributes | Model-Size | Link | Auto | +| ------------- | --------------- | -------------------- | ------------ | ---------- | ---------- | ------------------------------------------------------------ | ------------- | +| antelopev2 | SCRFD-10GF | ResNet100@Glint360K | 2d106 & 3d68 | Gender&Age | 407MB | [link](https://drive.google.com/file/d/18wEUfMNohBJ4K3Ly5wpTejPfDzp-8fI8/view?usp=sharing) | N | +| **buffalo_l** | SCRFD-10GF | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 326MB | [link](https://drive.google.com/file/d/1qXsQJ8ZT42_xSmWIYy85IcidpiZudOCB/view?usp=sharing) | Y | +| buffalo_m | SCRFD-2.5GF | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 313MB | [link](https://drive.google.com/file/d/1net68yNxF33NNV6WP7k56FS6V53tq-64/view?usp=sharing) | N | +| buffalo_s | SCRFD-500MF | MBF@WebFace600K | 2d106 & 3d68 | Gender&Age | 159MB | [link](https://drive.google.com/file/d/1pKIusApEfoHKDjeBTXYB3yOQ0EtTonNE/view?usp=sharing) | N | +| buffalo_sc | SCRFD-500MF | MBF@WebFace600K | - | - | 16MB | [link](https://drive.google.com/file/d/19I-MZdctYKmVf3nu5Da3HS6KH5LBfdzG/view?usp=sharing) | N | + + + +Recognition Accuracy: + +| Name | MR-ALL | African | Caucasian | South Asian | East Asian | LFW | CFP-FP | AgeDB-30 | IJB-C(E4) | +| :-------- | ------ | ------- | --------- | ----------- | ---------- | ----- | ------ | -------- | --------- | +| buffalo_l | 91.25 | 90.29 | 94.70 | 93.16 | 74.96 | 99.83 | 99.33 | 98.23 | 97.25 | +| buffalo_s | 71.87 | 69.45 | 80.45 | 73.39 | 51.03 | 99.70 | 98.00 | 96.58 | 95.02 | + +*buffalo_m has the same accuracy with buffalo_l.* + +*buffalo_sc has the same accuracy with buffalo_s.* + + + +**Note that these models are available for non-commercial research purposes only.** + + + +For insightface>=0.3.3, models will be downloaded automatically once we init ``app = FaceAnalysis()`` instance. + +For insightface==0.3.2, you must first download the model package by command: + +``` +insightface-cli model.download buffalo_l +``` + +## Use Your Own Licensed Model + +You can simply create a new model directory under ``~/.insightface/models/`` and replace the pretrained models we provide with your own models. And then call ``app = FaceAnalysis(name='your_model_zoo')`` to load these models. + +## Call Models + +The latest insightface libary only supports onnx models. Once you have trained detection or recognition models by PyTorch, MXNet or any other frameworks, you can convert it to the onnx format and then they can be called with insightface library. 
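+The ONNX conversion mentioned above is framework-specific; for a PyTorch model, a minimal, hypothetical export sketch might look like the following (``MyDetector`` is a placeholder for your own network, not part of insightface):
+
+```python
+import torch
+
+# Export a trained PyTorch detector to ONNX so that
+# insightface.model_zoo.get_model() can load it.
+model = MyDetector().eval()  # placeholder for your trained detection network
+dummy = torch.randn(1, 3, 640, 640)  # matches the det_size=(640, 640) used below
+torch.onnx.export(
+    model, dummy, 'your_detection_model.onnx',
+    input_names=['input'], output_names=['output'],
+    dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}},
+    opset_version=11,
+)
+```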
+ +### Call Detection Models + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +# Method-1, use FaceAnalysis +app = FaceAnalysis(allowed_modules=['detection']) # enable detection model only +app.prepare(ctx_id=0, det_size=(640, 640)) + +# Method-2, load model directly +detector = insightface.model_zoo.get_model('your_detection_model.onnx') +detector.prepare(ctx_id=0, input_size=(640, 640)) + +``` + +### Call Recognition Models + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +handler = insightface.model_zoo.get_model('your_recognition_model.onnx') +handler.prepare(ctx_id=0) + +``` + + diff --git a/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl b/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..a07f655bd4b8d931ec656e51aab64787e2fcd3e9 Binary files /dev/null and b/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl differ diff --git a/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl.metadata b/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..df7d99d143c6b02ec1639c1258131f82b1430748 --- /dev/null +++ b/insightface/insightface-0.7.3-cp311-cp311-win_amd64.whl.metadata @@ -0,0 +1,176 @@ +Metadata-Version: 2.1 +Name: insightface +Version: 0.7.3 +Summary: InsightFace Python Library +Home-page: https://github.com/deepinsight/insightface +Author: InsightFace Contributors +Author-email: contact@insightface.ai +License: MIT +Description-Content-Type: text/markdown +Requires-Dist: numpy +Requires-Dist: onnx +Requires-Dist: tqdm +Requires-Dist: requests +Requires-Dist: matplotlib +Requires-Dist: Pillow +Requires-Dist: scipy +Requires-Dist: scikit-learn +Requires-Dist: scikit-image +Requires-Dist: easydict +Requires-Dist: cython +Requires-Dist: albumentations +Requires-Dist: prettytable + +# InsightFace Python Library + +## License + +The code of InsightFace Python Library is released under the MIT License. There is no limitation for both academic and commercial usage. + +**The pretrained models we provided with this library are available for non-commercial research purposes only, including both auto-downloading models and manual-downloading models.** + +## Install + +### Install Inference Backend + +For ``insightface<=0.1.5``, we use MXNet as inference backend. + +Starting from insightface>=0.2, we use onnxruntime as inference backend. + +You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference. + +## Change Log + +### [0.7.1] - 2022-12-14 + +#### Changed + +- Change model downloading provider to cloudfront. + +### [0.7] - 2022-11-28 + +#### Added + +- Add face swapping model and example. + +#### Changed + +- Set default ORT provider to CUDA and CPU. + +### [0.6] - 2022-01-29 + +#### Added + +- Add pose estimation in face-analysis app. + +#### Changed + +- Change model automated downloading url, to ucloud. 
+ + +## Quick Example + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +app.prepare(ctx_id=0, det_size=(640, 640)) +img = ins_get_image('t1') +faces = app.get(img) +rimg = app.draw_on(img, faces) +cv2.imwrite("./t1_output.jpg", rimg) +``` + +This quick example will detect faces from the ``t1.jpg`` image and draw detection results on it. + + + +## Model Zoo + +In the latest version of insightface library, we provide following model packs: + +Name in **bold** is the default model pack. **Auto** means we can download the model pack through the python library directly. + +Once you manually downloaded the zip model pack, unzip it under `~/.insightface/models/` first before you call the program. + +| Name | Detection Model | Recognition Model | Alignment | Attributes | Model-Size | Link | Auto | +| ------------- | --------------- | -------------------- | ------------ | ---------- | ---------- | ------------------------------------------------------------ | ------------- | +| antelopev2 | SCRFD-10GF | ResNet100@Glint360K | 2d106 & 3d68 | Gender&Age | 407MB | [link](https://drive.google.com/file/d/18wEUfMNohBJ4K3Ly5wpTejPfDzp-8fI8/view?usp=sharing) | N | +| **buffalo_l** | SCRFD-10GF | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 326MB | [link](https://drive.google.com/file/d/1qXsQJ8ZT42_xSmWIYy85IcidpiZudOCB/view?usp=sharing) | Y | +| buffalo_m | SCRFD-2.5GF | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 313MB | [link](https://drive.google.com/file/d/1net68yNxF33NNV6WP7k56FS6V53tq-64/view?usp=sharing) | N | +| buffalo_s | SCRFD-500MF | MBF@WebFace600K | 2d106 & 3d68 | Gender&Age | 159MB | [link](https://drive.google.com/file/d/1pKIusApEfoHKDjeBTXYB3yOQ0EtTonNE/view?usp=sharing) | N | +| buffalo_sc | SCRFD-500MF | MBF@WebFace600K | - | - | 16MB | [link](https://drive.google.com/file/d/19I-MZdctYKmVf3nu5Da3HS6KH5LBfdzG/view?usp=sharing) | N | + + + +Recognition Accuracy: + +| Name | MR-ALL | African | Caucasian | South Asian | East Asian | LFW | CFP-FP | AgeDB-30 | IJB-C(E4) | +| :-------- | ------ | ------- | --------- | ----------- | ---------- | ----- | ------ | -------- | --------- | +| buffalo_l | 91.25 | 90.29 | 94.70 | 93.16 | 74.96 | 99.83 | 99.33 | 98.23 | 97.25 | +| buffalo_s | 71.87 | 69.45 | 80.45 | 73.39 | 51.03 | 99.70 | 98.00 | 96.58 | 95.02 | + +*buffalo_m has the same accuracy with buffalo_l.* + +*buffalo_sc has the same accuracy with buffalo_s.* + + + +**Note that these models are available for non-commercial research purposes only.** + + + +For insightface>=0.3.3, models will be downloaded automatically once we init ``app = FaceAnalysis()`` instance. + +For insightface==0.3.2, you must first download the model package by command: + +``` +insightface-cli model.download buffalo_l +``` + +## Use Your Own Licensed Model + +You can simply create a new model directory under ``~/.insightface/models/`` and replace the pretrained models we provide with your own models. And then call ``app = FaceAnalysis(name='your_model_zoo')`` to load these models. + +## Call Models + +The latest insightface libary only supports onnx models. Once you have trained detection or recognition models by PyTorch, MXNet or any other frameworks, you can convert it to the onnx format and then they can be called with insightface library. 
+ +### Call Detection Models + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +# Method-1, use FaceAnalysis +app = FaceAnalysis(allowed_modules=['detection']) # enable detection model only +app.prepare(ctx_id=0, det_size=(640, 640)) + +# Method-2, load model directly +detector = insightface.model_zoo.get_model('your_detection_model.onnx') +detector.prepare(ctx_id=0, input_size=(640, 640)) + +``` + +### Call Recognition Models + +``` +import cv2 +import numpy as np +import insightface +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image + +handler = insightface.model_zoo.get_model('your_recognition_model.onnx') +handler.prepare(ctx_id=0) + +``` + + diff --git a/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl b/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..a3a36453755b201e8d2cdf0fcca0e4552b6b7fc5 Binary files /dev/null and b/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl differ diff --git a/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl.metadata b/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..df7d99d143c6b02ec1639c1258131f82b1430748 --- /dev/null +++ b/insightface/insightface-0.7.3-cp312-cp312-win_amd64.whl.metadata @@ -0,0 +1,176 @@ +Metadata-Version: 2.1 +Name: insightface +Version: 0.7.3 +Summary: InsightFace Python Library +Home-page: https://github.com/deepinsight/insightface +Author: InsightFace Contributors +Author-email: contact@insightface.ai +License: MIT +Description-Content-Type: text/markdown +Requires-Dist: numpy +Requires-Dist: onnx +Requires-Dist: tqdm +Requires-Dist: requests +Requires-Dist: matplotlib +Requires-Dist: Pillow +Requires-Dist: scipy +Requires-Dist: scikit-learn +Requires-Dist: scikit-image +Requires-Dist: easydict +Requires-Dist: cython +Requires-Dist: albumentations +Requires-Dist: prettytable + +# InsightFace Python Library + +## License + +The code of InsightFace Python Library is released under the MIT License. There is no limitation for both academic and commercial usage. + +**The pretrained models we provided with this library are available for non-commercial research purposes only, including both auto-downloading models and manual-downloading models.** + +## Install + +### Install Inference Backend + +For ``insightface<=0.1.5``, we use MXNet as inference backend. + +Starting from insightface>=0.2, we use onnxruntime as inference backend. + +You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference. + +## Change Log + +### [0.7.1] - 2022-12-14 + +#### Changed + +- Change model downloading provider to cloudfront. + +### [0.7] - 2022-11-28 + +#### Added + +- Add face swapping model and example. + +#### Changed + +- Set default ORT provider to CUDA and CPU. + +### [0.6] - 2022-01-29 + +#### Added + +- Add pose estimation in face-analysis app. + +#### Changed + +- Change model automated downloading url, to ucloud. 
## Quick Example

```
import cv2
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

# Prefer CUDA when it is available and fall back to the CPU provider otherwise.
app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
img = ins_get_image('t1')           # bundled sample image
faces = app.get(img)                # run detection and all enabled analysis modules
rimg = app.draw_on(img, faces)      # draw boxes and landmarks on a copy of the image
cv2.imwrite("./t1_output.jpg", rimg)
```

This quick example detects faces in the bundled ``t1.jpg`` sample image and draws the detection results on it.

## Model Zoo

The latest version of the insightface library provides the following model packs.

The name in **bold** is the default model pack. **Auto** means the model pack can be downloaded directly through the Python library.

If you download a zip model pack manually, unzip it under `~/.insightface/models/` before running the program.

| Name          | Detection Model | Recognition Model    | Alignment    | Attributes | Model-Size | Link                                                         | Auto |
| ------------- | --------------- | -------------------- | ------------ | ---------- | ---------- | ------------------------------------------------------------ | ---- |
| antelopev2    | SCRFD-10GF      | ResNet100@Glint360K  | 2d106 & 3d68 | Gender&Age | 407MB      | [link](https://drive.google.com/file/d/18wEUfMNohBJ4K3Ly5wpTejPfDzp-8fI8/view?usp=sharing) | N    |
| **buffalo_l** | SCRFD-10GF      | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 326MB      | [link](https://drive.google.com/file/d/1qXsQJ8ZT42_xSmWIYy85IcidpiZudOCB/view?usp=sharing) | Y    |
| buffalo_m     | SCRFD-2.5GF     | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 313MB      | [link](https://drive.google.com/file/d/1net68yNxF33NNV6WP7k56FS6V53tq-64/view?usp=sharing) | N    |
| buffalo_s     | SCRFD-500MF     | MBF@WebFace600K      | 2d106 & 3d68 | Gender&Age | 159MB      | [link](https://drive.google.com/file/d/1pKIusApEfoHKDjeBTXYB3yOQ0EtTonNE/view?usp=sharing) | N    |
| buffalo_sc    | SCRFD-500MF     | MBF@WebFace600K      | -            | -          | 16MB       | [link](https://drive.google.com/file/d/19I-MZdctYKmVf3nu5Da3HS6KH5LBfdzG/view?usp=sharing) | N    |

Recognition accuracy:

| Name      | MR-ALL | African | Caucasian | South Asian | East Asian | LFW   | CFP-FP | AgeDB-30 | IJB-C(E4) |
| :-------- | ------ | ------- | --------- | ----------- | ---------- | ----- | ------ | -------- | --------- |
| buffalo_l | 91.25  | 90.29   | 94.70     | 93.16       | 74.96      | 99.83 | 99.33  | 98.23    | 97.25     |
| buffalo_s | 71.87  | 69.45   | 80.45     | 73.39       | 51.03      | 99.70 | 98.00  | 96.58    | 95.02     |

*buffalo_m has the same accuracy as buffalo_l.*

*buffalo_sc has the same accuracy as buffalo_s.*

**Note that these models are available for non-commercial research purposes only.**

For insightface>=0.3.3, models are downloaded automatically when the ``app = FaceAnalysis()`` instance is initialized.

For insightface==0.3.2, you must first download the model package with the command:

```
insightface-cli model.download buffalo_l
```

## Use Your Own Licensed Model

You can simply create a new model directory under ``~/.insightface/models/`` and replace the pretrained models we provide with your own models. Then call ``app = FaceAnalysis(name='your_model_zoo')`` to load these models.

## Call Models

The latest insightface library only supports ONNX models. Once you have trained detection or recognition models with PyTorch, MXNet or any other framework, you can convert them to the ONNX format and then call them through the insightface library.
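As an aside, a common next step once models are loaded is comparing two faces via their embeddings. A minimal sketch, assuming a single detected face per image and using the ``normed_embedding`` attribute exposed on detected faces in recent insightface releases:

```
import numpy as np
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

app = FaceAnalysis()
app.prepare(ctx_id=0, det_size=(640, 640))

# Two images to compare; both use the bundled sample here for simplicity.
face_a = app.get(ins_get_image('t1'))[0]
face_b = app.get(ins_get_image('t1'))[0]

# Cosine similarity of the L2-normalized embeddings; near 1.0 means same identity.
print(float(np.dot(face_a.normed_embedding, face_b.normed_embedding)))
```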
### Call Detection Models

```
import insightface
from insightface.app import FaceAnalysis

# Method 1: use FaceAnalysis with the detection module only.
app = FaceAnalysis(allowed_modules=['detection'])
app.prepare(ctx_id=0, det_size=(640, 640))

# Method 2: load a detection model directly.
detector = insightface.model_zoo.get_model('your_detection_model.onnx')
detector.prepare(ctx_id=0, input_size=(640, 640))
```

### Call Recognition Models

```
import insightface

# Load a recognition model directly and prepare it for inference.
handler = insightface.model_zoo.get_model('your_recognition_model.onnx')
handler.prepare(ctx_id=0)
```

diff --git a/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl b/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..9e088f6698de93683f112f884b7697bb1e464df6
Binary files /dev/null and b/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl differ
diff --git a/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl.metadata b/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..df7d99d143c6b02ec1639c1258131f82b1430748
--- /dev/null
+++ b/insightface/insightface-0.7.3-cp39-cp39-win_amd64.whl.metadata
@@ -0,0 +1,176 @@
+Metadata-Version: 2.1
+Name: insightface
+Version: 0.7.3
+Summary: InsightFace Python Library
+Home-page: https://github.com/deepinsight/insightface
+Author: InsightFace Contributors
+Author-email: contact@insightface.ai
+License: MIT
+Description-Content-Type: text/markdown
+Requires-Dist: numpy
+Requires-Dist: onnx
+Requires-Dist: tqdm
+Requires-Dist: requests
+Requires-Dist: matplotlib
+Requires-Dist: Pillow
+Requires-Dist: scipy
+Requires-Dist: scikit-learn
+Requires-Dist: scikit-image
+Requires-Dist: easydict
+Requires-Dist: cython
+Requires-Dist: albumentations
+Requires-Dist: prettytable

# InsightFace Python Library

## License

The code of the InsightFace Python Library is released under the MIT License. There is no restriction on either academic or commercial usage.

**The pretrained models we provide with this library are available for non-commercial research purposes only, including both auto-downloaded and manually downloaded models.**

## Install

### Install Inference Backend

For ``insightface<=0.1.5``, we use MXNet as the inference backend.

Starting from insightface>=0.2, we use onnxruntime as the inference backend.

You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` for CPU-only inference.

## Change Log

### [0.7.1] - 2022-12-14

#### Changed

- Change the model download provider to CloudFront.

### [0.7] - 2022-11-28

#### Added

- Add a face swapping model and example.

#### Changed

- Set the default ORT providers to CUDA and CPU.

### [0.6] - 2022-01-29

#### Added

- Add pose estimation to the face-analysis app.

#### Changed

- Change the automated model download URL to ucloud.
## Quick Example

```
import cv2
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

# Prefer CUDA when it is available and fall back to the CPU provider otherwise.
app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
img = ins_get_image('t1')           # bundled sample image
faces = app.get(img)                # run detection and all enabled analysis modules
rimg = app.draw_on(img, faces)      # draw boxes and landmarks on a copy of the image
cv2.imwrite("./t1_output.jpg", rimg)
```

This quick example detects faces in the bundled ``t1.jpg`` sample image and draws the detection results on it.

## Model Zoo

The latest version of the insightface library provides the following model packs.

The name in **bold** is the default model pack. **Auto** means the model pack can be downloaded directly through the Python library.

If you download a zip model pack manually, unzip it under `~/.insightface/models/` before running the program.

| Name          | Detection Model | Recognition Model    | Alignment    | Attributes | Model-Size | Link                                                         | Auto |
| ------------- | --------------- | -------------------- | ------------ | ---------- | ---------- | ------------------------------------------------------------ | ---- |
| antelopev2    | SCRFD-10GF      | ResNet100@Glint360K  | 2d106 & 3d68 | Gender&Age | 407MB      | [link](https://drive.google.com/file/d/18wEUfMNohBJ4K3Ly5wpTejPfDzp-8fI8/view?usp=sharing) | N    |
| **buffalo_l** | SCRFD-10GF      | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 326MB      | [link](https://drive.google.com/file/d/1qXsQJ8ZT42_xSmWIYy85IcidpiZudOCB/view?usp=sharing) | Y    |
| buffalo_m     | SCRFD-2.5GF     | ResNet50@WebFace600K | 2d106 & 3d68 | Gender&Age | 313MB      | [link](https://drive.google.com/file/d/1net68yNxF33NNV6WP7k56FS6V53tq-64/view?usp=sharing) | N    |
| buffalo_s     | SCRFD-500MF     | MBF@WebFace600K      | 2d106 & 3d68 | Gender&Age | 159MB      | [link](https://drive.google.com/file/d/1pKIusApEfoHKDjeBTXYB3yOQ0EtTonNE/view?usp=sharing) | N    |
| buffalo_sc    | SCRFD-500MF     | MBF@WebFace600K      | -            | -          | 16MB       | [link](https://drive.google.com/file/d/19I-MZdctYKmVf3nu5Da3HS6KH5LBfdzG/view?usp=sharing) | N    |

Recognition accuracy:

| Name      | MR-ALL | African | Caucasian | South Asian | East Asian | LFW   | CFP-FP | AgeDB-30 | IJB-C(E4) |
| :-------- | ------ | ------- | --------- | ----------- | ---------- | ----- | ------ | -------- | --------- |
| buffalo_l | 91.25  | 90.29   | 94.70     | 93.16       | 74.96      | 99.83 | 99.33  | 98.23    | 97.25     |
| buffalo_s | 71.87  | 69.45   | 80.45     | 73.39       | 51.03      | 99.70 | 98.00  | 96.58    | 95.02     |

*buffalo_m has the same accuracy as buffalo_l.*

*buffalo_sc has the same accuracy as buffalo_s.*

**Note that these models are available for non-commercial research purposes only.**

For insightface>=0.3.3, models are downloaded automatically when the ``app = FaceAnalysis()`` instance is initialized.

For insightface==0.3.2, you must first download the model package with the command:

```
insightface-cli model.download buffalo_l
```

## Use Your Own Licensed Model

You can simply create a new model directory under ``~/.insightface/models/`` and replace the pretrained models we provide with your own models. Then call ``app = FaceAnalysis(name='your_model_zoo')`` to load these models.

## Call Models

The latest insightface library only supports ONNX models. Once you have trained detection or recognition models with PyTorch, MXNet or any other framework, you can convert them to the ONNX format and then call them through the insightface library.
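For reference, each face object returned by ``FaceAnalysis.get()`` carries the raw detection outputs; a short sketch of reading them follows (attribute names as found in recent insightface releases, so treat them as indicative rather than a stable contract):

```
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

app = FaceAnalysis(allowed_modules=['detection'])
app.prepare(ctx_id=0, det_size=(640, 640))

for face in app.get(ins_get_image('t1')):
    x1, y1, x2, y2 = face.bbox          # bounding box in pixel coordinates
    print((x1, y1, x2, y2),
          face.det_score,               # detection confidence
          face.kps.shape)               # five landmark points, shape (5, 2)
```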
### Call Detection Models

```
import insightface
from insightface.app import FaceAnalysis

# Method 1: use FaceAnalysis with the detection module only.
app = FaceAnalysis(allowed_modules=['detection'])
app.prepare(ctx_id=0, det_size=(640, 640))

# Method 2: load a detection model directly.
detector = insightface.model_zoo.get_model('your_detection_model.onnx')
detector.prepare(ctx_id=0, input_size=(640, 640))
```

### Call Recognition Models

```
import insightface

# Load a recognition model directly and prepare it for inference.
handler = insightface.model_zoo.get_model('your_recognition_model.onnx')
handler.prepare(ctx_id=0)
```

diff --git a/intel-extension-for-pytorch/index.html b/intel-extension-for-pytorch/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..b38ddeb82f7517102c053212dd68b9c64074bba0
--- /dev/null
+++ b/intel-extension-for-pytorch/index.html
@@ -0,0 +1,36 @@
+
+
+
+
+
+
+
+ Links for intel-extension-for-pytorch
+
+
+

+ Links for intel-extension-for-pytorch +

+ + intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl + +
+ + intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl + +
+ + intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl + +
+ + intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl + +
+ + intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl + +
diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..bdd7478955e94dc5006f21f8065e10e777427385
--- /dev/null
+++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0f59ee70a5d9fe59890cc799201af9ffca031e039947a3511fae64c66100464
+size 551451963
diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl.metadata b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..39aadab8eb0eef429c68b6ee9183fe7fc5d3646f
--- /dev/null
+++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+git632f70a-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,108 @@
+Metadata-Version: 2.1
+Name: intel-extension-for-pytorch
+Version: 2.0.110+git632f70a
+Summary: Intel® Extension for PyTorch*
+Home-page: https://github.com/intel/intel-extension-for-pytorch
+Author: Intel Corp.
+License: https://www.apache.org/licenses/LICENSE-2.0
+Classifier: License :: OSI Approved :: Apache Software License
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: psutil
+Requires-Dist: numpy

# Intel® Extension for PyTorch\*

Intel® Extension for PyTorch\* extends PyTorch\* with up-to-date features and optimizations for an extra performance boost on Intel hardware. Optimizations take advantage of AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs, as well as Intel Xe Matrix Extensions (XMX) AI engines on Intel discrete GPUs. Moreover, through the PyTorch\* `xpu` device, Intel® Extension for PyTorch\* provides easy GPU acceleration for Intel discrete GPUs.

Intel® Extension for PyTorch\* provides optimizations for both eager mode and graph mode. Compared to eager mode, graph mode in PyTorch\* normally yields better performance from optimization techniques such as operation fusion, and Intel® Extension for PyTorch\* amplifies them with more comprehensive graph optimizations. Therefore, we recommend taking advantage of Intel® Extension for PyTorch\* with [TorchScript](https://pytorch.org/docs/stable/jit.html) whenever your workload supports it. You can run with either the `torch.jit.trace()` or the `torch.jit.script()` function; based on our evaluation, `torch.jit.trace()` supports more workloads, so we recommend it as your first choice.

The extension can be loaded as a Python module for Python programs or linked as a C++ library for C++ programs. In Python scripts, users can enable it dynamically by importing `intel_extension_for_pytorch`.

* Check the [CPU tutorial](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® CPUs. Source code is available at the [master branch](https://github.com/intel/intel-extension-for-pytorch/tree/master).
* Check the [GPU tutorial](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® GPUs.
Source code is available at the [xpu-master branch](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-master).

## Installation

### CPU version

You can use either of the following two commands to install the Intel® Extension for PyTorch\* CPU version.

```bash
python -m pip install intel_extension_for_pytorch
python -m pip install intel_extension_for_pytorch -f https://developer.intel.com/ipex-whl-stable-cpu
```

**Note:** Intel® Extension for PyTorch\* has a PyTorch version requirement. Please check the installation guide linked below for details.

More installation methods can be found at the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

Compilation instructions for the latest CPU code base (`master` branch) can be found at the [Installation Guide](https://github.com/intel/intel-extension-for-pytorch/blob/master/docs/tutorials/installation.md#install-via-compiling-from-source).

### GPU version

You can install Intel® Extension for PyTorch\* for GPU via the command below.

```bash
python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel_extension_for_pytorch==2.0.110+xpu -f https://developer.intel.com/ipex-whl-stable-xpu
```

**Note:** The patched PyTorch 2.0.1 is required to work with Intel® Extension for PyTorch\* on Intel® graphics cards for now.

More installation methods can be found at the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).

Compilation instructions for the latest GPU code base (`xpu-master` branch) can be found at the [Installation Guide For Linux/WSL2](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-master/docs/tutorials/installations/linux.rst#install-via-compiling-from-source) and the [Installation Guide For Windows](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-master/docs/tutorials/installations/windows.rst#install-via-compiling-from-source).

## Getting Started

Minor code changes are required for users to get started with Intel® Extension for PyTorch\*. Both PyTorch imperative mode and TorchScript mode are supported. You just need to import the Intel® Extension for PyTorch\* package and apply its `optimize` function to the model object. For a training workload, the `optimize` function also needs to be applied to the optimizer object.

The following code snippet shows inference code with the FP32 data type. More examples on CPU, including training and C++ examples, are available at the [CPU Example page](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html). More examples on GPU are available at the [GPU Example page](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/examples.html).

### Inference on CPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

### Inference on GPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = model.to('xpu')
data = data.to('xpu')
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

## License

_Apache License_, Version _2.0_.
As found in [LICENSE](https://github.com/intel/intel-extension-for-pytorch/blob/master/LICENSE) file.

## Security

See Intel's [Security Center](https://www.intel.com/content/www/us/en/security-center/default.html)
for information on how to report a potential security issue or vulnerability.

See also: [Security Policy](SECURITY.md)

diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..04077f03660cb0632e08c3485d5bbe9d9a847dbb
--- /dev/null
+++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c3b42a2092d1a9000e62adf05ca718a2927cd36a0cb586830b6ff97fc9b54a
+size 466814181
diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl.metadata b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..55c91b4f8f9dd1d08ba2ffd186a5530deeb67380
--- /dev/null
+++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,108 @@
+Metadata-Version: 2.1
+Name: intel-extension-for-pytorch
+Version: 2.0.110+gitc6ea20b
+Summary: Intel® Extension for PyTorch*
+Home-page: https://github.com/intel/intel-extension-for-pytorch
+Author: Intel Corp.
+License: https://www.apache.org/licenses/LICENSE-2.0
+Classifier: License :: OSI Approved :: Apache Software License
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: psutil
+Requires-Dist: numpy

# Intel® Extension for PyTorch\*

Intel® Extension for PyTorch\* extends PyTorch\* with up-to-date features and optimizations for an extra performance boost on Intel hardware. Optimizations take advantage of AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs, as well as Intel Xe Matrix Extensions (XMX) AI engines on Intel discrete GPUs. Moreover, through the PyTorch\* `xpu` device, Intel® Extension for PyTorch\* provides easy GPU acceleration for Intel discrete GPUs.

Intel® Extension for PyTorch\* provides optimizations for both eager mode and graph mode. Compared to eager mode, graph mode in PyTorch\* normally yields better performance from optimization techniques such as operation fusion, and Intel® Extension for PyTorch\* amplifies them with more comprehensive graph optimizations. Therefore, we recommend taking advantage of Intel® Extension for PyTorch\* with [TorchScript](https://pytorch.org/docs/stable/jit.html) whenever your workload supports it. You can run with either the `torch.jit.trace()` or the `torch.jit.script()` function; based on our evaluation, `torch.jit.trace()` supports more workloads, so we recommend it as your first choice.

The extension can be loaded as a Python module for Python programs or linked as a C++ library for C++ programs. In Python scripts, users can enable it dynamically by importing `intel_extension_for_pytorch`.

* Check the [CPU tutorial](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® CPUs.
Source code is available at the [master branch](https://github.com/intel/intel-extension-for-pytorch/tree/master).
* Check the [GPU tutorial](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® GPUs. Source code is available at the [xpu-master branch](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-master).

## Installation

### CPU version

You can use either of the following two commands to install the Intel® Extension for PyTorch\* CPU version.

```bash
python -m pip install intel_extension_for_pytorch
python -m pip install intel_extension_for_pytorch -f https://developer.intel.com/ipex-whl-stable-cpu
```

**Note:** Intel® Extension for PyTorch\* has a PyTorch version requirement. Please check the installation guide linked below for details.

More installation methods can be found at the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

Compilation instructions for the latest CPU code base (`master` branch) can be found at the [Installation Guide](https://github.com/intel/intel-extension-for-pytorch/blob/master/docs/tutorials/installation.md#install-via-compiling-from-source).

### GPU version

You can install Intel® Extension for PyTorch\* for GPU via the command below.

```bash
python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel_extension_for_pytorch==2.0.110+xpu -f https://developer.intel.com/ipex-whl-stable-xpu
```

**Note:** The patched PyTorch 2.0.1 is required to work with Intel® Extension for PyTorch\* on Intel® graphics cards for now.

More installation methods can be found at the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).

Compilation instructions for the latest GPU code base (`xpu-master` branch) can be found at the [Installation Guide For Linux/WSL2](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-master/docs/tutorials/installations/linux.rst#install-via-compiling-from-source) and the [Installation Guide For Windows](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-master/docs/tutorials/installations/windows.rst#install-via-compiling-from-source).

## Getting Started

Minor code changes are required for users to get started with Intel® Extension for PyTorch\*. Both PyTorch imperative mode and TorchScript mode are supported. You just need to import the Intel® Extension for PyTorch\* package and apply its `optimize` function to the model object. For a training workload, the `optimize` function also needs to be applied to the optimizer object.

The following code snippet shows inference code with the FP32 data type. More examples on CPU, including training and C++ examples, are available at the [CPU Example page](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html). More examples on GPU are available at the [GPU Example page](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/examples.html).
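To make the training case concrete, here is a minimal sketch of applying `optimize` to both the model and the optimizer; the model, optimizer, and batch below are illustrative choices, not requirements:

```python
import torch
import torchvision.models as models
import intel_extension_for_pytorch as ipex

model = models.resnet50()
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# For training workloads, optimize() takes the optimizer too and returns both.
model, optimizer = ipex.optimize(model, optimizer=optimizer)

data = torch.rand(4, 3, 224, 224)
target = torch.randint(0, 1000, (4,))
criterion = torch.nn.CrossEntropyLoss()

optimizer.zero_grad()
loss = criterion(model(data), target)
loss.backward()
optimizer.step()
```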
+ +### Inference on CPU + +```python +import torch +import torchvision.models as models + +model = models.resnet50(pretrained=True) +model.eval() +data = torch.rand(1, 3, 224, 224) + +import intel_extension_for_pytorch as ipex +model = ipex.optimize(model) + +with torch.no_grad(): + model(data) +``` + +### Inference on GPU + +```python +import torch +import torchvision.models as models + +model = models.resnet50(pretrained=True) +model.eval() +data = torch.rand(1, 3, 224, 224) + +import intel_extension_for_pytorch as ipex +model = model.to('xpu') +data = data.to('xpu') +model = ipex.optimize(model) + +with torch.no_grad(): + model(data) +``` + +## License + +_Apache License_, Version _2.0_. As found in [LICENSE](https://github.com/intel/intel-extension-for-pytorch/blob/master/LICENSE) file. + +## Security + +See Intel's [Security Center](https://www.intel.com/content/www/us/en/security-center/default.html) +for information on how to report a potential security issue or vulnerability. + +See also: [Security Policy](SECURITY.md) + diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..532b0bb2845601172448424de7ce640b1a7d0e24 --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83b4963eb9d8bb857292a61c6b5bc50863c8e8774fb71d553c24b9592b59332c +size 367153302 diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl.metadata b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..75008400db97c5a5a838888237edf359c33b317b --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,118 @@ +Metadata-Version: 2.1 +Name: intel-extension-for-pytorch +Version: 2.1.10+xpu +Summary: Intel® Extension for PyTorch* +Home-page: https://github.com/intel/intel-extension-for-pytorch +Author: Intel Corp. +License: https://www.apache.org/licenses/LICENSE-2.0 +Classifier: License :: OSI Approved :: Apache Software License +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: psutil +Requires-Dist: numpy +Requires-Dist: packaging +Requires-Dist: pydantic + +
Intel® Extension for PyTorch*
===========================

[💻Examples](./docs/tutorials/examples.md) | [📖CPU Documentation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) | [📖GPU Documentation](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/)
Intel® Extension for PyTorch\* extends PyTorch\* with up-to-date features and optimizations for an extra performance boost on Intel hardware. Optimizations take advantage of AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs, as well as Intel Xe Matrix Extensions (XMX) AI engines on Intel discrete GPUs. Moreover, through the PyTorch\* `xpu` device, Intel® Extension for PyTorch\* provides easy GPU acceleration for Intel discrete GPUs.

Intel® Extension for PyTorch\* provides optimizations for both eager mode and graph mode. Compared to eager mode, graph mode in PyTorch\* normally yields better performance from optimization techniques such as operation fusion, and Intel® Extension for PyTorch\* amplifies them with more comprehensive graph optimizations. Therefore, we recommend taking advantage of Intel® Extension for PyTorch\* with [TorchScript](https://pytorch.org/docs/stable/jit.html) whenever your workload supports it. You can run with either the `torch.jit.trace()` or the `torch.jit.script()` function; based on our evaluation, `torch.jit.trace()` supports more workloads, so we recommend it as your first choice.

The extension can be loaded as a Python module for Python programs or linked as a C++ library for C++ programs. In Python scripts, users can enable it dynamically by importing `intel_extension_for_pytorch`.

* Check the [CPU tutorial](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® CPUs. Source code is available at the [main branch](https://github.com/intel/intel-extension-for-pytorch/tree/main).
* Check the [GPU tutorial](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® GPUs. Source code is available at the [xpu-main branch](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main).

## Installation

### CPU version

You can use either of the following two commands to install the Intel® Extension for PyTorch\* CPU version.

```bash
python -m pip install intel_extension_for_pytorch
python -m pip install intel_extension_for_pytorch -f https://developer.intel.com/ipex-whl-stable-cpu
```

**Note:** Intel® Extension for PyTorch\* has a PyTorch version requirement. Please check the installation guide linked below for details.

More installation methods can be found at the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

Compilation instructions for the latest CPU code base (`main` branch) can be found at the [Installation Guide](https://github.com/intel/intel-extension-for-pytorch/blob/main/docs/tutorials/installation.md#install-via-compiling-from-source).

### GPU version

You can install Intel® Extension for PyTorch\* for GPU via the command below.

```bash
python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 intel_extension_for_pytorch==2.1.10+xpu -f https://developer.intel.com/ipex-whl-stable-xpu
```

**Note:** The patched PyTorch 2.1.0 is required to work with Intel® Extension for PyTorch\* on Intel® graphics cards for now.

More installation methods can be found at the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).
Compilation instructions for the latest GPU code base (`xpu-main` branch) can be found at the [Installation Guide For Linux/WSL2](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-main/docs/tutorials/installations/linux.rst#install-via-compiling-from-source) and the [Installation Guide For Windows](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-main/docs/tutorials/installations/windows.rst#install-via-compiling-from-source).

## Getting Started

Minor code changes are required for users to get started with Intel® Extension for PyTorch\*. Both PyTorch imperative mode and TorchScript mode are supported. You just need to import the Intel® Extension for PyTorch\* package and apply its `optimize` function to the model object. For a training workload, the `optimize` function also needs to be applied to the optimizer object.

The following code snippet shows inference code with the FP32 data type. More examples on CPU, including training and C++ examples, are available at the [CPU Example page](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html). More examples on GPU are available at the [GPU Example page](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/examples.html).

### Inference on CPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

### Inference on GPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = model.to('xpu')
data = data.to('xpu')
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

## License

_Apache License_, Version _2.0_. As found in [LICENSE](https://github.com/intel/intel-extension-for-pytorch/blob/main/LICENSE) file.

## Security

See Intel's [Security Center](https://www.intel.com/content/www/us/en/security-center/default.html)
for information on how to report a potential security issue or vulnerability.
+ +See also: [Security Policy](SECURITY.md) + diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl new file mode 100644 index 0000000000000000000000000000000000000000..67a3628a2a8c62df0df746683f816d3f7b08feb6 --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8f52e6b118cff5f310d498f82337ee869d6e03a25b9b1218cc47a2dc7077a9e +size 367159341 diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl.metadata b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..75008400db97c5a5a838888237edf359c33b317b --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64_2.whl.metadata @@ -0,0 +1,118 @@ +Metadata-Version: 2.1 +Name: intel-extension-for-pytorch +Version: 2.1.10+xpu +Summary: Intel® Extension for PyTorch* +Home-page: https://github.com/intel/intel-extension-for-pytorch +Author: Intel Corp. +License: https://www.apache.org/licenses/LICENSE-2.0 +Classifier: License :: OSI Approved :: Apache Software License +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: psutil +Requires-Dist: numpy +Requires-Dist: packaging +Requires-Dist: pydantic + +
Intel® Extension for PyTorch*
===========================

[💻Examples](./docs/tutorials/examples.md) | [📖CPU Documentation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) | [📖GPU Documentation](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/)
Intel® Extension for PyTorch\* extends PyTorch\* with up-to-date features and optimizations for an extra performance boost on Intel hardware. Optimizations take advantage of AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs, as well as Intel Xe Matrix Extensions (XMX) AI engines on Intel discrete GPUs. Moreover, through the PyTorch\* `xpu` device, Intel® Extension for PyTorch\* provides easy GPU acceleration for Intel discrete GPUs.

Intel® Extension for PyTorch\* provides optimizations for both eager mode and graph mode. Compared to eager mode, graph mode in PyTorch\* normally yields better performance from optimization techniques such as operation fusion, and Intel® Extension for PyTorch\* amplifies them with more comprehensive graph optimizations. Therefore, we recommend taking advantage of Intel® Extension for PyTorch\* with [TorchScript](https://pytorch.org/docs/stable/jit.html) whenever your workload supports it. You can run with either the `torch.jit.trace()` or the `torch.jit.script()` function; based on our evaluation, `torch.jit.trace()` supports more workloads, so we recommend it as your first choice.

The extension can be loaded as a Python module for Python programs or linked as a C++ library for C++ programs. In Python scripts, users can enable it dynamically by importing `intel_extension_for_pytorch`.

* Check the [CPU tutorial](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® CPUs. Source code is available at the [main branch](https://github.com/intel/intel-extension-for-pytorch/tree/main).
* Check the [GPU tutorial](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® GPUs. Source code is available at the [xpu-main branch](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main).

## Installation

### CPU version

You can use either of the following two commands to install the Intel® Extension for PyTorch\* CPU version.

```bash
python -m pip install intel_extension_for_pytorch
python -m pip install intel_extension_for_pytorch -f https://developer.intel.com/ipex-whl-stable-cpu
```

**Note:** Intel® Extension for PyTorch\* has a PyTorch version requirement. Please check the installation guide linked below for details.

More installation methods can be found at the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

Compilation instructions for the latest CPU code base (`main` branch) can be found at the [Installation Guide](https://github.com/intel/intel-extension-for-pytorch/blob/main/docs/tutorials/installation.md#install-via-compiling-from-source).

### GPU version

You can install Intel® Extension for PyTorch\* for GPU via the command below.

```bash
python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 intel_extension_for_pytorch==2.1.10+xpu -f https://developer.intel.com/ipex-whl-stable-xpu
```

**Note:** The patched PyTorch 2.1.0 is required to work with Intel® Extension for PyTorch\* on Intel® graphics cards for now.

More installation methods can be found at the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).
Compilation instructions for the latest GPU code base (`xpu-main` branch) can be found at the [Installation Guide For Linux/WSL2](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-main/docs/tutorials/installations/linux.rst#install-via-compiling-from-source) and the [Installation Guide For Windows](https://github.com/intel/intel-extension-for-pytorch/blob/xpu-main/docs/tutorials/installations/windows.rst#install-via-compiling-from-source).

## Getting Started

Minor code changes are required for users to get started with Intel® Extension for PyTorch\*. Both PyTorch imperative mode and TorchScript mode are supported. You just need to import the Intel® Extension for PyTorch\* package and apply its `optimize` function to the model object. For a training workload, the `optimize` function also needs to be applied to the optimizer object.

The following code snippet shows inference code with the FP32 data type. More examples on CPU, including training and C++ examples, are available at the [CPU Example page](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html). More examples on GPU are available at the [GPU Example page](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/examples.html).

### Inference on CPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

### Inference on GPU

```python
import torch
import torchvision.models as models

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

import intel_extension_for_pytorch as ipex
model = model.to('xpu')
data = data.to('xpu')
model = ipex.optimize(model)

with torch.no_grad():
    model(data)
```

## License

_Apache License_, Version _2.0_. As found in [LICENSE](https://github.com/intel/intel-extension-for-pytorch/blob/main/LICENSE) file.

## Security

See Intel's [Security Center](https://www.intel.com/content/www/us/en/security-center/default.html)
for information on how to report a potential security issue or vulnerability.
+ +See also: [Security Policy](SECURITY.md) + diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..cab1885ea06610868bae5cb710ccc584b5d553fe --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0207f841efc2a742f402613abe391470a61d301e3a7a843b59c70f5c2a1f9255 +size 483056266 diff --git a/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl.metadata b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..a968e8cc5126effb5a50b1211973d4655c1fdd53 --- /dev/null +++ b/intel-extension-for-pytorch/intel_extension_for_pytorch-2.1.20+git4849f3b-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,132 @@ +Metadata-Version: 2.1 +Name: intel-extension-for-pytorch +Version: 2.1.20+git4849f3b +Summary: Intel® Extension for PyTorch* +Home-page: https://github.com/intel/intel-extension-for-pytorch +Author: Intel Corp. +License: https://www.apache.org/licenses/LICENSE-2.0 +Classifier: License :: OSI Approved :: Apache Software License +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: psutil +Requires-Dist: numpy +Requires-Dist: packaging +Requires-Dist: pydantic + +
Intel® Extension for PyTorch*
===========================

[💻Examples](./docs/tutorials/examples.md) | [📖CPU Documentation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) | [📖GPU Documentation](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/)
Intel® Extension for PyTorch\* extends PyTorch\* with up-to-date features and optimizations for an extra performance boost on Intel hardware. Optimizations take advantage of AVX-512 Vector Neural Network Instructions (AVX512 VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs, as well as Intel Xe Matrix Extensions (XMX) AI engines on Intel discrete GPUs. Moreover, through the PyTorch\* `xpu` device, Intel® Extension for PyTorch\* provides easy GPU acceleration for Intel discrete GPUs.

Intel® Extension for PyTorch\* provides optimizations for both eager mode and graph mode. Compared to eager mode, graph mode in PyTorch\* normally yields better performance from optimization techniques such as operation fusion, and Intel® Extension for PyTorch\* amplifies them with more comprehensive graph optimizations. Therefore, we recommend taking advantage of Intel® Extension for PyTorch\* with [TorchScript](https://pytorch.org/docs/stable/jit.html) whenever your workload supports it. You can run with either the `torch.jit.trace()` or the `torch.jit.script()` function; based on our evaluation, `torch.jit.trace()` supports more workloads, so we recommend it as your first choice.

The extension can be loaded as a Python module for Python programs or linked as a C++ library for C++ programs. In Python scripts, users can enable it dynamically by importing `intel_extension_for_pytorch`.

In the current technological landscape, Generative AI (GenAI) workloads and models have gained widespread attention and popularity. Large Language Models (LLMs) have emerged as the dominant models driving these GenAI applications. Starting from 2.1.0, specific optimizations for certain LLMs are introduced in Intel® Extension for PyTorch\*.

* Check the [CPU tutorial](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® CPUs. Source code is available at the [main branch](https://github.com/intel/intel-extension-for-pytorch/tree/main).
* Check the [GPU tutorial](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/) for detailed information on Intel® Extension for PyTorch\* for Intel® GPUs. Source code is available at the [xpu-main branch](https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main).

## Large Language Models (LLMs) Optimization

Starting from 2.1.0, specific optimizations for certain LLMs are introduced in Intel® Extension for PyTorch\*. Check [LLM optimizations CPU](./examples/cpu/inference/python/llm) and [LLM optimizations GPU](./examples/gpu/inference/python/llm) for details.

## Installation

### CPU version

You can use either of the following two commands to install the Intel® Extension for PyTorch\* CPU version.
```bash
python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# For users in the PRC, use the CN mirror instead:
python -m pip install intel-extension-for-pytorch --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/
```

**Note:** Intel® Extension for PyTorch\* has a PyTorch version requirement. Please check the installation guide linked below for details.

More installation methods can be found at the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

Compilation instructions for the latest CPU code base (`main` branch) can be found in the Package `source` section of the [CPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).

### GPU version

You can install Intel® Extension for PyTorch\* for GPU via the command below.

```bash
python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# For users in the PRC, use the CN mirror instead:
python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
```

**Note:** The patched PyTorch 2.1.0 is required to work with Intel® Extension for PyTorch\* on Intel® graphics cards for now.

More installation methods can be found at the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).

Compilation instructions for the latest GPU code base (`xpu-main` branch) can be found in the Package `source` section of the [GPU Installation Guide](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html).

## Getting Started

Minor code changes are required for users to get started with Intel® Extension for PyTorch\*. Both PyTorch imperative mode and TorchScript mode are supported. You just need to import the Intel® Extension for PyTorch\* package and apply its `optimize` function to the model object. For a training workload, the `optimize` function also needs to be applied to the optimizer object.

The following code snippet shows inference code with the FP32 data type. More examples on CPU, including training and C++ examples, are available at the [CPU Example page](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html). More examples on GPU are available at the [GPU Example page](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/examples.html).
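Since TorchScript is the recommended path where the workload supports it, here is a short sketch of combining `ipex.optimize` with `torch.jit.trace`; the model and input shape are illustrative:

```python
import torch
import torchvision.models as models
import intel_extension_for_pytorch as ipex

model = models.resnet50(pretrained=True)
model.eval()
data = torch.rand(1, 3, 224, 224)

model = ipex.optimize(model)

# Trace once with a representative input, then freeze for inference.
with torch.no_grad():
    traced = torch.jit.trace(model, data)
    traced = torch.jit.freeze(traced)
    traced(data)
```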
+ +### Inference on CPU + +```python +import torch +import torchvision.models as models + +model = models.resnet50(pretrained=True) +model.eval() +data = torch.rand(1, 3, 224, 224) + +import intel_extension_for_pytorch as ipex +model = ipex.optimize(model) + +with torch.no_grad(): + model(data) +``` + +### Inference on GPU + +```python +import torch +import torchvision.models as models + +model = models.resnet50(pretrained=True) +model.eval() +data = torch.rand(1, 3, 224, 224) + +import intel_extension_for_pytorch as ipex +model = model.to('xpu') +data = data.to('xpu') +model = ipex.optimize(model) + +with torch.no_grad(): + model(data) +``` + +## License + +_Apache License_, Version _2.0_. As found in [LICENSE](https://github.com/intel/intel-extension-for-pytorch/blob/main/LICENSE) file. + +## Security + +See Intel's [Security Center](https://www.intel.com/content/www/us/en/security-center/default.html) +for information on how to report a potential security issue or vulnerability. + +See also: [Security Policy](SECURITY.md) + diff --git a/torch/index.html b/torch/index.html new file mode 100644 index 0000000000000000000000000000000000000000..a2d234bf4af924b007c874ecd19c7895df3bfb39 --- /dev/null +++ b/torch/index.html @@ -0,0 +1,36 @@ + + + + + + + + Links for torch + + + +

+ Links for torch +

+ + torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl + +
+ + torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl + +
+ + torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl + +
+ + torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl + +
+ + torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl + +
+ + diff --git a/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..25ea3a1ce1faac6503d7c79b0775a1c633633d2f --- /dev/null +++ b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74c46c5931e12f125f8652667a182f5f990d48b2e1165da112bed108f40e16f7 +size 196277535 diff --git a/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl.metadata b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..dc3ca6c4b33adde104c32b3cf2a374865d20c194 --- /dev/null +++ b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,483 @@ +Metadata-Version: 2.1 +Name: torch +Version: 2.0.0a0+gite9ebda2 +Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration +Home-page: https://pytorch.org/ +Download-URL: https://github.com/pytorch/pytorch/tags +Author: PyTorch Team +Author-email: packages@pytorch.org +License: BSD-3 +Keywords: pytorch,machine learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: NOTICE +Requires-Dist: filelock +Requires-Dist: typing-extensions +Requires-Dist: sympy +Requires-Dist: networkx +Requires-Dist: jinja2 +Provides-Extra: opt-einsum +Requires-Dist: opt-einsum >=3.3 ; extra == 'opt-einsum' + +![PyTorch Logo](https://github.com/pytorch/pytorch/blob/master/docs/source/_static/img/pytorch-logo-dark.png) + +-------------------------------------------------------------------------------- + +PyTorch is a Python package that provides two high-level features: +- Tensor computation (like NumPy) with strong GPU acceleration +- Deep neural networks built on a tape-based autograd system + +You can reuse your favorite Python packages such as NumPy, SciPy, and Cython to extend PyTorch when needed. + +Our trunk health (Continuous Integration signals) can be found at [hud.pytorch.org](https://hud.pytorch.org/ci/pytorch/pytorch/master). 
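To give a flavor of these two features before the table of contents, here is a tiny self-contained sketch (plain PyTorch, nothing version-specific):

```python
import torch

# Tensor computation, optionally on the GPU...
x = torch.randn(3, requires_grad=True)

# ...with gradients recorded by the tape-based autograd.
y = (x ** 2).sum()
y.backward()
print(torch.allclose(x.grad, 2 * x))  # True: d/dx sum(x^2) = 2x
```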
+ + + +- [More About PyTorch](#more-about-pytorch) + - [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library) + - [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd) + - [Python First](#python-first) + - [Imperative Experiences](#imperative-experiences) + - [Fast and Lean](#fast-and-lean) + - [Extensions Without Pain](#extensions-without-pain) +- [Installation](#installation) + - [Binaries](#binaries) + - [NVIDIA Jetson Platforms](#nvidia-jetson-platforms) + - [From Source](#from-source) + - [Prerequisites](#prerequisites) + - [Install Dependencies](#install-dependencies) + - [Get the PyTorch Source](#get-the-pytorch-source) + - [Install PyTorch](#install-pytorch) + - [Adjust Build Options (Optional)](#adjust-build-options-optional) + - [Docker Image](#docker-image) + - [Using pre-built images](#using-pre-built-images) + - [Building the image yourself](#building-the-image-yourself) + - [Building the Documentation](#building-the-documentation) + - [Previous Versions](#previous-versions) +- [Getting Started](#getting-started) +- [Resources](#resources) +- [Communication](#communication) +- [Releases and Contributing](#releases-and-contributing) +- [The Team](#the-team) +- [License](#license) + + + +## More About PyTorch + +At a granular level, PyTorch is a library that consists of the following components: + +| Component | Description | +| ---- | --- | +| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support | +| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch | +| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code | +| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility | +| [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training | +| [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience | + +Usually, PyTorch is used either as: + +- A replacement for NumPy to use the power of GPUs. +- A deep learning research platform that provides maximum flexibility and speed. + +Elaborating Further: + +### A GPU-Ready Tensor Library + +If you use NumPy, then you have used Tensors (a.k.a. ndarray). + +![Tensor illustration](./docs/source/_static/img/tensor_illustration.png) + +PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the +computation by a huge amount. + +We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs +such as slicing, indexing, mathematical operations, linear algebra, reductions. +And they are fast! + +### Dynamic Neural Networks: Tape-Based Autograd + +PyTorch has a unique way of building neural networks: using and replaying a tape recorder. + +Most frameworks such as TensorFlow, Theano, Caffe, and CNTK have a static view of the world. +One has to build a neural network and reuse the same structure again and again. +Changing the way the network behaves means that one has to start from scratch. 
+ +With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to +change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes +from several research papers on this topic, as well as current and past work such as +[torch-autograd](https://github.com/twitter/torch-autograd), +[autograd](https://github.com/HIPS/autograd), +[Chainer](https://chainer.org), etc. + +While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. +You get the best of speed and flexibility for your crazy research. + +![Dynamic graph](https://github.com/pytorch/pytorch/blob/master/docs/source/_static/img/dynamic_graph.gif) + +### Python First + +PyTorch is not a Python binding into a monolithic C++ framework. +It is built to be deeply integrated into Python. +You can use it naturally like you would use [NumPy](https://www.numpy.org/) / [SciPy](https://www.scipy.org/) / [scikit-learn](https://scikit-learn.org) etc. +You can write your new neural network layers in Python itself, using your favorite libraries +and use packages such as [Cython](https://cython.org/) and [Numba](http://numba.pydata.org/). +Our goal is to not reinvent the wheel where appropriate. + +### Imperative Experiences + +PyTorch is designed to be intuitive, linear in thought, and easy to use. +When you execute a line of code, it gets executed. There isn't an asynchronous view of the world. +When you drop into a debugger or receive error messages and stack traces, understanding them is straightforward. +The stack trace points to exactly where your code was defined. +We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines. + +### Fast and Lean + +PyTorch has minimal framework overhead. We integrate acceleration libraries +such as [Intel MKL](https://software.intel.com/mkl) and NVIDIA ([cuDNN](https://developer.nvidia.com/cudnn), [NCCL](https://developer.nvidia.com/nccl)) to maximize speed. +At the core, its CPU and GPU Tensor and neural network backends +are mature and have been tested for years. + +Hence, PyTorch is quite fast – whether you run small or large neural networks. + +The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives. +We've written custom memory allocators for the GPU to make sure that +your deep learning models are maximally memory efficient. +This enables you to train bigger deep learning models than before. + +### Extensions Without Pain + +Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straightforward +and with minimal abstractions. + +You can write new neural network layers in Python using the torch API +[or your favorite NumPy-based libraries such as SciPy](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html). + +If you want to write your layers in C/C++, we provide a convenient extension API that is efficient and with minimal boilerplate. +No wrapper code needs to be written. You can see [a tutorial here](https://pytorch.org/tutorials/advanced/cpp_extension.html) and [an example here](https://github.com/pytorch/extension-cpp). 
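+As a sketch of how painless this is in practice, a toy layer written purely in Python (the `ScaledResidual` class below is illustrative only, not part of any PyTorch API):
+
+```python
+import torch
+import torch.nn as nn
+
+class ScaledResidual(nn.Module):
+    """Toy custom layer: y = x + scale * linear(x)."""
+    def __init__(self, dim):
+        super().__init__()
+        self.linear = nn.Linear(dim, dim)
+        self.scale = nn.Parameter(torch.ones(1))
+
+    def forward(self, x):
+        return x + self.scale * self.linear(x)
+
+layer = ScaledResidual(8)
+out = layer(torch.randn(4, 8))  # autograd tracks scale and the linear weights
+```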
+ + +## Installation + +### Binaries +Commands to install binaries via Conda or pip wheels are on our website: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/) + + +#### NVIDIA Jetson Platforms + +Python wheels for NVIDIA's Jetson Nano, Jetson TX1/TX2, Jetson Xavier NX/AGX, and Jetson AGX Orin are provided [here](https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048) and the L4T container is published [here](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch) + +They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv) and [@ptrblck](https://github.com/ptrblck) are maintaining them. + + +### From Source + +#### Prerequisites +If you are installing from source, you will need: +- Python 3.8 or later (for Linux, Python 3.8.1+ is needed) +- A C++17 compatible compiler, such as clang + +We highly recommend installing an [Anaconda](https://www.anaconda.com/distribution/#download-section) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro. + +If you want to compile with CUDA support, install the following (note that CUDA is not supported on macOS) +- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 11.0 or above +- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v7 or above +- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA + +Note: You could refer to the [cuDNN Support Matrix](https://docs.nvidia.com/deeplearning/cudnn/pdf/cuDNN-Support-Matrix.pdf) for cuDNN versions with the various supported CUDA, CUDA driver and NVIDIA hardware + +If you want to disable CUDA support, export the environment variable `USE_CUDA=0`. +Other potentially useful environment variables may be found in `setup.py`. + +If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), Instructions to install PyTorch for Jetson Nano are [available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/) + +If you want to compile with ROCm support, install +- [AMD ROCm](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html) 4.0 and above installation +- ROCm is currently supported only for Linux systems. + +If you want to disable ROCm support, export the environment variable `USE_ROCM=0`. +Other potentially useful environment variables may be found in `setup.py`. + +#### Install Dependencies + +**Common** + +```bash +conda install cmake ninja +# Run this command from the PyTorch directory after cloning the source code using the “Get the PyTorch Source“ section below +pip install -r requirements.txt +``` + +**On Linux** + +```bash +conda install mkl mkl-include +# CUDA only: Add LAPACK support for the GPU if needed +conda install -c pytorch magma-cuda110 # or the magma-cuda* that matches your CUDA version from https://anaconda.org/pytorch/repo +``` + +**On MacOS** + +```bash +# Add this package on intel x86 processor machines only +conda install mkl mkl-include +# Add these packages if torch.distributed is needed +conda install pkg-config libuv +``` + +**On Windows** + +```bash +conda install mkl mkl-include +# Add these packages if torch.distributed is needed. +# Distributed package support on Windows is a prototype feature and is subject to changes. 
+conda install -c conda-forge libuv=1.39
+```
+
+#### Get the PyTorch Source
+```bash
+git clone --recursive https://github.com/pytorch/pytorch
+cd pytorch
+# if you are updating an existing checkout
+git submodule sync
+git submodule update --init --recursive
+```
+
+#### Install PyTorch
+**On Linux**
+
+If you're compiling for AMD ROCm then first run this command:
+```bash
+# Only run this if you're compiling for ROCm
+python tools/amd_build/build_amd.py
+```
+
+Install PyTorch
+```bash
+export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
+python setup.py develop
+```
+
+> _Aside:_ If you are using [Anaconda](https://www.anaconda.com/distribution/#download-section), you may experience an error caused by the linker:
+>
+> ```plaintext
+> build/temp.linux-x86_64-3.7/torch/csrc/stub.o: file not recognized: file format not recognized
+> collect2: error: ld returned 1 exit status
+> error: command 'g++' failed with exit status 1
+> ```
+>
+> This is caused by `ld` from the Conda environment shadowing the system `ld`. You should use a newer version of Python that fixes this issue. The recommended Python version is 3.8.1+.
+
+**On macOS**
+
+```bash
+python3 setup.py develop
+```
+
+**On Windows**
+
+Choose the correct Visual Studio version.
+
+PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise,
+Professional, or Community Editions. You can also install the build tools from
+https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not*
+come with Visual Studio Code by default.
+
+If you want to build legacy Python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md#building-on-legacy-code-and-cuda).
+
+**CPU-only builds**
+
+In this mode PyTorch computations will run on your CPU, not your GPU.
+
+```cmd
+conda activate
+python setup.py develop
+```
+
+Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the building environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. The instructions [here](https://github.com/pytorch/pytorch/blob/master/docs/source/notes/windows.rst#building-from-source) show an example of setting up both MKL and Intel OpenMP. Without these configurations for CMake, the Microsoft Visual C OpenMP runtime (vcomp) will be used.
+
+**CUDA based build**
+
+In this mode PyTorch computations will leverage your GPU via CUDA for faster number crunching.
+
+[NVTX](https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) is needed to build PyTorch with CUDA.
+NVTX is part of the CUDA distribution, where it is called "Nsight Compute". To install it onto an existing CUDA installation, run the CUDA installer again and check the corresponding checkbox.
+Make sure that CUDA with Nsight Compute is installed after Visual Studio.
+
+Currently, VS 2017 / 2019 and Ninja are supported as CMake generators. If `ninja.exe` is detected in `PATH`, Ninja will be used as the default generator; otherwise VS 2017 / 2019 will be used.
+
If Ninja is selected as the generator, the latest MSVC will get selected as the underlying toolchain. + +Additional libraries such as +[Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/master/.ci/pytorch/win-test-helpers/installation-helpers) to install them. + +You can refer to the [build_pytorch.bat](https://github.com/pytorch/pytorch/blob/master/.ci/pytorch/win-test-helpers/build_pytorch.bat) script for some other environment variables configurations + + +```cmd +cmd + +:: Set the environment variables after you have downloaded and unzipped the mkl package, +:: else CMake would throw an error as `Could NOT find OpenMP`. +set CMAKE_INCLUDE_PATH={Your directory}\mkl\include +set LIB={Your directory}\mkl\lib;%LIB% + +:: Read the content in the previous section carefully before you proceed. +:: [Optional] If you want to override the underlying toolset used by Ninja and Visual Studio with CUDA, please run the following script block. +:: "Visual Studio 2019 Developer Command Prompt" will be run automatically. +:: Make sure you have CMake >= 3.12 before you do this when you use the Visual Studio generator. +set CMAKE_GENERATOR_TOOLSET_VERSION=14.27 +set DISTUTILS_USE_SDK=1 +for /f "usebackq tokens=*" %i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version [15^,17^) -products * -latest -property installationPath`) do call "%i\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%CMAKE_GENERATOR_TOOLSET_VERSION% + +:: [Optional] If you want to override the CUDA host compiler +set CUDAHOSTCXX=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\HostX64\x64\cl.exe + +python setup.py develop + +``` + +##### Adjust Build Options (Optional) + +You can adjust the configuration of cmake variables optionally (without building first), by doing +the following. For example, adjusting the pre-detected directories for CuDNN or BLAS can be done +with such a step. + +On Linux +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +On macOS +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +### Docker Image + +#### Using pre-built images + +You can also pull a pre-built docker image from Docker Hub and run with docker v19.03+ + +```bash +docker run --gpus all --rm -ti --ipc=host pytorch/pytorch:latest +``` + +Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g. +for multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you +should increase shared memory size either with `--ipc=host` or `--shm-size` command line options to `nvidia-docker run`. + +#### Building the image yourself + +**NOTE:** Must be built with a docker version > 18.06 + +The `Dockerfile` is supplied to build images with CUDA 11.1 support and cuDNN v8. +You can pass `PYTHON_VERSION=x.y` make variable to specify which Python version is to be used by Miniconda, or leave it +unset to use the default. 
+```bash
+make -f docker.Makefile
+# images are tagged as docker.io/${your_docker_username}/pytorch
+```
+
+### Building the Documentation
+
+To build documentation in various formats, you will need [Sphinx](http://www.sphinx-doc.org) and the
+readthedocs theme.
+
+```bash
+cd docs/
+pip install -r requirements.txt
+```
+You can then build the documentation by running `make <format>` from the
+`docs/` folder. Run `make` to get a list of all available output formats.
+
+If you get a katex error, run `npm install katex`. If it persists, try
+`npm install -g katex`.
+
+> Note: if you installed `nodejs` with a different package manager (e.g.,
+`conda`) then `npm` will probably install a version of `katex` that is not
+compatible with your version of `nodejs` and doc builds will fail.
+A combination of versions that is known to work is `node@6.13.1` and
+`katex@0.13.18`. To install the latter with `npm` you can run
+`npm install -g katex@0.13.18`
+
+### Previous Versions
+
+Installation instructions and binaries for previous PyTorch versions may be found
+on [our website](https://pytorch.org/previous-versions).
+
+
+## Getting Started
+
+A few pointers to get you started:
+- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/)
+- [Examples: easy to understand PyTorch code across all domains](https://github.com/pytorch/examples)
+- [The API Reference](https://pytorch.org/docs/)
+- [Glossary](https://github.com/pytorch/pytorch/blob/master/GLOSSARY.md)
+
+## Resources
+
+* [PyTorch.org](https://pytorch.org/)
+* [PyTorch Tutorials](https://pytorch.org/tutorials/)
+* [PyTorch Examples](https://github.com/pytorch/examples)
+* [PyTorch Models](https://pytorch.org/hub/)
+* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188)
+* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229)
+* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch)
+* [PyTorch Twitter](https://twitter.com/PyTorch)
+* [PyTorch Blog](https://pytorch.org/blog/)
+* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw)
+
+## Communication
+* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org
+* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc.
+* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is [PyTorch Forums](https://discuss.pytorch.org). If you need a Slack invite, please fill in this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1
+* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign up here: https://eepurl.com/cbG0rv
+* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch
+* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/)
+
+## Releases and Contributing
+
+PyTorch has a 90-day release cycle (major releases). Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
+
+We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion.
+ +If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us. +Sending a PR without discussion might end up resulting in a rejected PR because we might be taking the core in a different direction than you might be aware of. + +To learn more about making a contribution to Pytorch, please see our [Contribution page](CONTRIBUTING.md). + +## The Team + +PyTorch is a community-driven project with several skillful engineers and researchers contributing to it. + +PyTorch is currently maintained by [Adam Paszke](https://apaszke.github.io/), [Sam Gross](https://github.com/colesbury), [Soumith Chintala](http://soumith.ch) and [Gregory Chanan](https://github.com/gchanan) with major contributions coming from hundreds of talented individuals in various forms and means. +A non-exhaustive but growing list needs to mention: Trevor Killeen, Sasank Chilamkurthy, Sergey Zagoruyko, Adam Lerer, Francisco Massa, Alykhan Tejani, Luca Antiga, Alban Desmaison, Andreas Koepf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein, Christian Sarofeen, Martin Raison, Edward Yang, Zachary Devito. + +Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch) with the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch. + +## License + +PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file. diff --git a/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl new file mode 100644 index 0000000000000000000000000000000000000000..5c8b19af93d427e5f4d4a3c6bc47f9e4d8c60aa4 --- /dev/null +++ b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9effde09a740a46ee5be2f7d9468c4e8cf56d52480839f13ed90f0a22dacb64 +size 196386536 diff --git a/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl.metadata b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..dc3ca6c4b33adde104c32b3cf2a374865d20c194 --- /dev/null +++ b/torch/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64_2.whl.metadata @@ -0,0 +1,483 @@ +Metadata-Version: 2.1 +Name: torch +Version: 2.0.0a0+gite9ebda2 +Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration +Home-page: https://pytorch.org/ +Download-URL: https://github.com/pytorch/pytorch/tags +Author: PyTorch Team +Author-email: packages@pytorch.org +License: BSD-3 +Keywords: pytorch,machine learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 
3.10 +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: NOTICE +Requires-Dist: filelock +Requires-Dist: typing-extensions +Requires-Dist: sympy +Requires-Dist: networkx +Requires-Dist: jinja2 +Provides-Extra: opt-einsum +Requires-Dist: opt-einsum >=3.3 ; extra == 'opt-einsum' + +![PyTorch Logo](https://github.com/pytorch/pytorch/blob/master/docs/source/_static/img/pytorch-logo-dark.png) + +-------------------------------------------------------------------------------- + +PyTorch is a Python package that provides two high-level features: +- Tensor computation (like NumPy) with strong GPU acceleration +- Deep neural networks built on a tape-based autograd system + +You can reuse your favorite Python packages such as NumPy, SciPy, and Cython to extend PyTorch when needed. + +Our trunk health (Continuous Integration signals) can be found at [hud.pytorch.org](https://hud.pytorch.org/ci/pytorch/pytorch/master). + + + +- [More About PyTorch](#more-about-pytorch) + - [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library) + - [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd) + - [Python First](#python-first) + - [Imperative Experiences](#imperative-experiences) + - [Fast and Lean](#fast-and-lean) + - [Extensions Without Pain](#extensions-without-pain) +- [Installation](#installation) + - [Binaries](#binaries) + - [NVIDIA Jetson Platforms](#nvidia-jetson-platforms) + - [From Source](#from-source) + - [Prerequisites](#prerequisites) + - [Install Dependencies](#install-dependencies) + - [Get the PyTorch Source](#get-the-pytorch-source) + - [Install PyTorch](#install-pytorch) + - [Adjust Build Options (Optional)](#adjust-build-options-optional) + - [Docker Image](#docker-image) + - [Using pre-built images](#using-pre-built-images) + - [Building the image yourself](#building-the-image-yourself) + - [Building the Documentation](#building-the-documentation) + - [Previous Versions](#previous-versions) +- [Getting Started](#getting-started) +- [Resources](#resources) +- [Communication](#communication) +- [Releases and Contributing](#releases-and-contributing) +- [The Team](#the-team) +- [License](#license) + + + +## More About PyTorch + +At a granular level, PyTorch is a library that consists of the following components: + +| Component | Description | +| ---- | --- | +| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support | +| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch | +| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code | +| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility | +| [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training | +| [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience | + +Usually, PyTorch is used either as: + +- A replacement for NumPy to use the power of GPUs. +- A deep learning research platform that provides maximum flexibility and speed. 
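+Before elaborating, a small hedged illustration of the NumPy-replacement use (standard `torch` and `numpy` calls only; `torch.from_numpy` shares memory with the source array):
+
+```python
+import numpy as np
+import torch
+
+a = np.arange(6.0).reshape(2, 3)
+t = torch.from_numpy(a)   # shares memory with the NumPy array
+t.mul_(2)                 # in-place edits are visible from NumPy too
+print(a)                  # [[ 0.  2.  4.] [ 6.  8. 10.]]
+
+t = t.cuda() if torch.cuda.is_available() else t  # hop to the GPU when present
+print(t.sum())
+```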
+ +Elaborating Further: + +### A GPU-Ready Tensor Library + +If you use NumPy, then you have used Tensors (a.k.a. ndarray). + +![Tensor illustration](./docs/source/_static/img/tensor_illustration.png) + +PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the +computation by a huge amount. + +We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs +such as slicing, indexing, mathematical operations, linear algebra, reductions. +And they are fast! + +### Dynamic Neural Networks: Tape-Based Autograd + +PyTorch has a unique way of building neural networks: using and replaying a tape recorder. + +Most frameworks such as TensorFlow, Theano, Caffe, and CNTK have a static view of the world. +One has to build a neural network and reuse the same structure again and again. +Changing the way the network behaves means that one has to start from scratch. + +With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to +change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes +from several research papers on this topic, as well as current and past work such as +[torch-autograd](https://github.com/twitter/torch-autograd), +[autograd](https://github.com/HIPS/autograd), +[Chainer](https://chainer.org), etc. + +While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. +You get the best of speed and flexibility for your crazy research. + +![Dynamic graph](https://github.com/pytorch/pytorch/blob/master/docs/source/_static/img/dynamic_graph.gif) + +### Python First + +PyTorch is not a Python binding into a monolithic C++ framework. +It is built to be deeply integrated into Python. +You can use it naturally like you would use [NumPy](https://www.numpy.org/) / [SciPy](https://www.scipy.org/) / [scikit-learn](https://scikit-learn.org) etc. +You can write your new neural network layers in Python itself, using your favorite libraries +and use packages such as [Cython](https://cython.org/) and [Numba](http://numba.pydata.org/). +Our goal is to not reinvent the wheel where appropriate. + +### Imperative Experiences + +PyTorch is designed to be intuitive, linear in thought, and easy to use. +When you execute a line of code, it gets executed. There isn't an asynchronous view of the world. +When you drop into a debugger or receive error messages and stack traces, understanding them is straightforward. +The stack trace points to exactly where your code was defined. +We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines. + +### Fast and Lean + +PyTorch has minimal framework overhead. We integrate acceleration libraries +such as [Intel MKL](https://software.intel.com/mkl) and NVIDIA ([cuDNN](https://developer.nvidia.com/cudnn), [NCCL](https://developer.nvidia.com/nccl)) to maximize speed. +At the core, its CPU and GPU Tensor and neural network backends +are mature and have been tested for years. + +Hence, PyTorch is quite fast – whether you run small or large neural networks. + +The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives. +We've written custom memory allocators for the GPU to make sure that +your deep learning models are maximally memory efficient. +This enables you to train bigger deep learning models than before. 
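+The caching allocator can also be observed directly; a hedged peek at its counters (real `torch.cuda` calls, only meaningful on a CUDA machine):
+
+```python
+import torch
+
+if torch.cuda.is_available():
+    x = torch.randn(1024, 1024, device="cuda")
+    print(torch.cuda.memory_allocated())  # bytes currently held by live tensors
+    print(torch.cuda.memory_reserved())   # bytes cached by the allocator
+    del x
+    torch.cuda.empty_cache()              # hand cached blocks back to the driver
+```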
+ +### Extensions Without Pain + +Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straightforward +and with minimal abstractions. + +You can write new neural network layers in Python using the torch API +[or your favorite NumPy-based libraries such as SciPy](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html). + +If you want to write your layers in C/C++, we provide a convenient extension API that is efficient and with minimal boilerplate. +No wrapper code needs to be written. You can see [a tutorial here](https://pytorch.org/tutorials/advanced/cpp_extension.html) and [an example here](https://github.com/pytorch/extension-cpp). + + +## Installation + +### Binaries +Commands to install binaries via Conda or pip wheels are on our website: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/) + + +#### NVIDIA Jetson Platforms + +Python wheels for NVIDIA's Jetson Nano, Jetson TX1/TX2, Jetson Xavier NX/AGX, and Jetson AGX Orin are provided [here](https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048) and the L4T container is published [here](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch) + +They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv) and [@ptrblck](https://github.com/ptrblck) are maintaining them. + + +### From Source + +#### Prerequisites +If you are installing from source, you will need: +- Python 3.8 or later (for Linux, Python 3.8.1+ is needed) +- A C++17 compatible compiler, such as clang + +We highly recommend installing an [Anaconda](https://www.anaconda.com/distribution/#download-section) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro. + +If you want to compile with CUDA support, install the following (note that CUDA is not supported on macOS) +- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 11.0 or above +- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v7 or above +- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA + +Note: You could refer to the [cuDNN Support Matrix](https://docs.nvidia.com/deeplearning/cudnn/pdf/cuDNN-Support-Matrix.pdf) for cuDNN versions with the various supported CUDA, CUDA driver and NVIDIA hardware + +If you want to disable CUDA support, export the environment variable `USE_CUDA=0`. +Other potentially useful environment variables may be found in `setup.py`. + +If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), Instructions to install PyTorch for Jetson Nano are [available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/) + +If you want to compile with ROCm support, install +- [AMD ROCm](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html) 4.0 and above installation +- ROCm is currently supported only for Linux systems. + +If you want to disable ROCm support, export the environment variable `USE_ROCM=0`. +Other potentially useful environment variables may be found in `setup.py`. 
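+Once a build finishes, a hedged way to confirm which of these options actually took effect (all standard `torch` attributes):
+
+```python
+import torch
+
+print(torch.version.cuda)                 # None for a USE_CUDA=0 build
+print(torch.version.hip)                  # None unless built with ROCm
+print(torch.backends.mkl.is_available())  # True when linked against MKL
+print(torch.__config__.show())            # full compile-time configuration
+```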
+ +#### Install Dependencies + +**Common** + +```bash +conda install cmake ninja +# Run this command from the PyTorch directory after cloning the source code using the “Get the PyTorch Source“ section below +pip install -r requirements.txt +``` + +**On Linux** + +```bash +conda install mkl mkl-include +# CUDA only: Add LAPACK support for the GPU if needed +conda install -c pytorch magma-cuda110 # or the magma-cuda* that matches your CUDA version from https://anaconda.org/pytorch/repo +``` + +**On MacOS** + +```bash +# Add this package on intel x86 processor machines only +conda install mkl mkl-include +# Add these packages if torch.distributed is needed +conda install pkg-config libuv +``` + +**On Windows** + +```bash +conda install mkl mkl-include +# Add these packages if torch.distributed is needed. +# Distributed package support on Windows is a prototype feature and is subject to changes. +conda install -c conda-forge libuv=1.39 +``` + +#### Get the PyTorch Source +```bash +git clone --recursive https://github.com/pytorch/pytorch +cd pytorch +# if you are updating an existing checkout +git submodule sync +git submodule update --init --recursive +``` + +#### Install PyTorch +**On Linux** + +If you're compiling for AMD ROCm then first run this command: +```bash +# Only run this if you're compiling for ROCm +python tools/amd_build/build_amd.py +``` + +Install PyTorch +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py develop +``` + +> _Aside:_ If you are using [Anaconda](https://www.anaconda.com/distribution/#download-section), you may experience an error caused by the linker: +> +> ```plaintext +> build/temp.linux-x86_64-3.7/torch/csrc/stub.o: file not recognized: file format not recognized +> collect2: error: ld returned 1 exit status +> error: command 'g++' failed with exit status 1 +> ``` +> +> This is caused by `ld` from the Conda environment shadowing the system `ld`. You should use a newer version of Python that fixes this issue. The recommended Python version is 3.8.1+. + +**On macOS** + +```bash +python3 setup.py develop +``` + +**On Windows** + +Choose Correct Visual Studio Version. + +PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise, +Professional, or Community Editions. You can also install the build tools from +https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not* +come with Visual Studio Code by default. + +If you want to build legacy python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md#building-on-legacy-code-and-cuda) + +**CPU-only builds** + +In this mode PyTorch computations will run on your CPU, not your GPU + +```cmd +conda activate +python setup.py develop +``` + +Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the building environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. The instruction [here](https://github.com/pytorch/pytorch/blob/master/docs/source/notes/windows.rst#building-from-source) is an example for setting up both MKL and Intel OpenMP. Without these configurations for CMake, Microsoft Visual C OpenMP runtime (vcomp) will be used. 
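+Whichever platform you built on, a quick, hedged smoke test of the `python setup.py develop` result (standard `torch` attributes; the exact version string will differ per checkout):
+
+```python
+import torch
+
+print(torch.__version__)          # e.g. 2.0.0a0+gite9ebda2 for this source build
+print(torch.version.git_version)  # commit the build was made from
+print(torch.cuda.is_available())  # False is expected for a CPU-only build
+```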
+
+**CUDA based build**
+
+In this mode PyTorch computations will leverage your GPU via CUDA for faster number crunching.
+
+[NVTX](https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) is needed to build PyTorch with CUDA.
+NVTX is part of the CUDA distribution, where it is called "Nsight Compute". To install it onto an existing CUDA installation, run the CUDA installer again and check the corresponding checkbox.
+Make sure that CUDA with Nsight Compute is installed after Visual Studio.
+
+Currently, VS 2017 / 2019 and Ninja are supported as CMake generators. If `ninja.exe` is detected in `PATH`, Ninja will be used as the default generator; otherwise VS 2017 / 2019 will be used.
+
If Ninja is selected as the generator, the latest MSVC will get selected as the underlying toolchain. + +Additional libraries such as +[Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/master/.ci/pytorch/win-test-helpers/installation-helpers) to install them. + +You can refer to the [build_pytorch.bat](https://github.com/pytorch/pytorch/blob/master/.ci/pytorch/win-test-helpers/build_pytorch.bat) script for some other environment variables configurations + + +```cmd +cmd + +:: Set the environment variables after you have downloaded and unzipped the mkl package, +:: else CMake would throw an error as `Could NOT find OpenMP`. +set CMAKE_INCLUDE_PATH={Your directory}\mkl\include +set LIB={Your directory}\mkl\lib;%LIB% + +:: Read the content in the previous section carefully before you proceed. +:: [Optional] If you want to override the underlying toolset used by Ninja and Visual Studio with CUDA, please run the following script block. +:: "Visual Studio 2019 Developer Command Prompt" will be run automatically. +:: Make sure you have CMake >= 3.12 before you do this when you use the Visual Studio generator. +set CMAKE_GENERATOR_TOOLSET_VERSION=14.27 +set DISTUTILS_USE_SDK=1 +for /f "usebackq tokens=*" %i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version [15^,17^) -products * -latest -property installationPath`) do call "%i\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%CMAKE_GENERATOR_TOOLSET_VERSION% + +:: [Optional] If you want to override the CUDA host compiler +set CUDAHOSTCXX=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\HostX64\x64\cl.exe + +python setup.py develop + +``` + +##### Adjust Build Options (Optional) + +You can adjust the configuration of cmake variables optionally (without building first), by doing +the following. For example, adjusting the pre-detected directories for CuDNN or BLAS can be done +with such a step. + +On Linux +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +On macOS +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +### Docker Image + +#### Using pre-built images + +You can also pull a pre-built docker image from Docker Hub and run with docker v19.03+ + +```bash +docker run --gpus all --rm -ti --ipc=host pytorch/pytorch:latest +``` + +Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g. +for multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you +should increase shared memory size either with `--ipc=host` or `--shm-size` command line options to `nvidia-docker run`. + +#### Building the image yourself + +**NOTE:** Must be built with a docker version > 18.06 + +The `Dockerfile` is supplied to build images with CUDA 11.1 support and cuDNN v8. +You can pass `PYTHON_VERSION=x.y` make variable to specify which Python version is to be used by Miniconda, or leave it +unset to use the default. 
+```bash
+make -f docker.Makefile
+# images are tagged as docker.io/${your_docker_username}/pytorch
+```
+
+### Building the Documentation
+
+To build documentation in various formats, you will need [Sphinx](http://www.sphinx-doc.org) and the
+readthedocs theme.
+
+```bash
+cd docs/
+pip install -r requirements.txt
+```
+You can then build the documentation by running `make <format>` from the
+`docs/` folder. Run `make` to get a list of all available output formats.
+
+If you get a katex error, run `npm install katex`. If it persists, try
+`npm install -g katex`.
+
+> Note: if you installed `nodejs` with a different package manager (e.g.,
+`conda`) then `npm` will probably install a version of `katex` that is not
+compatible with your version of `nodejs` and doc builds will fail.
+A combination of versions that is known to work is `node@6.13.1` and
+`katex@0.13.18`. To install the latter with `npm` you can run
+`npm install -g katex@0.13.18`
+
+### Previous Versions
+
+Installation instructions and binaries for previous PyTorch versions may be found
+on [our website](https://pytorch.org/previous-versions).
+
+
+## Getting Started
+
+A few pointers to get you started:
+- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/)
+- [Examples: easy to understand PyTorch code across all domains](https://github.com/pytorch/examples)
+- [The API Reference](https://pytorch.org/docs/)
+- [Glossary](https://github.com/pytorch/pytorch/blob/master/GLOSSARY.md)
+
+## Resources
+
+* [PyTorch.org](https://pytorch.org/)
+* [PyTorch Tutorials](https://pytorch.org/tutorials/)
+* [PyTorch Examples](https://github.com/pytorch/examples)
+* [PyTorch Models](https://pytorch.org/hub/)
+* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188)
+* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229)
+* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch)
+* [PyTorch Twitter](https://twitter.com/PyTorch)
+* [PyTorch Blog](https://pytorch.org/blog/)
+* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw)
+
+## Communication
+* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org
+* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc.
+* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is [PyTorch Forums](https://discuss.pytorch.org). If you need a Slack invite, please fill in this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1
+* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign up here: https://eepurl.com/cbG0rv
+* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch
+* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/)
+
+## Releases and Contributing
+
+PyTorch has a 90-day release cycle (major releases). Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
+
+We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion.
+ +If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us. +Sending a PR without discussion might end up resulting in a rejected PR because we might be taking the core in a different direction than you might be aware of. + +To learn more about making a contribution to Pytorch, please see our [Contribution page](CONTRIBUTING.md). + +## The Team + +PyTorch is a community-driven project with several skillful engineers and researchers contributing to it. + +PyTorch is currently maintained by [Adam Paszke](https://apaszke.github.io/), [Sam Gross](https://github.com/colesbury), [Soumith Chintala](http://soumith.ch) and [Gregory Chanan](https://github.com/gchanan) with major contributions coming from hundreds of talented individuals in various forms and means. +A non-exhaustive but growing list needs to mention: Trevor Killeen, Sasank Chilamkurthy, Sergey Zagoruyko, Adam Lerer, Francisco Massa, Alykhan Tejani, Luca Antiga, Alban Desmaison, Andreas Koepf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein, Christian Sarofeen, Martin Raison, Edward Yang, Zachary Devito. + +Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch) with the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch. + +## License + +PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file. diff --git a/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl b/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl new file mode 100644 index 0000000000000000000000000000000000000000..702b2f6fc1392401d7c4fe45e341e0b0b44bff87 --- /dev/null +++ b/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:891d5c300207a443d89bbb46599f8bdce604f212d759b7cb536653741cb47f8a +size 217587620 diff --git a/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata b/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata new file mode 100644 index 0000000000000000000000000000000000000000..fed60fc0a319f4cb1f29402f082d6584f704b21a --- /dev/null +++ b/torch/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata @@ -0,0 +1,503 @@ +Metadata-Version: 2.1 +Name: torch +Version: 2.1.0a0+cxx11.abi +Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration +Home-page: https://pytorch.org/ +Download-URL: https://github.com/pytorch/pytorch/tags +Author: PyTorch Team +Author-email: packages@pytorch.org +License: BSD-3 +Keywords: pytorch,machine learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 
+Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: NOTICE +Requires-Dist: filelock +Requires-Dist: typing-extensions +Requires-Dist: sympy +Requires-Dist: networkx +Requires-Dist: jinja2 +Requires-Dist: fsspec +Provides-Extra: opt-einsum +Requires-Dist: opt-einsum >=3.3 ; extra == 'opt-einsum' + +![PyTorch Logo](https://github.com/pytorch/pytorch/blob/main/docs/source/_static/img/pytorch-logo-dark.png) + +-------------------------------------------------------------------------------- + +PyTorch is a Python package that provides two high-level features: +- Tensor computation (like NumPy) with strong GPU acceleration +- Deep neural networks built on a tape-based autograd system + +You can reuse your favorite Python packages such as NumPy, SciPy, and Cython to extend PyTorch when needed. + +Our trunk health (Continuous Integration signals) can be found at [hud.pytorch.org](https://hud.pytorch.org/ci/pytorch/pytorch/main). + + + +- [More About PyTorch](#more-about-pytorch) + - [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library) + - [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd) + - [Python First](#python-first) + - [Imperative Experiences](#imperative-experiences) + - [Fast and Lean](#fast-and-lean) + - [Extensions Without Pain](#extensions-without-pain) +- [Installation](#installation) + - [Binaries](#binaries) + - [NVIDIA Jetson Platforms](#nvidia-jetson-platforms) + - [From Source](#from-source) + - [Prerequisites](#prerequisites) + - [Install Dependencies](#install-dependencies) + - [Get the PyTorch Source](#get-the-pytorch-source) + - [Install PyTorch](#install-pytorch) + - [Adjust Build Options (Optional)](#adjust-build-options-optional) + - [Docker Image](#docker-image) + - [Using pre-built images](#using-pre-built-images) + - [Building the image yourself](#building-the-image-yourself) + - [Building the Documentation](#building-the-documentation) + - [Previous Versions](#previous-versions) +- [Getting Started](#getting-started) +- [Resources](#resources) +- [Communication](#communication) +- [Releases and Contributing](#releases-and-contributing) +- [The Team](#the-team) +- [License](#license) + + + +## More About PyTorch + +[Learn the basics of PyTorch](https://pytorch.org/tutorials/beginner/basics/intro.html) + +At a granular level, PyTorch is a library that consists of the following components: + +| Component | Description | +| ---- | --- | +| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support | +| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch | +| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code | +| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility | +| [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training | +| [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience | + +Usually, PyTorch is used either as: + +- A replacement for NumPy to use the power of GPUs. 
+- A deep learning research platform that provides maximum flexibility and speed. + +Elaborating Further: + +### A GPU-Ready Tensor Library + +If you use NumPy, then you have used Tensors (a.k.a. ndarray). + +![Tensor illustration](./docs/source/_static/img/tensor_illustration.png) + +PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the +computation by a huge amount. + +We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs +such as slicing, indexing, mathematical operations, linear algebra, reductions. +And they are fast! + +### Dynamic Neural Networks: Tape-Based Autograd + +PyTorch has a unique way of building neural networks: using and replaying a tape recorder. + +Most frameworks such as TensorFlow, Theano, Caffe, and CNTK have a static view of the world. +One has to build a neural network and reuse the same structure again and again. +Changing the way the network behaves means that one has to start from scratch. + +With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to +change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes +from several research papers on this topic, as well as current and past work such as +[torch-autograd](https://github.com/twitter/torch-autograd), +[autograd](https://github.com/HIPS/autograd), +[Chainer](https://chainer.org), etc. + +While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. +You get the best of speed and flexibility for your crazy research. + +![Dynamic graph](https://github.com/pytorch/pytorch/blob/main/docs/source/_static/img/dynamic_graph.gif) + +### Python First + +PyTorch is not a Python binding into a monolithic C++ framework. +It is built to be deeply integrated into Python. +You can use it naturally like you would use [NumPy](https://www.numpy.org/) / [SciPy](https://www.scipy.org/) / [scikit-learn](https://scikit-learn.org) etc. +You can write your new neural network layers in Python itself, using your favorite libraries +and use packages such as [Cython](https://cython.org/) and [Numba](http://numba.pydata.org/). +Our goal is to not reinvent the wheel where appropriate. + +### Imperative Experiences + +PyTorch is designed to be intuitive, linear in thought, and easy to use. +When you execute a line of code, it gets executed. There isn't an asynchronous view of the world. +When you drop into a debugger or receive error messages and stack traces, understanding them is straightforward. +The stack trace points to exactly where your code was defined. +We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines. + +### Fast and Lean + +PyTorch has minimal framework overhead. We integrate acceleration libraries +such as [Intel MKL](https://software.intel.com/mkl) and NVIDIA ([cuDNN](https://developer.nvidia.com/cudnn), [NCCL](https://developer.nvidia.com/nccl)) to maximize speed. +At the core, its CPU and GPU Tensor and neural network backends +are mature and have been tested for years. + +Hence, PyTorch is quite fast — whether you run small or large neural networks. + +The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives. +We've written custom memory allocators for the GPU to make sure that +your deep learning models are maximally memory efficient. +This enables you to train bigger deep learning models than before. 
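+A hedged, minimal demonstration of the dynamic-graph behavior described above: the loop count below depends on runtime data, and autograd simply follows whatever actually executed:
+
+```python
+import torch
+
+x = torch.randn(5, requires_grad=True)
+y = x * 1.0
+while y.norm() < 10:     # the graph is rebuilt on the fly each iteration
+    y = y * 2
+y.sum().backward()
+print(x.grad)            # reflects however many doublings actually ran
+```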
+ +### Extensions Without Pain + +Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straightforward +and with minimal abstractions. + +You can write new neural network layers in Python using the torch API +[or your favorite NumPy-based libraries such as SciPy](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html). + +If you want to write your layers in C/C++, we provide a convenient extension API that is efficient and with minimal boilerplate. +No wrapper code needs to be written. You can see [a tutorial here](https://pytorch.org/tutorials/advanced/cpp_extension.html) and [an example here](https://github.com/pytorch/extension-cpp). + + +## Installation + +### Binaries +Commands to install binaries via Conda or pip wheels are on our website: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/) + + +#### NVIDIA Jetson Platforms + +Python wheels for NVIDIA's Jetson Nano, Jetson TX1/TX2, Jetson Xavier NX/AGX, and Jetson AGX Orin are provided [here](https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048) and the L4T container is published [here](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch) + +They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv) and [@ptrblck](https://github.com/ptrblck) are maintaining them. + + +### From Source + +#### Prerequisites +If you are installing from source, you will need: +- Python 3.8 or later (for Linux, Python 3.8.1+ is needed) +- A compiler that fully supports C++17, such as clang or gcc (especially for aarch64, gcc 9.4.0 or newer is required) + +We highly recommend installing an [Anaconda](https://www.anaconda.com/download) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro. + +If you want to compile with CUDA support, [select a supported version of CUDA from our support matrix](https://pytorch.org/get-started/locally/), then install the following: +- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) +- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v7 or above +- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA + +Note: You could refer to the [cuDNN Support Matrix](https://docs.nvidia.com/deeplearning/cudnn/pdf/cuDNN-Support-Matrix.pdf) for cuDNN versions with the various supported CUDA, CUDA driver and NVIDIA hardware + +If you want to disable CUDA support, export the environment variable `USE_CUDA=0`. +Other potentially useful environment variables may be found in `setup.py`. + +If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), Instructions to install PyTorch for Jetson Nano are [available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/) + +If you want to compile with ROCm support, install +- [AMD ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) 4.0 and above installation +- ROCm is currently supported only for Linux systems. + +If you want to disable ROCm support, export the environment variable `USE_ROCM=0`. +Other potentially useful environment variables may be found in `setup.py`. 
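+A hedged follow-up check that the CUDA/cuDNN pairing from the support matrix was actually picked up (standard `torch.backends` calls):
+
+```python
+import torch
+
+print(torch.version.cuda)                   # CUDA toolkit the build used
+print(torch.backends.cudnn.is_available())  # was cuDNN found at runtime?
+print(torch.backends.cudnn.version())       # e.g. 8902 for cuDNN 8.9.2
+```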
+ +#### Install Dependencies + +**Common** + +```bash +conda install cmake ninja +# Run this command from the PyTorch directory after cloning the source code using the “Get the PyTorch Source“ section below +pip install -r requirements.txt +``` + +**On Linux** + +```bash +conda install mkl mkl-include +# CUDA only: Add LAPACK support for the GPU if needed +conda install -c pytorch magma-cuda110 # or the magma-cuda* that matches your CUDA version from https://anaconda.org/pytorch/repo + +# (optional) If using torch.compile with inductor/triton, install the matching version of triton +# Run from the pytorch directory after cloning +make triton +``` + +**On MacOS** + +```bash +# Add this package on intel x86 processor machines only +conda install mkl mkl-include +# Add these packages if torch.distributed is needed +conda install pkg-config libuv +``` + +**On Windows** + +```bash +conda install mkl mkl-include +# Add these packages if torch.distributed is needed. +# Distributed package support on Windows is a prototype feature and is subject to changes. +conda install -c conda-forge libuv=1.39 +``` + +#### Get the PyTorch Source +```bash +git clone --recursive https://github.com/pytorch/pytorch +cd pytorch +# if you are updating an existing checkout +git submodule sync +git submodule update --init --recursive +``` + +#### Install PyTorch +**On Linux** + +If you would like to compile PyTorch with [new C++ ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) enabled, then first run this command: +```bash +export _GLIBCXX_USE_CXX11_ABI=1 +``` + +If you're compiling for AMD ROCm then first run this command: +```bash +# Only run this if you're compiling for ROCm +python tools/amd_build/build_amd.py +``` + +Install PyTorch +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py develop +``` + +> _Aside:_ If you are using [Anaconda](https://www.anaconda.com/distribution/#download-section), you may experience an error caused by the linker: +> +> ```plaintext +> build/temp.linux-x86_64-3.7/torch/csrc/stub.o: file not recognized: file format not recognized +> collect2: error: ld returned 1 exit status +> error: command 'g++' failed with exit status 1 +> ``` +> +> This is caused by `ld` from the Conda environment shadowing the system `ld`. You should use a newer version of Python that fixes this issue. The recommended Python version is 3.8.1+. + +**On macOS** + +```bash +python3 setup.py develop +``` + +**On Windows** + +Choose Correct Visual Studio Version. + +PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise, +Professional, or Community Editions. You can also install the build tools from +https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not* +come with Visual Studio Code by default. + +If you want to build legacy python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md#building-on-legacy-code-and-cuda) + +**CPU-only builds** + +In this mode PyTorch computations will run on your CPU, not your GPU + +```cmd +conda activate +python setup.py develop +``` + +Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the building environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. 
+**On macOS**
+
+```bash
+python3 setup.py develop
+```
+
+**On Windows**
+
+Choose the correct Visual Studio version.
+
+PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise,
+Professional, or Community Editions. You can also install the build tools from
+https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not*
+come with Visual Studio Code by default.
+
+If you want to build legacy Python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md#building-on-legacy-code-and-cuda).
+
+**CPU-only builds**
+
+In this mode PyTorch computations will run on your CPU, not your GPU.
+
+```cmd
+conda activate
+python setup.py develop
+```
+
+Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the build environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. The instructions [here](https://github.com/pytorch/pytorch/blob/main/docs/source/notes/windows.rst#building-from-source) are an example of setting up both MKL and Intel OpenMP. Without these CMake configurations, the Microsoft Visual C OpenMP runtime (vcomp) will be used.
+
+**CUDA based build**
+
+In this mode PyTorch computations will leverage your GPU via CUDA for faster number crunching.
+
+[NVTX](https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) is needed to build PyTorch with CUDA.
+NVTX is part of the CUDA distribution, where it is called "Nsight Compute". To install it onto an already installed CUDA, run the CUDA installation once again and check the corresponding checkbox.
+Make sure that CUDA with Nsight Compute is installed after Visual Studio.
+
+Currently, VS 2017 / 2019 and Ninja are supported as CMake generators. If `ninja.exe` is detected in `PATH`, Ninja will be used as the default generator; otherwise, VS 2017 / 2019 will be used.
+If Ninja is selected as the generator, the latest MSVC will be selected as the underlying toolchain.
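If the auto-detection picks the wrong generator, you can pin one explicitly before building; this is a sketch that assumes the build scripts forward the standard `CMAKE_GENERATOR` environment variable through to CMake:

```cmd
:: Sketch: force the Visual Studio generator even when ninja.exe is on PATH
set CMAKE_GENERATOR=Visual Studio 16 2019
python setup.py develop
```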
+
+Additional libraries such as
+[Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a. MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/main/.ci/pytorch/win-test-helpers/installation-helpers) to install them.
+
+You can refer to the [build_pytorch.bat](https://github.com/pytorch/pytorch/blob/main/.ci/pytorch/win-test-helpers/build_pytorch.bat) script for the configuration of some other environment variables.
+
+```cmd
+cmd
+:: (the line above starts a plain cmd shell; the remaining commands run inside it)
+
+:: Set the environment variables after you have downloaded and unzipped the mkl package,
+:: else CMake would throw an error such as `Could NOT find OpenMP`.
+set CMAKE_INCLUDE_PATH={Your directory}\mkl\include
+set LIB={Your directory}\mkl\lib;%LIB%
+
+:: Read the content in the previous section carefully before you proceed.
+:: [Optional] If you want to override the underlying toolset used by Ninja and Visual Studio with CUDA, run the following script block.
+:: "Visual Studio 2019 Developer Command Prompt" will be run automatically.
+:: Make sure you have CMake >= 3.12 before you do this when you use the Visual Studio generator.
+set CMAKE_GENERATOR_TOOLSET_VERSION=14.27
+set DISTUTILS_USE_SDK=1
+for /f "usebackq tokens=*" %i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version [15^,17^) -products * -latest -property installationPath`) do call "%i\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%CMAKE_GENERATOR_TOOLSET_VERSION%
+
+:: [Optional] If you want to override the CUDA host compiler
+set CUDAHOSTCXX=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\HostX64\x64\cl.exe
+
+python setup.py develop
+```
+
+##### Adjust Build Options (Optional)
+
+You can optionally adjust the configuration of CMake variables (without building first) with the following steps. For example, the pre-detected directories for cuDNN or BLAS can be adjusted this way.
+
+On Linux
+```bash
+export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
+python setup.py build --cmake-only
+ccmake build  # or cmake-gui build
+```
+
+On macOS
+```bash
+export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
+MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build --cmake-only
+ccmake build  # or cmake-gui build
+```
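If you prefer a non-interactive alternative to `ccmake`/`cmake-gui`, the same cache can be edited from the command line; the variable below is only illustrative (run `cmake -LA build` to list the cache entries your checkout actually exposes):

```bash
python setup.py build --cmake-only
cmake -DBLAS=OpenBLAS build   # e.g., switch the pre-detected BLAS backend
python setup.py develop       # then finish the build with the adjusted cache
```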
+
+### Docker Image
+
+#### Using pre-built images
+
+You can also pull a pre-built docker image from Docker Hub and run it with docker v19.03+:
+
+```bash
+docker run --gpus all --rm -ti --ipc=host pytorch/pytorch:latest
+```
+
+Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g.
+for multithreaded data loaders), the default shared memory segment size that the container runs with is not enough. You
+should increase the shared memory size with either the `--ipc=host` or the `--shm-size` command line option to `docker run`.
+
+#### Building the image yourself
+
+**NOTE:** Must be built with a docker version > 18.06
+
+The `Dockerfile` is supplied to build images with CUDA 11.1 support and cuDNN v8.
+You can pass the `PYTHON_VERSION=x.y` make variable to specify which Python version is to be used by Miniconda, or leave it
+unset to use the default.
+
+```bash
+make -f docker.Makefile
+# images are tagged as docker.io/${your_docker_username}/pytorch
+```
+
+You can also pass the `CMAKE_VARS="..."` environment variable to specify additional CMake variables to be passed to CMake during the build.
+See [setup.py](./setup.py) for the list of available variables.
+
+```bash
+CMAKE_VARS="BUILD_CAFFE2=ON BUILD_CAFFE2_OPS=ON" make -f docker.Makefile
+```
+
+### Building the Documentation
+
+To build documentation in various formats, you will need [Sphinx](http://www.sphinx-doc.org) and the
+readthedocs theme.
+
+```bash
+cd docs/
+pip install -r requirements.txt
+```
+You can then build the documentation by running `make <format>` from the
+`docs/` folder. Run `make` to get a list of all available output formats.
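For example, to produce the HTML version (assuming the requirements above installed cleanly; `html` is the usual Sphinx Makefile target):

```bash
cd docs/
make html
# the rendered pages typically land under build/html/
```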
+If you get a katex error, run `npm install katex`. If it persists, try
+`npm install -g katex`.
+
+> Note: if you installed `nodejs` with a different package manager (e.g.,
+> `conda`), then `npm` will probably install a version of `katex` that is not
+> compatible with your version of `nodejs`, and doc builds will fail.
+> A combination of versions that is known to work is `node@6.13.1` and
+> `katex@0.13.18`. To install the latter with `npm`, you can run
+> `npm install -g katex@0.13.18`
+
+### Previous Versions
+
+Installation instructions and binaries for previous PyTorch versions may be found
+on [our website](https://pytorch.org/previous-versions).
+
+
+## Getting Started
+
+A few pointers to get you started:
+- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/)
+- [Examples: easy to understand PyTorch code across all domains](https://github.com/pytorch/examples)
+- [The API Reference](https://pytorch.org/docs/)
+- [Glossary](https://github.com/pytorch/pytorch/blob/main/GLOSSARY.md)
+
+## Resources
+
+* [PyTorch.org](https://pytorch.org/)
+* [PyTorch Tutorials](https://pytorch.org/tutorials/)
+* [PyTorch Examples](https://github.com/pytorch/examples)
+* [PyTorch Models](https://pytorch.org/hub/)
+* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188)
+* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229)
+* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch)
+* [PyTorch Twitter](https://twitter.com/PyTorch)
+* [PyTorch Blog](https://pytorch.org/blog/)
+* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw)
+
+## Communication
+* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org
+* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc.
+* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is the [PyTorch Forums](https://discuss.pytorch.org). If you need a Slack invite, please fill out this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1
+* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign up here: https://eepurl.com/cbG0rv
+* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch
+* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/)
+
+## Releases and Contributing
+
+Typically, PyTorch has three minor releases a year. Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
+
+We appreciate all contributions. If you are planning to contribute back bug fixes, please do so without any further discussion.
+
+If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us.
+Sending a PR without discussion might result in a rejected PR, because we might be taking the core in a different direction than you are aware of.
+
+To learn more about making a contribution to PyTorch, please see our [Contribution page](CONTRIBUTING.md). For more information about PyTorch releases, see the [Release page](RELEASE.md).
+
+## The Team
+
+PyTorch is a community-driven project with several skillful engineers and researchers contributing to it.
+
+PyTorch is currently maintained by [Soumith Chintala](http://soumith.ch), [Gregory Chanan](https://github.com/gchanan), [Dmytro Dzhulgakov](https://github.com/dzhulgakov), [Edward Yang](https://github.com/ezyang), and [Nikita Shulga](https://github.com/malfet), with major contributions coming from hundreds of talented individuals in various forms.
+A non-exhaustive but growing list includes: Trevor Killeen, Sasank Chilamkurthy, Sergey Zagoruyko, Adam Lerer, Francisco Massa, Alykhan Tejani, Luca Antiga, Alban Desmaison, Andreas Koepf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein, Christian Sarofeen, Martin Raison, and Zachary DeVito.
+
+Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch), which has the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch related.
+
+## License
+
+PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file.
diff --git a/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl b/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..b7110eec7c0340039f0f7243632a3888264d41ad
--- /dev/null
+++ b/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:baafac9ec83bc362604e767475f8933026aad85b4931c40deda01b5fd34f8fc9
+size 217581717
diff --git a/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata b/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..7d9eff5ccabf2a2dd5a58abf8bcdfa6696fd6e40
--- /dev/null
+++ b/torch/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
@@ -0,0 +1,504 @@
+Metadata-Version: 2.1
+Name: torch
+Version: 2.1.0a0+cxx11.abi
+Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration
+Home-page: https://pytorch.org/
+Download-URL: https://github.com/pytorch/pytorch/tags
+Author: PyTorch Team
+Author-email: packages@pytorch.org
+License: BSD-3
+Keywords: pytorch,machine learning
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.8.0
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: NOTICE
+Requires-Dist: filelock
+Requires-Dist: typing-extensions
+Requires-Dist: sympy
+Requires-Dist: networkx
+Requires-Dist: jinja2
+Requires-Dist: fsspec
+Provides-Extra: opt-einsum
+Requires-Dist: opt-einsum >=3.3 ; extra == 'opt-einsum'
diff --git a/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl b/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..5b7f1478224eb327b855e03762b6556303757f22
--- /dev/null
+++ b/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cfc345c2d275241b39f82c805725cecb39fe1c673dd0619f3b055ff4409ee98
+size 202931543
diff --git a/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl.metadata b/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..6bb4b27e027f8f740e8fd8bc84b82373f414e7cd
--- /dev/null
+++ b/torch/torch-2.1.0a0+git7bcf7da-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,503 @@
+Metadata-Version: 2.1
+Name: torch
+Version: 2.1.0a0+git7bcf7da
+Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration
+Home-page: https://pytorch.org/
+Download-URL: https://github.com/pytorch/pytorch/tags
+Author: PyTorch Team
+Author-email: packages@pytorch.org
+License: BSD-3
+Keywords: pytorch,machine learning
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.8.0
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: NOTICE
+Requires-Dist: filelock
+Requires-Dist: typing-extensions
+Requires-Dist: sympy
+Requires-Dist: networkx
+Requires-Dist: jinja2
+Requires-Dist: fsspec
+Provides-Extra: opt-einsum
+Requires-Dist: opt-einsum >=3.3 ; extra == 'opt-einsum'
+
+## Resources
+
+* [PyTorch.org](https://pytorch.org/)
+* [PyTorch Tutorials](https://pytorch.org/tutorials/)
+* [PyTorch Examples](https://github.com/pytorch/examples)
+* [PyTorch Models](https://pytorch.org/hub/)
+* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188)
+* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229)
+* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch)
+* [PyTorch Twitter](https://twitter.com/PyTorch)
+* [PyTorch Blog](https://pytorch.org/blog/)
+* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw)
+
+## Communication
+* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org
+* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc.
+* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is the [PyTorch Forums](https://discuss.pytorch.org). If you need a Slack invite, please fill in this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1
+* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign up here: https://eepurl.com/cbG0rv
+* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch
+* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/)
+
+## Releases and Contributing
+
+Typically, PyTorch has three minor releases a year. Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
+
+We appreciate all contributions. If you are planning to contribute back bug fixes, please do so without any further discussion.
+
+If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us.
+Sending a PR without discussion might end up resulting in a rejected PR because we might be taking the core in a different direction than you might be aware of.
+
+To learn more about making a contribution to PyTorch, please see our [Contribution page](CONTRIBUTING.md). For more information about PyTorch releases, see the [Release page](RELEASE.md).
+
+## The Team
+
+PyTorch is a community-driven project with several skillful engineers and researchers contributing to it.
+
+PyTorch is currently maintained by [Soumith Chintala](http://soumith.ch), [Gregory Chanan](https://github.com/gchanan), [Dmytro Dzhulgakov](https://github.com/dzhulgakov), [Edward Yang](https://github.com/ezyang), and [Nikita Shulga](https://github.com/malfet), with major contributions coming from hundreds of talented individuals in various forms.
+A non-exhaustive but growing list needs to mention: Trevor Killeen, Sasank Chilamkurthy, Sergey Zagoruyko, Adam Lerer, Francisco Massa, Alykhan Tejani, Luca Antiga, Alban Desmaison, Andreas Koepf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein, Christian Sarofeen, Martin Raison, Edward Yang, Zachary Devito.
+
+Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch), which has the same name. Hugh is a valuable contributor to the Torch community and has helped with many things related to Torch and PyTorch.
+
+## License
+
+PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file.
diff --git a/torchaudio/index.html b/torchaudio/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..f5901b5fc2d252bcc0d96025e0da4b38578755cb
--- /dev/null
+++ b/torchaudio/index.html
@@ -0,0 +1,28 @@
+Links for torchaudio
+
+torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
+torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl
+torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl
diff --git a/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl b/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..68260958af46bfd81ce284afe81d9f282355e041
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27c2cd91b81a29cea15f10d2a355262dd85c942bdf40ee480dceaa4ba460858e
+size 2308023
diff --git a/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl.metadata b/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..211f1ec6855a41b70943ff958dd30a689d15e381
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0+6ea1133-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: torchaudio
+Version: 2.1.0+6ea1133
+Summary: An audio package for PyTorch
+Home-page: https://github.com/pytorch/audio
+Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Author-email: soumith@pytorch.org
+Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Maintainer-email: moto@meta.com
+Classifier: Environment :: Plugins
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Multimedia :: Sound/Audio
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: torch
+
+torchaudio: an audio library for PyTorch
+========================================
+
+[![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/main/)
+[![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio)
+[![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio)
+
+![TorchAudio Logo](docs/source/_static/img/logo.png)
+
+The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to
+the audio domain. By supporting PyTorch, torchaudio follows the same philosophy
+of providing strong GPU acceleration, having a focus on trainable features through
+the autograd system, and having consistent style (tensor names and dimension names).
+It is therefore primarily a machine learning library, not a general signal
+processing library. The benefit of this approach is that all computations are
+expressed as PyTorch operations, which makes torchaudio easy to use and lets it
+feel like a natural extension of PyTorch.
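+
+As a quick, hypothetical illustration of that workflow (the file path and
+parameter values below are examples, not taken from this README):
+
+```python
+import torchaudio
+
+# Load an audio file into a (channels, frames) float tensor plus its sample rate.
+waveform, sample_rate = torchaudio.load("speech.wav")  # illustrative path
+
+# Transforms are torch.nn.Modules, so they compose like any other PyTorch code.
+mel = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate, n_mels=64)
+features = mel(waveform)  # shape: (channels, n_mels, time)
+```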
+
+- [Support for audio I/O (load files, save files)](http://pytorch.org/audio/main/)
+  - Load a variety of audio formats, such as `wav`, `mp3`, `ogg`, `flac`, `opus`, `sphere`, into a torch Tensor using SoX
+  - [Kaldi (ark/scp)](http://pytorch.org/audio/main/kaldi_io.html)
+- [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html)
+- Audio and speech processing functions
+  - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html)
+- Common audio transforms
+  - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html)
+- Compliance interfaces: run code using PyTorch that aligns with other libraries (a sketch follows this README)
+  - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html)
+
+Installation
+------------
+
+Please refer to https://pytorch.org/audio/main/installation.html for the installation and build process of TorchAudio.
+
+
+API Reference
+-------------
+
+The API Reference is located here: http://pytorch.org/audio/main/
+
+Contributing Guidelines
+-----------------------
+
+Please refer to [CONTRIBUTING.md](./CONTRIBUTING.md)
+
+Citation
+--------
+
+If you find this package useful, please cite as:
+
+```bibtex
+@article{yang2021torchaudio,
+  title={TorchAudio: Building Blocks for Audio and Speech Processing},
+  author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi},
+  journal={arXiv preprint arXiv:2110.15018},
+  year={2021}
+}
+```
+
+Disclaimer on Datasets
+----------------------
+
+This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
+
+If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
+
+Pre-trained Model License
+-------------------------
+
+The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.
+
+For instance, the SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details.
+
+Other pre-trained models that have different licenses are noted in the documentation. Please check out the [documentation page](https://pytorch.org/audio/main/).
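+
+The compliance interfaces mentioned in the feature list above mirror Kaldi-style
+feature extraction; a minimal, hypothetical sketch (the path and parameter
+values are illustrative):
+
+```python
+import torchaudio
+import torchaudio.compliance.kaldi as kaldi
+
+waveform, sample_rate = torchaudio.load("speech.wav")  # illustrative path
+
+# Kaldi-compatible log-mel filterbank features, computed with PyTorch ops.
+fbank = kaldi.fbank(waveform, num_mel_bins=40, sample_frequency=sample_rate)
+```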
diff --git a/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..54f88b7dc31c15cdb7f6468a076e755f7700a6f1
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34f084413b3db2b76bdf2561c73fc83c543a2eb95cce57f63cf6130f2dc1b18f
+size 2305739
diff --git a/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..7f23e156a36895ba0fee8324da249a6efabab12e
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: torchaudio
+Version: 2.1.0a0+cxx11.abi
+Summary: An audio package for PyTorch
+Home-page: https://github.com/pytorch/audio
+Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Author-email: soumith@pytorch.org
+Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Maintainer-email: moto@meta.com
+Classifier: Environment :: Plugins
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Multimedia :: Sound/Audio
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: torch
diff --git a/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..d6e79b9cfc336c96de05c45c300979f2a58d964c
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40d66048212f748875485b216db99ecf3833c8255413c16783e4dcd004bb0005
+size 2311212
diff --git a/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..7f23e156a36895ba0fee8324da249a6efabab12e
--- /dev/null
+++ b/torchaudio/torchaudio-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: torchaudio
+Version: 2.1.0a0+cxx11.abi
+Summary: An audio package for PyTorch
+Home-page: https://github.com/pytorch/audio
+Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Author-email: soumith@pytorch.org
+Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
+Maintainer-email: moto@meta.com
+Classifier: Environment :: Plugins
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: C++
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Multimedia :: Sound/Audio
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: torch
diff --git a/torchvision/index.html b/torchvision/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..217f70ea37bba31035da7442cea5246d512a3643
--- /dev/null
+++ b/torchvision/index.html
@@ -0,0 +1,36 @@
+Links for torchvision
+
+torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
+torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl
+torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl
+torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl
+torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl
diff --git a/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..a9d700a9cb034145ef6336896d1ec92889b7cc73
Binary files /dev/null and b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl differ
diff --git a/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl.metadata b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..9cf69e5a66ed5ab79ee88f2371df258771e39eab
--- /dev/null
+++ b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,233 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.15.2a0+fa99a53
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.8
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch
+Requires-Dist: pillow !=8.3.*,>=5.3.0
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'
+
+torchvision
+===========
+
+.. image:: https://pepy.tech/badge/torchvision
+   :target: https://pepy.tech/project/torchvision
+
+.. image:: https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v
+   :target: https://pytorch.org/vision/stable/index.html
+
+
+The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision.
+
+
+Installation
+============
+
+We recommend Anaconda as a Python package management system. Please refer to `pytorch.org <https://pytorch.org/>`_
+for details of the PyTorch (``torch``) installation. The following table shows the corresponding ``torchvision`` versions and
+supported Python versions.
+ ++--------------------------+--------------------------+---------------------------------+ +| ``torch`` | ``torchvision`` | ``python`` | ++==========================+==========================+=================================+ +| ``main`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.8``, ``<=3.10`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.13.0`` | ``0.14.0`` | ``>=3.7.2``, ``<=3.10`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.12.0`` | ``0.13.0`` | ``>=3.7``, ``<=3.10`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.11.0`` | ``0.12.0`` | ``>=3.7``, ``<=3.10`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.10.2`` | ``0.11.3`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.10.1`` | ``0.11.2`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.10.0`` | ``0.11.1`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.9.1`` | ``0.10.1`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.9.0`` | ``0.10.0`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.8.2`` | ``0.9.2`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.8.1`` | ``0.9.1`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.8.0`` | ``0.9.0`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.1`` | ``0.8.2`` | ``>=3.6``, ``<=3.9`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.0`` | ``0.8.1`` | ``>=3.6``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.0`` | ``0.8.0`` | ``>=3.6``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.6.0`` | ``0.7.0`` | ``>=3.6``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.5.1`` | ``0.6.1`` | ``>=3.5``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.5.0`` | ``0.6.0`` | ``>=3.5``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.4.0`` | ``0.5.0`` | ``==2.7``, ``>=3.5``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.3.1`` | ``0.4.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.3.0`` | ``0.4.1`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.2.0`` | ``0.4.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.1.0`` | ``0.3.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | 
++--------------------------+--------------------------+---------------------------------+
+| ``<=1.0.1``              | ``0.2.2``                | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+
+Anaconda:
+
+.. code:: bash
+
+    conda install torchvision -c pytorch
+
+pip:
+
+.. code:: bash
+
+    pip install torchvision
+
+From source:
+
+.. code:: bash
+
+    python setup.py install
+    # or, for OSX
+    # MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install
+
+
+We don't officially support building from source using ``pip``, but *if* you do,
+you'll need to use the ``--no-build-isolation`` flag.
+In case building TorchVision from source fails, install the nightly version of PyTorch following
+the linked guide on the `contributing page <https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md#development-installation>`_ and retry the install.
+
+By default, GPU support is built if CUDA is found and ``torch.cuda.is_available()`` is true.
+It's possible to force building GPU support by setting the ``FORCE_CUDA=1`` environment variable,
+which is useful when building a docker image.
+
+Image Backend
+=============
+Torchvision currently supports the following image backends:
+
+* `Pillow`_ (default)
+
+* `Pillow-SIMD`_ - a **much faster** drop-in replacement for Pillow with SIMD. If installed, it will be used as the default.
+
+* `accimage`_ - if installed, it can be activated by calling :code:`torchvision.set_image_backend('accimage')`
+
+* `libpng`_ - can be installed via conda :code:`conda install libpng` or any of the package managers for Debian-based and RHEL-based Linux distributions.
+
+* `libjpeg`_ - can be installed via conda :code:`conda install jpeg` or any of the package managers for Debian-based and RHEL-based Linux distributions. `libjpeg-turbo`_ can be used as well.
+
+**Notes:** ``libpng`` and ``libjpeg`` must be available at compilation time for the corresponding features to be available at runtime. Make sure they are present in the standard library locations;
+otherwise, add the include and library paths to the environment variables ``TORCHVISION_INCLUDE`` and ``TORCHVISION_LIBRARY``, respectively.
+
+.. _libpng : http://www.libpng.org/pub/png/libpng.html
+.. _Pillow : https://python-pillow.org/
+.. _Pillow-SIMD : https://github.com/uploadcare/pillow-simd
+.. _accimage: https://github.com/pytorch/accimage
+.. _libjpeg: http://ijg.org/
+.. _libjpeg-turbo: https://libjpeg-turbo.org/
+
+Video Backend
+=============
+Torchvision currently supports the following video backends:
+
+* `pyav`_ (default) - Pythonic binding for ffmpeg libraries.
+
+.. _pyav : https://github.com/PyAV-Org/PyAV
+
+* video_reader - This needs ffmpeg to be installed and torchvision to be built from source. There shouldn't be any conflicting version of ffmpeg installed. Currently, this is only supported on Linux.
+
+.. code:: bash
+
+    conda install -c conda-forge ffmpeg
+    python setup.py install
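+
+As a quick, hypothetical sketch of typical Python-side usage (the dataset
+choice, download path, and crop sizes below are illustrative, not part of this
+README):
+
+.. code:: python
+
+    import torchvision
+    from torchvision import transforms
+
+    # Compose standard preprocessing transforms.
+    preprocess = transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+    ])
+
+    # Datasets accept a transform; CIFAR10 is downloaded to ./data here.
+    dataset = torchvision.datasets.CIFAR10(root="./data", download=True,
+                                           transform=preprocess)
+    image, label = dataset[0]  # image is a (3, 224, 224) float tensor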
+
+Using the models in C++
+=======================
+TorchVision provides an example project showing how to use the models in C++ using JIT Script.
+
+Installation from source:
+
+.. code:: bash
+
+    mkdir build
+    cd build
+    # Add -DWITH_CUDA=on to enable CUDA support if needed
+    cmake ..
+    make
+    make install
+
+Once installed, the library can be accessed in cmake (after properly configuring ``CMAKE_PREFIX_PATH``) via the :code:`TorchVision::TorchVision` target:
+
+.. code:: cmake
+
+    find_package(TorchVision REQUIRED)
+    target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
+
+The ``TorchVision`` package will also automatically look for the ``Torch`` package and add it as a dependency to ``my-target``,
+so make sure that it is also available to cmake via the ``CMAKE_PREFIX_PATH``.
+
+For an example setup, take a look at ``examples/cpp/hello_world``.
+
+Python linking is disabled by default when compiling TorchVision with CMake; this allows you to run models without any Python
+dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link to Python. This
+can be done by passing ``-DUSE_PYTHON=on`` to CMake.
+
+TorchVision Operators
+---------------------
+In order to get the torchvision operators registered with torch (e.g. for the JIT), all you need to do is to ensure that you
+:code:`#include <torchvision/vision.h>` in your project.
+
+Documentation
+=============
+You can find the API documentation on the PyTorch website: https://pytorch.org/vision/stable/index.html
+
+Contributing
+============
+
+See the `CONTRIBUTING <https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md>`_ file for how to help out.
+
+Disclaimer on Datasets
+======================
+
+This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
+
+If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
+
+Pre-trained Model License
+=========================
+
+The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.
+
+More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See `SWAG LICENSE <https://github.com/facebookresearch/SWAG/blob/main/LICENSE>`_ for additional details.
+
+Citing TorchVision
+==================
+
+If you find TorchVision useful in your work, please consider citing the following BibTeX entry:
+.. code:: bibtex
+
+    @software{torchvision2016,
+        title = {TorchVision: PyTorch's Computer Vision library},
+        author = {TorchVision maintainers and contributors},
+        year = 2016,
+        journal = {GitHub repository},
+        publisher = {GitHub},
+        howpublished = {\url{https://github.com/pytorch/vision}}
+    }
diff --git a/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl
new file mode 100644
index 0000000000000000000000000000000000000000..19a3ae47486a9dfd318e63ba605b04b0041d4338
Binary files /dev/null and b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl differ
diff --git a/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl.metadata b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..9cf69e5a66ed5ab79ee88f2371df258771e39eab
--- /dev/null
+++ b/torchvision/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64_2.whl.metadata
@@ -0,0 +1,233 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.15.2a0+fa99a53
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.8
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch
+Requires-Dist: pillow !=8.3.*,>=5.3.0
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'
diff --git a/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl b/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..26a7e36be9405df1d776bf128f904c9ec1f43057
Binary files /dev/null and b/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl differ
diff --git a/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl.metadata b/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..cd1ac833dd6857fdeafc5b5d448d5256ec2922b2
--- /dev/null
+++ b/torchvision/torchvision-0.16.0+fbb4cc5-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,168 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.16.0+fbb4cc5
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch
+Requires-Dist: pillow !=8.3.*,>=5.3.0
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'
+
+# torchvision
+
+[![total torchvision downloads](https://pepy.tech/badge/torchvision)](https://pepy.tech/project/torchvision)
+[![documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/vision/stable/index.html)
+
+The torchvision package consists of popular datasets, model architectures, and common image transformations for computer
+vision.
+
+## Installation
+
+Please refer to the [official
+instructions](https://pytorch.org/get-started/locally/) to install the stable
+versions of `torch` and `torchvision` on your system.
+
+To build from source, refer to our [contributing
+page](https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md#development-installation).
+
+The following table shows the corresponding `torchvision` versions and supported Python
+versions.
+
+| `torch`            | `torchvision`      | Python              |
+| ------------------ | ------------------ | ------------------- |
+| `main` / `nightly` | `main` / `nightly` | `>=3.8`, `<=3.11`   |
+| `2.1`              | `0.16`             | `>=3.8`, `<=3.11`   |
+| `2.0`              | `0.15`             | `>=3.8`, `<=3.11`   |
+| `1.13`             | `0.14`             | `>=3.7.2`, `<=3.10` |
+
+<details>
+    <summary>older versions</summary>
+
+| `torch` | `torchvision`     | Python                    |
+|---------|-------------------|---------------------------|
+| `1.12`  | `0.13`            | `>=3.7`, `<=3.10`         |
+| `1.11`  | `0.12`            | `>=3.7`, `<=3.10`         |
+| `1.10`  | `0.11`            | `>=3.6`, `<=3.9`          |
+| `1.9`   | `0.10`            | `>=3.6`, `<=3.9`          |
+| `1.8`   | `0.9`             | `>=3.6`, `<=3.9`          |
+| `1.7`   | `0.8`             | `>=3.6`, `<=3.9`          |
+| `1.6`   | `0.7`             | `>=3.6`, `<=3.8`          |
+| `1.5`   | `0.6`             | `>=3.5`, `<=3.8`          |
+| `1.4`   | `0.5`             | `==2.7`, `>=3.5`, `<=3.8` |
+| `1.3`   | `0.4.2` / `0.4.3` | `==2.7`, `>=3.5`, `<=3.7` |
+| `1.2`   | `0.4.1`           | `==2.7`, `>=3.5`, `<=3.7` |
+| `1.1`   | `0.3`             | `==2.7`, `>=3.5`, `<=3.7` |
+| `<=1.0` | `0.2`             | `==2.7`, `>=3.5`, `<=3.7` |
+
+</details>
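+
+To check which versions you have installed against the tables above, a minimal
+sketch:
+
+```python
+import torch
+import torchvision
+
+# Print the installed versions to compare with the compatibility tables.
+print(torch.__version__, torchvision.__version__)
+```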
+
+## Image Backends
+
+Torchvision currently supports the following image backends:
+
+- torch tensors
+- PIL images:
+  - [Pillow](https://python-pillow.org/)
+  - [Pillow-SIMD](https://github.com/uploadcare/pillow-simd) - a **much faster** drop-in replacement for Pillow with SIMD.
+
+Read more in our [docs](https://pytorch.org/vision/stable/transforms.html).
+
+## [UNSTABLE] Video Backend
+
+Torchvision currently supports the following video backends:
+
+- [pyav](https://github.com/PyAV-Org/PyAV) (default) - Pythonic binding for ffmpeg libraries.
+- video_reader - This needs ffmpeg to be installed and torchvision to be built from source. There shouldn't be any
+  conflicting version of ffmpeg installed. Currently, this is only supported on Linux.
+
+```bash
+conda install -c conda-forge ffmpeg
+python setup.py install
+```
+
+# Using the models in C++
+
+TorchVision provides an example project showing how to use the models in C++ using JIT Script.
+
+Installation from source:
+
+```bash
+mkdir build
+cd build
+# Add -DWITH_CUDA=on to enable CUDA support if needed
+cmake ..
+make
+make install
+```
+
+Once installed, the library can be accessed in cmake (after properly configuring `CMAKE_PREFIX_PATH`) via the
+`TorchVision::TorchVision` target:
+
+```cmake
+find_package(TorchVision REQUIRED)
+target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
+```
+
+The `TorchVision` package will also automatically look for the `Torch` package and add it as a dependency to
+`my-target`, so make sure that it is also available to cmake via the `CMAKE_PREFIX_PATH`.
+
+For an example setup, take a look at `examples/cpp/hello_world`.
+
+Python linking is disabled by default when compiling TorchVision with CMake; this allows you to run models without any
+Python dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link
+to Python. This can be done by passing `-DUSE_PYTHON=on` to CMake.
+
+### TorchVision Operators
+
+In order to get the torchvision operators registered with torch (e.g. for the JIT), all you need to do is to ensure that
+you `#include <torchvision/vision.h>` in your project.
+
+## Documentation
+
+You can find the API documentation on the PyTorch website: https://pytorch.org/vision/stable/index.html
+
+## Contributing
+
+See the [CONTRIBUTING](CONTRIBUTING.md) file for how to help out.
+
+## Disclaimer on Datasets
+
+This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets,
+vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to
+determine whether you have permission to use the dataset under the dataset's license.
+
+If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset
+to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML
+community!
+
+## Pre-trained Model License
+
+The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the
+dataset used for training. It is your responsibility to determine whether you have permission to use the models for your
+use case.
+
+More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See
+[SWAG LICENSE](https://github.com/facebookresearch/SWAG/blob/main/LICENSE) for additional details.
+
+## Citing TorchVision
+
+If you find TorchVision useful in your work, please consider citing the following BibTeX entry:
+
+```bibtex
+@software{torchvision2016,
+    title = {TorchVision: PyTorch's Computer Vision library},
+    author = {TorchVision maintainers and contributors},
+    year = 2016,
+    journal = {GitHub repository},
+    publisher = {GitHub},
+    howpublished = {\url{https://github.com/pytorch/vision}}
+}
+```
diff --git a/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..6d9c1d8be2fe1dfff16010190e02cb227def8d9e
Binary files /dev/null and b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl differ
diff --git a/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..fd60b74ba0b4916e528c82bb4f369e07549b8b5e
--- /dev/null
+++ b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,168 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.16.0a0+cxx11.abi
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch
+Requires-Dist: pillow !=8.3.*,>=5.3.0
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'
## Citing TorchVision

If you find TorchVision useful in your work, please consider citing the following BibTeX entry:

```bibtex
@software{torchvision2016,
  title = {TorchVision: PyTorch's Computer Vision library},
  author = {TorchVision maintainers and contributors},
  year = 2016,
  journal = {GitHub repository},
  publisher = {GitHub},
  howpublished = {\url{https://github.com/pytorch/vision}}
}
```
diff --git a/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..c735721f311914a58a25144c37d96f3ab6098a64
Binary files /dev/null and b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl differ
diff --git a/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..fd60b74ba0b4916e528c82bb4f369e07549b8b5e
--- /dev/null
+++ b/torchvision/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl.metadata
@@ -0,0 +1,168 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.16.0a0+cxx11.abi
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch
+Requires-Dist: pillow !=8.3.*,>=5.3.0
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'

# torchvision

[![total torchvision downloads](https://pepy.tech/badge/torchvision)](https://pepy.tech/project/torchvision)
[![documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/vision/stable/index.html)

The torchvision package consists of popular datasets, model architectures, and common image transformations for
computer vision.

## Installation

Please refer to the [official instructions](https://pytorch.org/get-started/locally/) to install the stable versions
of `torch` and `torchvision` on your system.

To build from source, refer to our [contributing
page](https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md#development-installation).

The following table shows the corresponding `torchvision` versions and supported Python versions.

| `torch` | `torchvision` | Python |
| ------------------ | ------------------ | ------------------- |
| `main` / `nightly` | `main` / `nightly` | `>=3.8`, `<=3.11` |
| `2.1` | `0.16` | `>=3.8`, `<=3.11` |
| `2.0` | `0.15` | `>=3.8`, `<=3.11` |
| `1.13` | `0.14` | `>=3.7.2`, `<=3.10` |
<details>
  <summary>older versions</summary>

| `torch` | `torchvision` | Python |
|---------|-------------------|---------------------------|
| `1.12` | `0.13` | `>=3.7`, `<=3.10` |
| `1.11` | `0.12` | `>=3.7`, `<=3.10` |
| `1.10` | `0.11` | `>=3.6`, `<=3.9` |
| `1.9` | `0.10` | `>=3.6`, `<=3.9` |
| `1.8` | `0.9` | `>=3.6`, `<=3.9` |
| `1.7` | `0.8` | `>=3.6`, `<=3.9` |
| `1.6` | `0.7` | `>=3.6`, `<=3.8` |
| `1.5` | `0.6` | `>=3.5`, `<=3.8` |
| `1.4` | `0.5` | `==2.7`, `>=3.5`, `<=3.8` |
| `1.3` | `0.4.2` / `0.4.3` | `==2.7`, `>=3.5`, `<=3.7` |
| `1.2` | `0.4.1` | `==2.7`, `>=3.5`, `<=3.7` |
| `1.1` | `0.3` | `==2.7`, `>=3.5`, `<=3.7` |
| `<=1.0` | `0.2` | `==2.7`, `>=3.5`, `<=3.7` |

</details>
## Image Backends

Torchvision currently supports the following image backends:

- torch tensors
- PIL images:
  - [Pillow](https://python-pillow.org/)
  - [Pillow-SIMD](https://github.com/uploadcare/pillow-simd) - a **much faster** drop-in replacement for Pillow with SIMD.

Read more in our [docs](https://pytorch.org/vision/stable/transforms.html).

## [UNSTABLE] Video Backend

Torchvision currently supports the following video backends:

- [pyav](https://github.com/PyAV-Org/PyAV) (default) - Pythonic bindings for the FFmpeg libraries.
- video_reader - This requires ffmpeg to be installed and torchvision to be built from source, with no conflicting
  version of ffmpeg installed. Currently, this backend is only supported on Linux; it can be selected at runtime via
  `torchvision.set_video_backend("video_reader")`.

To build from source with ffmpeg available:

```
conda install -c conda-forge ffmpeg
python setup.py install
```

# Using the models in C++

TorchVision provides an example project showing how to use the models in C++ with TorchScript (JIT).

Installation from source:

```
mkdir build
cd build
# Add -DWITH_CUDA=on to enable CUDA support if needed
cmake ..
make
make install
```

Once installed, the library can be accessed in CMake (after properly configuring `CMAKE_PREFIX_PATH`) via the
`TorchVision::TorchVision` target:

```
find_package(TorchVision REQUIRED)
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
```

The `TorchVision` package will also automatically look for the `Torch` package and add it as a dependency to
`my-target`, so make sure that `Torch` is also available to CMake via the `CMAKE_PREFIX_PATH`.

For an example setup, take a look at `examples/cpp/hello_world`.

Python linking is disabled by default when compiling TorchVision with CMake; this allows you to run models without any
Python dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link
to Python. This can be done by passing `-DUSE_PYTHON=on` to CMake.

### TorchVision Operators

In order to get the torchvision operators registered with torch (e.g. for the JIT), all you need to do is to ensure
that you `#include <torchvision/vision.h>` in your project.

## Documentation

You can find the API documentation on the pytorch website: <https://pytorch.org/vision/stable/index.html>

## Contributing

See the [CONTRIBUTING](CONTRIBUTING.md) file for how to help out.

## Disclaimer on Datasets

This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets,
vouch for their quality or fairness, or claim that you have a license to use them. It is your responsibility to
determine whether you have permission to use a dataset under that dataset's license.

If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset
to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML
community!

## Pre-trained Model License

The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the
datasets used for training. It is your responsibility to determine whether you have permission to use the models for
your use case.

More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See the
[SWAG LICENSE](https://github.com/facebookresearch/SWAG/blob/main/LICENSE) for additional details.
## Citing TorchVision

If you find TorchVision useful in your work, please consider citing the following BibTeX entry:

```bibtex
@software{torchvision2016,
  title = {TorchVision: PyTorch's Computer Vision library},
  author = {TorchVision maintainers and contributors},
  year = 2016,
  journal = {GitHub repository},
  publisher = {GitHub},
  howpublished = {\url{https://github.com/pytorch/vision}}
}
```
diff --git a/xformers/index.html b/xformers/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..79a394df2caf4931976be21dbdea0bd7537eddc6
--- /dev/null
+++ b/xformers/index.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Links for xformers</title>
+  </head>
+  <body>
+    <h1>Links for xformers</h1>
+    <a href="xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl">xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl</a><br/>
+  </body>
+</html>
diff --git a/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl b/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..1db17e8c0f533c4bdfafe0b19b1ef454811e72c8
--- /dev/null
+++ b/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8508fe14c8f2552a822f5e6f5620b24fdd4ba3129c2a31a39b56425bcc023bc
+size 184256480
diff --git a/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl.metadata b/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl.metadata
new file mode 100644
index 0000000000000000000000000000000000000000..a9a15cf057547aee9dbdfc645b1c1e8118d8bdb2
--- /dev/null
+++ b/xformers/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl.metadata
@@ -0,0 +1,24 @@
+Metadata-Version: 2.1
+Name: xformers
+Version: 0.0.14.dev0
+Summary: XFormers: A collection of composable Transformer building blocks.
+Home-page: https://facebookresearch.github.io/xformers/
+Author: Facebook AI Research
+Author-email: lefaudeux@fb.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: torch (>=1.12)
+Requires-Dist: numpy
+Requires-Dist: pyre-extensions (==0.0.23)

XFormers: A collection of composable Transformer building blocks. XFormers aims at being able to reproduce most
architectures in the Transformer-family SOTA, defined as compatible and combinable building blocks as opposed to
monolithic models.
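As a quick illustration of what these building blocks are used for, here is a minimal sketch of xformers' memory-efficient attention operator. This assumes a CUDA device and an xformers build recent enough to expose `xformers.ops.memory_efficient_attention`; the exact API surface of this particular 0.0.14 dev wheel may differ:

```python
import torch
import xformers.ops as xops

# Query/key/value in (batch, seq_len, num_heads, head_dim) layout.
q = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
k = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
v = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)

# Computes softmax(QK^T / sqrt(d)) V without materializing the full
# attention matrix, which is where the memory saving comes from.
out = xops.memory_efficient_attention(q, k, v)
print(out.shape)  # torch.Size([1, 128, 8, 64])
```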