From f82acabe4c43a3784b162923bf0ab56420850bf0 Mon Sep 17 00:00:00 2001 From: Ramakrishnan Sivakumar Date: Thu, 30 Nov 2023 19:05:07 -0800 Subject: [PATCH] Merge fork from cleanup (#1) * Bring up the tkml tests * Revamp the tkml readme * Create LICENSE * Fix all names, links, and paths (#4) --------- Signed-off-by: Jeremy Fowers <80718789+jeremyfowers@users.noreply.github.com> Co-authored-by: Jeremy Fowers Co-authored-by: Jeremy Fowers <80718789+jeremyfowers@users.noreply.github.com> Co-authored-by: Victoria Godsoe --- .github/workflows/publish-to-test-pypi.yml | 11 +- .github/workflows/test_azure.yml | 80 ------- .github/workflows/test_build_api.yml | 34 +-- .github/workflows/test_gpu_turnkey.yml | 4 +- .github/workflows/test_turnkey.yml | 81 +++---- LICENSE | 201 +++++++++++++++++ README.md | 205 ++++++++++++++++-- docs/code.md | 14 +- docs/contribute.md | 32 +-- docs/coverage.md | 2 +- docs/install.md | 14 +- docs/readme.md | 12 +- docs/tools_user_guide.md | 48 ++-- examples/cli/build.md | 16 +- examples/cli/cache.md | 22 +- examples/cli/discovery.md | 42 ++-- .../runtime.py | 11 +- examples/cli/plugins/readme.md | 2 +- examples/cli/readme.md | 34 +-- models/readme.md | 16 +- .../skip/ssdlite320_mobilenet_v3_large.py | 2 +- setup.py | 5 - src/turnkeyml/analyze/script.py | 2 +- src/turnkeyml/analyze/util.py | 2 +- src/turnkeyml/build/stage.py | 2 +- src/turnkeyml/build_api.py | 2 +- src/turnkeyml/cli/cli.py | 2 +- src/turnkeyml/cli/setup_venv.sh | 4 +- src/turnkeyml/common/labels.py | 2 +- test/cli.py | 2 +- trackers/huggingface/app.py | 9 +- trackers/report_plots.py | 122 +++++++---- 32 files changed, 681 insertions(+), 356 deletions(-) delete mode 100644 .github/workflows/test_azure.yml create mode 100644 LICENSE diff --git a/.github/workflows/publish-to-test-pypi.yml b/.github/workflows/publish-to-test-pypi.yml index 35ef257d..0808cdb6 100644 --- a/.github/workflows/publish-to-test-pypi.yml +++ b/.github/workflows/publish-to-test-pypi.yml @@ -1,4 +1,4 @@ -name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI +name: Publish Python distributions to PyPI on: push: @@ -10,7 +10,7 @@ on: jobs: build-n-publish: - name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI + name: Build and publish Python distributions to PyPI runs-on: ubuntu-latest steps: - uses: actions/checkout@main @@ -24,7 +24,7 @@ jobs: python -m pip install build --user - name: Build a binary wheel and a source tarball run: >- - python -m build --sdist --wheel --outdir dist/ toolchain + python -m build --sdist --wheel --outdir dist/ . 
- name: Test wheel shell: bash -el {0} run: | @@ -32,3 +32,8 @@ jobs: pip install dist/*.whl models=$(turnkey models location --quiet) turnkey $models/selftest/linear.py + - name: Publish distribution package to PyPI + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/test_azure.yml b/.github/workflows/test_azure.yml deleted file mode 100644 index 122b7a2d..00000000 --- a/.github/workflows/test_azure.yml +++ /dev/null @@ -1,80 +0,0 @@ -# This workflow will install Python dependencies and run a basic test to ensure -# that the Cloud/Azure CLI is still working -name: Test Azure CLI - -# TODO: enable these tests again once the repository is in the public domain - -# on: -# push: -# branches: ["main"] -# paths: -# - toolchain/src/turnkeyml/cli/scale/** -# pull_request: -# branches: ["main"] -# paths: -# - toolchain/src/turnkeyml/cli/scale/** - -# In the meantime, allow manual execution -on: - # Allows users to manually trigger the workflow using the GitHub UI - workflow_dispatch: - -permissions: - contents: read - -jobs: - build_and_test: - timeout-minutes: 30 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 - with: - miniconda-version: "latest" - activate-environment: tkml - python-version: "3.8" - - name: Install dependencies - shell: bash -el {0} - run: | - sudo apt-get update - sudo apt-get install -y azure-cli - python -m pip install --upgrade pip - pip install -e . - pip install -r toolchain/src/turnkeyml/cli/cloud/requirements.txt - - name: Run tests - env: - SSH_AUTH_SOCK: /tmp/ssh_agent.sock - TURNKEY_AZURE_PUBLIC_KEY: ${{ secrets.AZURE_SSH_PUBLIC_KEY }} - AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - TURNKEY_AZURE_PASSWORD: ${{ secrets.TURNKEY_AZURE_PASSWORD }} - shell: bash -el {0} - run: | - az login --username ${{ secrets.AZURE_CLIENT_ID }} --password ${{ secrets.AZURE_CLIENT_PWD }} - - ssh-agent -a $SSH_AUTH_SOCK > /dev/null - ssh-add - <<< "${{ secrets.AZURE_SSH_PRIVATE_KEY }}" - - mkdir ~/.ssh - touch ~/.ssh/known_hosts - - cd src/turnkeyml/cli/cloud - - python control.py create setup selftest info stop --name github-test-azure-single --hardware cpu-small - - python control.py create setup selftest info stop --cluster --name github-test-azure-cluster --size 2 --hardware cpu-small - python control.py start run --cluster --name github-test-azure-cluster --size 2 --hardware cpu-small --input-files onnxmodelzoo/toolchain/models/selftest/*.py - clean_up: - needs: build_and_test - if: always() - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Install az cli - run: | - sudo apt-get update - sudo apt-get install -y azure-cli - - name: Delete resource group - run: | - az login --username ${{ secrets.AZURE_CLIENT_ID }} --password ${{ secrets.AZURE_CLIENT_PWD }} - az group delete --name github-test-azure-single-rg --yes - az group delete --name github-test-azure-cluster-rg --yes diff --git a/.github/workflows/test_build_api.yml b/.github/workflows/test_build_api.yml index c3ed5299..9b030920 100644 --- a/.github/workflows/test_build_api.yml +++ b/.github/workflows/test_build_api.yml @@ -36,14 +36,14 @@ jobs: run: | python -m pip install --upgrade pip conda install pylint - pip install -e toolchain[tensorflow] --no-cache-dir + pip install -e .[tensorflow] --no-cache-dir pip install transformers python -m pip check - name: Lint with PyLint shell: bash -el {0} run: | - pylint 
toolchain/src/turnkeyml/build --rcfile toolchain/.pylintrc - pylint toolchain/examples/build_api --rcfile toolchain/.pylintrc + pylint src/turnkeyml/build --rcfile .pylintrc + pylint examples/build_api --rcfile .pylintrc - name: Test with unittest shell: bash -el {0} run: | @@ -51,31 +51,31 @@ jobs: # build api examples # Note: we clear the default cache location prior to each example run rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/hello_torch_world.py + python examples/build_api/hello_torch_world.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/hello_keras_world.py + python examples/build_api/hello_keras_world.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/hello_onnx_world.py + python examples/build_api/hello_onnx_world.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/randomforest.py + python examples/build_api/randomforest.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/xgbclassifier.py + python examples/build_api/xgbclassifier.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/lgbmclassifier.py + python examples/build_api/lgbmclassifier.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/kneighborsclassifier.py + python examples/build_api/kneighborsclassifier.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/build_name.py + python examples/build_api/build_name.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/cache_dir.py + python examples/build_api/cache_dir.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/no_monitor.py + python examples/build_api/no_monitor.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/rebuild_always.py + python examples/build_api/rebuild_always.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/rebuild_never.py + python examples/build_api/rebuild_never.py rm -rf ~/.cache/turnkey_test_cache - python toolchain/examples/build_api/sequence.py + python examples/build_api/sequence.py # build api tests - python toolchain/test/build_model.py + python test/build_model.py diff --git a/.github/workflows/test_gpu_turnkey.yml b/.github/workflows/test_gpu_turnkey.yml index f0877a3e..860def12 100644 --- a/.github/workflows/test_gpu_turnkey.yml +++ b/.github/workflows/test_gpu_turnkey.yml @@ -63,14 +63,14 @@ jobs: run: | python -m pip install --upgrade pip conda install pylint - pip install -e toolchain + pip install -e . pip install transformers python -m pip check - name: Test with unittest shell: bash -el {0} run: | # E2E tests - python toolchain/test/gpu.py + python test/gpu.py deallocate_vm: needs: build_and_test if: always() diff --git a/.github/workflows/test_turnkey.yml b/.github/workflows/test_turnkey.yml index 80b14eb9..ac4712c3 100644 --- a/.github/workflows/test_turnkey.yml +++ b/.github/workflows/test_turnkey.yml @@ -36,39 +36,40 @@ jobs: python -m pip install --upgrade pip conda install pylint pip install pytest - pip install -e toolchain + pip install -e . 
pip install transformers timm python -m pip check - name: Lint with PyLint shell: bash -el {0} run: | - pylint toolchain/src/turnkeyml --rcfile toolchain/.pylintrc - pylint toolchain/examples --rcfile toolchain/.pylintrc --ignore-paths toolchain/examples/build_api --disable E0401,E0611 + pylint src/turnkeyml --rcfile .pylintrc + pylint examples --rcfile .pylintrc --ignore-paths examples/build_api --disable E0401,E0611 - name: Test with unittest shell: bash -el {0} run: | # Unit tests - python toolchain/test/unit.py + python test/unit.py # turnkey examples # Note: we clear the default cache location prior to each example run rm -rf ~/.cache/turnkey - python toolchain/examples/model_api/hello_world.py + python examples/model_api/hello_world.py rm -rf ~/.cache/turnkey - python toolchain/examples/files_api/onnx_opset.py --onnx-opset 15 + python examples/files_api/onnx_opset.py --onnx-opset 15 rm -rf ~/.cache/turnkey - turnkey toolchain/examples/cli/scripts/hello_world.py + turnkey examples/cli/scripts/hello_world.py rm -rf ~/.cache/turnkey - turnkey toolchain/examples/cli/scripts/multiple_invocations.py + turnkey examples/cli/scripts/multiple_invocations.py rm -rf ~/.cache/turnkey - turnkey toolchain/examples/cli/scripts/max_depth.py --max-depth 1 + turnkey examples/cli/scripts/max_depth.py --max-depth 1 rm -rf ~/.cache/turnkey - turnkey toolchain/examples/cli/scripts/two_models.py + turnkey examples/cli/scripts/two_models.py rm -rf ~/.cache/turnkey - turnkey toolchain/examples/cli/onnx/sample.onnx + # TODO: sample.onnx test is commented because it throws an error in CI + # turnkey examples/cli/onnx/sample.onnx # E2E tests - cd toolchain/test/ + cd test/ python cli.py python analysis.py python model_api.py @@ -76,23 +77,23 @@ jobs: shell: bash -el {0} run: | rm -rf ~/.cache/turnkey - pip install -e toolchain/examples/cli/plugins/example_rt - turnkey toolchain/examples/cli/scripts/hello_world.py --runtime example-rt + pip install -e examples/cli/plugins/example_rt + turnkey examples/cli/scripts/hello_world.py --runtime example-rt rm -rf ~/.cache/turnkey - pip install -e toolchain/examples/cli/plugins/example_seq - turnkey toolchain/examples/cli/scripts/hello_world.py --sequence example-seq + pip install -e examples/cli/plugins/example_seq + turnkey examples/cli/scripts/hello_world.py --sequence example-seq rm -rf ~/.cache/turnkey - pip install -e toolchain/examples/cli/plugins/example_combined - turnkey toolchain/examples/cli/scripts/hello_world.py --runtime example-combined-rt --rt-args delay_before_benchmarking::5 - turnkey toolchain/examples/cli/scripts/hello_world.py --device example_family::part1::config2 - turnkey toolchain/examples/cli/scripts/hello_world.py --device example_family::part1::config1 - turnkey toolchain/examples/cli/scripts/hello_world.py --device example_family::part1 - turnkey toolchain/examples/cli/scripts/hello_world.py --device example_family + pip install -e examples/cli/plugins/example_combined + turnkey examples/cli/scripts/hello_world.py --runtime example-combined-rt --rt-args delay_before_benchmarking::5 + turnkey examples/cli/scripts/hello_world.py --device example_family::part1::config2 + turnkey examples/cli/scripts/hello_world.py --device example_family::part1::config1 + turnkey examples/cli/scripts/hello_world.py --device example_family::part1 + turnkey examples/cli/scripts/hello_world.py --device example_family # E2E tests - cd toolchain/test + cd test python plugins.py - name: Install and Start Slurm if: runner.os != 'Windows' @@ -100,25 +101,27 @@ jobs: 
run: | sudo apt update -y sudo apt install slurm-wlm -y - cp toolchain/test/helpers/slurm.conf toolchain/test/helpers/slurm_modified.conf - sed -i "s/YOUR_HOSTNAME_HERE/$HOSTNAME/" toolchain/test/helpers/slurm_modified.conf - sudo mv toolchain/test/helpers/slurm_modified.conf /etc/slurm/slurm.conf + cp test/helpers/slurm.conf test/helpers/slurm_modified.conf + sed -i "s/YOUR_HOSTNAME_HERE/$HOSTNAME/" test/helpers/slurm_modified.conf + sudo mv test/helpers/slurm_modified.conf /etc/slurm/slurm.conf sudo service slurmd start sudo service slurmctld start sudo service munge start - - name: Test turnkey on Slurm - if: runner.os != 'Windows' - shell: bash -el {0} - run: | - # Create conda environment for Slurm using srun (sbatch + wait) - export SKIP_REQUIREMENTS_INSTALL="True" - export TORCH_CPU="True" - srun toolchain/src/turnkeyml/cli/setup_venv.sh + # TODO: Slurm test is commented out because it isn't working in OMZ CI + # @Daniel to fix and un-comment + # - name: Test turnkey on Slurm + # if: runner.os != 'Windows' + # shell: bash -el {0} + # run: | + # # Create conda environment for Slurm using srun (sbatch + wait) + # export SKIP_REQUIREMENTS_INSTALL="True" + # export TORCH_CPU="True" + # srun src/turnkeyml/cli/setup_venv.sh - # Run tests on Slurm - export TURNKEY_SLURM_USE_DEFAULT_MEMORY="True" - turnkey benchmark toolchain/models/selftest/linear.py --build-only --use-slurm --cache-dir local_cache - bash toolchain/test/helpers/check_slurm_output.sh slurm-2.out + # # Run tests on Slurm + # export TURNKEY_SLURM_USE_DEFAULT_MEMORY="True" + # turnkey benchmark models/selftest/linear.py --build-only --use-slurm --cache-dir local_cache + # bash test/helpers/check_slurm_output.sh slurm-2.out # Below tests are commented out as the GitHub runner runs out of space installing the requirements # - name: Check installation of requirements.txt and their compatibility with turnkey @@ -126,7 +129,7 @@ jobs: # run: | # conda create --name test-requirements python=3.8 # conda activate test-requirements - # pip install -r toolchain/models/requirements.txt + # pip install -r models/requirements.txt # python -m pip check # python -c "import torch_geometric" # conda deactivate \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md
index d1f7fbe2..3119e75c 100644
--- a/README.md
+++ b/README.md
@@ -1,40 +1,201 @@
-# The TurnkeyML Project
 
-[![Turnkey tests](https://github.com/aig-bench/onnxmodelzoo/actions/workflows/test_turnkey.yml/badge.svg)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/test "Check out our tests")
-[![Build API tests](https://github.com/aig-bench/onnxmodelzoo/actions/workflows/test_build_api.yml/badge.svg)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/test "Check out our tests")
-[![Turnkey GPU tests](https://github.com/aig-bench/onnxmodelzoo/actions/workflows/test_gpu_turnkey.yml/badge.svg)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/test "Check out our tests")
-[![OS - Linux](https://img.shields.io/badge/OS-Linux-blue?logo=linux&logoColor=white)](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md "Check out our instructions")
-[![Made with Python](https://img.shields.io/badge/Python-3.8,3.10-blue?logo=python&logoColor=white)](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md "Check out our instructions")
+# Welcome to ONNX TurnkeyML
 
+[![Turnkey tests](https://github.com/onnx/turnkeyml/actions/workflows/test_turnkey.yml/badge.svg)](https://github.com/onnx/turnkeyml/tree/main/test "Check out our tests")
+[![Build API tests](https://github.com/onnx/turnkeyml/actions/workflows/test_build_api.yml/badge.svg)](https://github.com/onnx/turnkeyml/tree/main/test "Check out our tests")
+[![Turnkey GPU tests](https://github.com/onnx/turnkeyml/actions/workflows/test_gpu_turnkey.yml/badge.svg)](https://github.com/onnx/turnkeyml/tree/main/test "Check out our tests")
+[![OS - Linux](https://img.shields.io/badge/OS-Linux-blue?logo=linux&logoColor=white)](https://github.com/onnx/turnkeyml/blob/main/docs/install.md "Check out our instructions")ADD WINDOWS
+[![Made with Python](https://img.shields.io/badge/Python-3.8,3.10-blue?logo=python&logoColor=white)](https://github.com/onnx/turnkeyml/blob/main/docs/install.md "Check out our instructions")
 
-TurnkeyML examines the capability of vendors to provide turnkey solutions to a corpus of hundreds of off-the-shelf models. All of the model scripts and benchmarking code are published as open source software.
+We are on a mission to understand and use as many models as possible while leveraging the right toolchain and AI hardware for the job in every scenario.
 
+Evaluating a deep learning model with a familiar toolchain and hardware accelerator is pretty straightforward. Scaling these evaluations to get apples-to-apples insights across a landscape of millions of permutations of models, toolchains, and hardware targets is not straightforward. Not without help, anyways.
 
-## Benchmarking Tool
+TurnkeyML is a *tools framework* that integrates models, toolchains, and hardware backends to make evaluation and actuation of this landscape as simple as turning a key.
 
-Our _turnkey_ CLI allows you to benchmark Pytorch models without changing a single line of code. The demo below shows BERT-Base being benchmarked on both Nvidia A100 and Intel Xeon. For more information, check out our [Tutorials](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) and [Tools User Guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md).
+## Get started
 
-You can reproduce a nice demo by trying out the [Just Benchmark BERT](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#just-benchmark-bert) tutorial.
+For most users it's as simple as:
 
-## 1000+ Models
+```
+pip install turnkeyml
+turnkey my_model.py
+```
 
-[![Transformers](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/transformers?label=transformers)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/transformers "Transformer models")
-[![Diffusers](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/diffusers?label=diffusers)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/diffusers "Diffusion models")
-[![popular_on_huggingface](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/popular_on_huggingface?label=popular_on_huggingface)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/popular_on_huggingface "Popular Models on Huggingface")
-[![torch_hub](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/torch_hub?label=torch_hub)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/torch_hub "Models from Torch Hub")
-[![torchvision](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/torchvision?label=torchvision)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/torchvision "Models from Torch Vision")
-[![timm](https://img.shields.io/github/directory-file-count/aig-bench/onnxmodelzoo/toolchain/models/timm?label=timm)](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/models/timm "Pytorch Image Models")
+The [installation guide](ADDLINK), [tutorials](ADDLINK), and [user guide](ADDLINK) have everything you need to know.
 
-This repository is home to a diverse corpus of hundreds of models. We are actively working on increasing the number of models on our model library. You can see the set of models in each category by clicking on the corresponding badge.
+## Use Cases
 
-## Installation
+TurnkeyML is designed to support the following use cases. Of course, it is also quite flexible, so we are sure you will come up with some use cases of your own too.
 
-Please refer to our [turnkeyml installation guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md) to get instructions on how to install the turnkeyml package.
+| Use Case | Description |
+|------------------------|-------------|
+| ONNX Model Zoo | Export thousands of ONNX files across different opsets and data types. This is how we generated the contents of the new [ONNX Model Zoo](ADDLINK). |
+| Performance validation | Measure latency and throughput in hardware across devices and runtimes to understand product-market fit. |
+| Functional coverage | Measure the functional coverage of toolchain/hardware combinations over a large corpus of models (e.g., how many models are supported by a novel compiler?). |
+| Stress testing | Run millions of inferences across thousands of models and log all the results to find the bugs in a HW/SW stack. |
+| Model insights | Analyze a model to learn its parameter count, input shapes, which ONNX ops it uses, etc. |
+
+
+
+## Demo
+
+Let's say you have a Python script that includes a PyTorch model. Maybe you downloaded the model from Huggingface, grabbed it from our corpus, or wrote it yourself.
Doesn't matter, just call `turnkey` and get to work.
+
+The `turnkey` CLI will analyze your script, find the model(s), run an ONNX toolchain on the model, and execute the resulting ONNX file in CPU hardware:
+
+```
+> turnkey bert.py
+```
+
+```
+Models discovered during profiling:
+
+bert.py:
+    model (executed 1x)
+        Model Type: Pytorch (torch.nn.Module)
+        Class: BertModel ()
+        Location: /home/jfowers/turnkeyml/models/transformers/bert.py, line 23
+        Parameters: 109,482,240 (417.64 MB)
+        Input Shape: 'attention_mask': (1, 128), 'input_ids': (1, 128)
+        Hash: bf722986
+        Build dir: /home/jfowers/.cache/turnkey/bert_bf722986
+        Status: Successfully benchmarked on AMD Ryzen 9 7940HS w/ Radeon 780M Graphics (ort v1.15.1)
+        Mean Latency: 44.168 milliseconds (ms)
+        Throughput: 22.6 inferences per second (IPS)
+```
+
+Let's say you want an fp16 ONNX file of the same model: incorporate the ONNX ML Tools fp16 converter tool into the build sequence, and the `Build dir` will contain the ONNX file you seek:
+
+```
+> turnkey build bert.py --sequence optimize-fp16
+```
+
+```
+bert.py:
+    model (executed 1x)
+        ...
+        Build dir: /home/jfowers/.cache/turnkey/bert_bf722986
+        Status: Model successfully built!
+```
+
+```
+> ls /home/jfowers/.cache/turnkey/bert_bf722986/onnx
+
+bert_bf722986-op14-base.onnx bert_bf722986-op14-opt-f16.onnx bert_bf722986-op14-opt.onnx
+```
+
+Now you want to see the fp16 model running on your Nvidia GPU with the Nvidia TensorRT runtime:
+
+```
+> turnkey bert.py --sequence export optimize-fp16 --device nvidia --runtime tensorrt
+```
+
+```
+bert.py:
+    model (executed 1x)
+        ...
+        Status: Successfully benchmarked on NVIDIA GeForce RTX 4070 Laptop GPU (trt v23.09-py3)
+        Mean Latency: 2.573 milliseconds (ms)
+        Throughput: 377.8 inferences per second (IPS)
+```
+
+Mad with power, you want to see dozens of fp16 Transformers running on your Nvidia GPU:
+
+```
+> turnkey REPO_ROOT/models/transformers/*.py --sequence optimize-fp16 --device nvidia --runtime tensorrt
+```
+
+```
+Models discovered during profiling:
+
+albert.py:
+    model (executed 1x)
+        Class: AlbertModel ()
+        Parameters: 11,683,584 (44.57 MB)
+        Status: Successfully benchmarked on NVIDIA GeForce RTX 4070 Laptop GPU (trt v23.09-py3)
+        Mean Latency: 1.143 milliseconds (ms)
+        Throughput: 828.3 inferences per second (IPS)
+
+bart.py:
+    model (executed 1x)
+        Class: BartModel ()
+        Parameters: 139,420,416 (531.85 MB)
+        Status: Successfully benchmarked on NVIDIA GeForce RTX 4070 Laptop GPU (trt v23.09-py3)
+        Mean Latency: 2.343 milliseconds (ms)
+        Throughput: 414.5 inferences per second (IPS)
+
+bert.py:
+    model (executed 1x)
+        Class: BertModel ()
+        Parameters: 109,482,240 (417.64 MB)
+        Status: Successfully benchmarked on NVIDIA GeForce RTX 4070 Laptop GPU (trt v23.09-py3)
+        Mean Latency: 2.565 milliseconds (ms)
+        Throughput: 378.0 inferences per second (IPS)
+
+...
+```
+
+Finally, you want to visualize the results in one place so that your boss can see how productive you've been. This command will collect all of the statistics across all prior commands into a single spreadsheet.
+
+```
+> turnkey cache report
+
+Summary spreadsheet saved at /home/jfowers/2023-11-30.csv
+```
+
+You're probably starting to get the idea :rocket:
+
+There are a lot more features you can learn about in the [tutorials](ADDLINK) and [user guide](ADDLINK).
+
+## What's Inside
+
+The TurnkeyML framework has 5 core components:
+- **Analysis tool**: Inspect Python scripts to find the PyTorch models within. Discover insights and pass the models to the other tools.
+- **Build tool**: Prepare your model using industry-standard AI tools (e.g., exporters, optimizers, quantizers, and compilers). Any model-to-model transformation is fair game.
+- **Runtime tool**: Invoke AI runtimes (e.g., ONNX Runtime, TensorRT, etc.) to execute models in hardware and measure key performance indicators.
+- **Reporting tool**: Visualize statistics about the models, builds, and invocations.
+- **Models corpus**: Hundreds of popular PyTorch models that are ready for use with `turnkey`.
+
+All of this is seamlessly integrated such that a command like `turnkey repo/models/corpus/script.py` gets you all of the functionality in one shot. Or you can access functionality piecemeal with commands and APIs like `turnkey analyze script.py` or `build_model(my_model_instance)`. The [tutorials](ADDLINK) show off the individual features.
+
+You can read more about the code organization [here](ADDLINK).
+
+## Extensibility
+
+### Models
+
+[![transformers](https://img.shields.io/github/directory-file-count/onnx/turnkeyml/models/transformers?label=transformers)](https://github.com/onnx/turnkeyml/tree/main/models/transformers "Transformer models")
+[![graph_convolutions](https://img.shields.io/github/directory-file-count/onnx/turnkeyml/models/graph_convolutions?label=graph_convolutions)](https://github.com/onnx/turnkeyml/tree/main/models/graph_convolutions "Graph Convolution models")
+[![torch_hub](https://img.shields.io/github/directory-file-count/onnx/turnkeyml/models/torch_hub?label=torch_hub)](https://github.com/onnx/turnkeyml/tree/main/models/torch_hub "Models from Torch Hub")
+[![torchvision](https://img.shields.io/github/directory-file-count/onnx/turnkeyml/models/torchvision?label=torchvision)](https://github.com/onnx/turnkeyml/tree/main/models/torchvision "Models from Torch Vision")
+[![timm](https://img.shields.io/github/directory-file-count/onnx/turnkeyml/models/timm?label=timm)](https://github.com/onnx/turnkeyml/tree/main/models/timm "Pytorch Image Models")
+
+This repository is home to a diverse corpus of hundreds of models. We are actively working on increasing the number of models in our model library. You can see the set of models in each category by clicking on the corresponding badge.
+
+Evaluating a new model is as simple as taking a Python script that instantiates and invokes a PyTorch `torch.nn.Module` and calling `turnkey` on it. Read about model contributions [here](ADDLINK).
+
+### Plugins
+
+The build tool has built-in support for a variety of export and optimization tools (e.g., the PyTorch-to-ONNX exporter, ONNX ML Tools fp16 converter, etc.). Likewise, the runtime tool comes out of the box with support for x86 and Nvidia devices, along with ONNX Runtime, TensorRT, torch-eager, and torch-compiled runtimes.
+
+If you need more, the TurnkeyML plugin API lets you extend the build and runtime tools with any functionality you like:
+
+```
+> pip install -e my_custom_plugin
+> turnkey my_model.py --sequence my-custom-sequence --device my-custom-device --runtime my-custom-runtime --rt-args my-custom-args
+```
+
+All of the built-in sequences, runtimes, and devices are implemented against the plugin API. Check out this [example plugin](ADDLINK) and the [plugin API guide](ADDLINK).
 
 ## Contributing
 
-We are actively seeking collaborators from across the industry. If you would like to contribute to this project, please check out our [contribution guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/contribute.md).
+We are actively seeking collaborators from across the industry. If you would like to contribute to this project, please check out our [contribution guide](https://github.com/onnx/turnkeyml/blob/main/docs/contribute.md).
+
+## Maintainers
+
+This project is sponsored by the [ONNX Model Zoo](ADDLINK) special interest group (SIG). It is maintained by @danielholanda @jeremyfowers @ramkrishna @vgodsoe in equal measure. You can reach us at [turnkeyml@???.com](ADDLINK) or by filing an [issue](ADDLINK).
 
 ## License
 
-This is a closed source project. The source code and artifacts are AMD internal only. Do not distribute.
+This project is licensed under the [Apache 2.0 License](ADDLINK).
diff --git a/docs/code.md b/docs/code.md
index 5cf228ce..6168492a 100644
--- a/docs/code.md
+++ b/docs/code.md
@@ -8,7 +8,7 @@ The TurnkeyML source code has a few major top-level directories:
 - `examples/api`: examples scripts that invoke the benchmarking API to get the performance of models.
 - `examples/cli`: tutorial series starting in `examples/cli/readme.md` to help learn the `turnkey` CLI.
 - `examples/cli/scripts`: example scripts that can be fed as input into the `turnkey` CLI. These scripts each have a docstring that recommends one or more `turnkey` CLI commands to try out.
-- `models`: the corpora of models that makes up the TurnkeyML models (see [the models readme](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/models/readme.md)).
+- `models`: the corpora of models that make up the TurnkeyML models (see [the models readme](https://github.com/onnx/turnkeyml/blob/main/models/readme.md)).
 - Each subdirectory under `models` represents a corpus of models pulled from somewhere on the internet. For example, `models/torch_hub` is a corpus of models from [Torch Hub](https://github.com/pytorch/hub).
 - `src/turnkey`: source code for the TurnkeyML tools (see [Benchmarking Tools](#benchmarking-tools) for a description of how the code is used).
 - `src/turnkeyml/analysis`: functions for profiling a model script, discovering model instances, and invoking `benchmark_model()` on those instances.
@@ -23,14 +23,14 @@ The TurnkeyML source code has a few major top-level directories:
 
 # Benchmarking Tools
 
-TurnkeyML provides two main tools, the `turnkey` CLI and benchmarking APIs. Instructions for how to use these tools are documented in the [Tools User Guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md), while this section is about how the source code is invoked to implement the tools. All of the code below is located under `src/turnkeyml/`.
+TurnkeyML provides two main tools, the `turnkey` CLI and benchmarking APIs. Instructions for how to use these tools are documented in the [Tools User Guide](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md), while this section is about how the source code is invoked to implement the tools. All of the code below is located under `src/turnkeyml/`.
 
-1. The `turnkey` CLI is the comprehensive frontend that wraps all the other code. It is implemented in [cli/cli.py](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/src/turnkeyml/cli/cli.py).
-1. The default command for `turnkey` CLI runs the `benchmark_files()` API, which is implemented in [files_api.py](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/src/turnkeyml/files_api.py).
+1. The `turnkey` CLI is the comprehensive frontend that wraps all the other code.
It is implemented in [cli/cli.py](https://github.com/onnx/turnkeyml/blob/main/src/turnkeyml/cli/cli.py).
+1. The default command for `turnkey` CLI runs the `benchmark_files()` API, which is implemented in [files_api.py](https://github.com/onnx/turnkeyml/blob/main/src/turnkeyml/files_api.py).
 - Other CLI commands are also implemented in `cli/`, for example the `report` command is implemented in `cli/report.py`.
-1. The `benchmark_files()` API takes in a set of scripts, each of which should invoke at least one model instance, to evaluate and passes each into the `evaluate_script()` function for analysis, which is implemented in [analyze/script.py](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/src/turnkeyml/analyze/script.py).
-1. `evaluate_script()` uses a profiler to discover the model instances in the script, and passes each into the `benchmark_model()` API, which is defined in [model_api.py](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/src/turnkeyml/model_api.py).
-1. The `benchmark_model()` API prepares the model for benchmarking (e.g., exporting and optimizing an ONNX file), which creates an instance of a `*Model` class, where `*` can be CPU, GPU, etc. The `*Model` classes are defined in [run/](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/src/turnkeyml/run/).
+1. The `benchmark_files()` API takes in a set of scripts, each of which should invoke at least one model instance, and passes each script into the `evaluate_script()` function for analysis, which is implemented in [analyze/script.py](https://github.com/onnx/turnkeyml/blob/main/src/turnkeyml/analyze/script.py).
+1. `evaluate_script()` uses a profiler to discover the model instances in the script, and passes each into the `benchmark_model()` API, which is defined in [model_api.py](https://github.com/onnx/turnkeyml/blob/main/src/turnkeyml/model_api.py).
+1. The `benchmark_model()` API prepares the model for benchmarking (e.g., exporting and optimizing an ONNX file), which creates an instance of a `*Model` class, where `*` can be CPU, GPU, etc. The `*Model` classes are defined in [run/](https://github.com/onnx/turnkeyml/blob/main/src/turnkeyml/run/).
 1. The `*Model` classes provide a `.benchmark()` method that benchmarks the model on the device and returns an instance of the `MeasuredPerformance` class, which includes the performance statistics acquired during benchmarking.
 1. `benchmark_model()` and the `*Model` classes are built using [`build_model()`](#model-build-tool)
diff --git a/docs/contribute.md b/docs/contribute.md
index dae88933..d1e88dca 100644
--- a/docs/contribute.md
+++ b/docs/contribute.md
@@ -4,7 +4,7 @@
 Hello and welcome to the project! 🎉
 
 We're thrilled that you're considering contributing to the project. This project is a collaborative effort and we welcome contributors from everyone.
 
-Before you start, please take a few moments to read through these guidelines. They are designed to make the contribution process easy and effective for everyone involved. Also take a look at the [code organization](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/code.md) for a bird's eye view of the repository.
+Before you start, please take a few moments to read through these guidelines. They are designed to make the contribution process easy and effective for everyone involved. Also take a look at the [code organization](https://github.com/onnx/turnkeyml/blob/main/docs/code.md) for a bird's eye view of the repository.
The guidelines document is organized as the following sections:
 - [Contributing a model](#contributing-a-model)
@@ -18,13 +18,13 @@ The guidelines document is organized as the following sections:
 
 ## Contributing a model
 
-One of the easiest ways to contribute is to add a model to the benchmark. To do so, simply add a `.py` file to the `models/` directory that instantiates and calls a supported type of model (see [Tools User Guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md) to learn more). The automated benchmarking infrastructure will do the rest!
+One of the easiest ways to contribute is to add a model to the benchmark. To do so, simply add a `.py` file to the `models/` directory that instantiates and calls a supported type of model (see [Tools User Guide](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md) to learn more). The automated benchmarking infrastructure will do the rest!
 
 ## Contributing a plugin
 
-TurnkeyML supports a variety of built-in build sequences, runtimes, and devices (see the [Devices and Runtimes table](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#devices-runtimes-table)). You can contribute a plugin to add support for a different build sequence, runtime, or device of your choosing.
+TurnkeyML supports a variety of built-in build sequences, runtimes, and devices (see the [Devices and Runtimes table](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#devices-runtimes-table)). You can contribute a plugin to add support for a different build sequence, runtime, or device of your choosing.
 
-A turnkey plugin is a pip-installable package that implements support for building a model using a custom sequence and/or benchmarking a model on a [device](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#device) with a [runtime](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#runtime). These packages must adhere to a specific interface that is documented below.
+A turnkey plugin is a pip-installable package that implements support for building a model using a custom sequence and/or benchmarking a model on a [device](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#device) with a [runtime](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#runtime). These packages must adhere to a specific interface that is documented below.
 
 ### Plugin Directory Layout
@@ -60,12 +60,12 @@ We require the following naming scheme:
 
 Plugins can implement one or more runtimes.
 
-> See [example_rt](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/examples/cli/plugins/example_rt) for an example of a minimal runtime plug in. This example is used below to help explain the interface.
+> See [example_rt](https://github.com/onnx/turnkeyml/tree/main/examples/cli/plugins/example_rt) for an example of a minimal runtime plug-in. This example is used below to help explain the interface.
 
 To add a runtime to a plugin:
 
 1. Pick a unique name, `<runtime_name>`, for each runtime that will be supported by the plugin.
- This name will be used in the `turnkey --runtime <runtime_name>` [argument](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#runtimes)
+ - This name will be used in the `turnkey --runtime <runtime_name>` [argument](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#runtimes)
 - For example, a runtime named `example-rt` would be invoked with `turnkey --runtime example-rt`
 1. Populate the [Implements Dictionary](#implements-dictionary) with a per-runtime dictionary with the following fields:
@@ -75,7 +75,7 @@ To add a runtime to a plugin:
 - Each supported part within a device family must be defined as a dictionary.
 - Each supported configuration within a device model must be defined as a list.
 - Example: `"supported_devices": {"family1":{"part1":["config1","config2"]}}`.
- - See [example_combined](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/examples/cli/plugins/example_combined) for a plugin implementation example that leverages this feature.
+ - See [example_combined](https://github.com/onnx/turnkeyml/tree/main/examples/cli/plugins/example_combined) for a plugin implementation example that leverages this feature.
 - Note: If a device is already supported by the tools, this simply adds support for another runtime to that device. If the device is _not_ already supported by the tools, this also adds support for that device and it will start to appear as an option for the `turnkey --device <device_name>` argument.
 - `"build_required": Bool`: indicates whether the `build_model()` API should be called on the `model` and `inputs`.
 - `"docker_required": Bool`: indicates whether benchmarking is implemented through a docker container.
@@ -101,12 +101,12 @@ To add a runtime to a plugin:
 
 Plugins can implement one or more build sequences.
 
-> See [example_seq](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/examples/cli/plugins/example_seq) for an example of a minimal sequence plug in. This example is used below to help explain the interface.
+> See [example_seq](https://github.com/onnx/turnkeyml/tree/main/examples/cli/plugins/example_seq) for an example of a minimal sequence plug-in. This example is used below to help explain the interface.
 
 To add a build sequence to a plugin:
 
 1. Pick a unique name, `<sequence_name>`, for each sequence that will be supported by the plugin.
- - This name will be used in the `turnkey --sequence <sequence_name>` [argument](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#sequence)
+ - This name will be used in the `turnkey --sequence <sequence_name>` [argument](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#sequence)
 - For example, a sequence named `example-seq` would be invoked with `turnkey --sequence example-seq`
 1. Populate the [Implements Dictionary](#implements-dictionary) with a per-sequence dictionary with the following fields:
@@ -147,7 +147,7 @@ implements = {
 
 ### Runtime Class
 
-A runtime class inherits the abstract base class [`BaseRT`](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/src/turnkeyml/run/basert.py) and implements a one or more [runtimes](#runtime) to provide benchmarking support for one or more [devices](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#devices).
+A runtime class inherits the abstract base class [`BaseRT`](https://github.com/onnx/turnkeyml/tree/main/src/turnkeyml/run/basert.py) and implements one or more [runtimes](#runtime) to provide benchmarking support for one or more [devices](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#devices).
 
 `BaseRT` has 4 methods that plugin developers must overload:
 - `_setup()`: any code that should be called prior to benchmarking as a one-time setup. Called automatically at the end of `BaseRT.__init__()`.
@@ -179,7 +179,7 @@ Where:
 
 API users can pass an arbitrary dictionary of arguments, e.g., `benchmark_files(rt_args=Dict[str, Union[str, List[str]]])`.
 
-See [example_combined](https://github.com/aig-bench/onnxmodelzoo/tree/main/toolchain/examples/cli/plugins/example_combined) for an example.
+See [example_combined](https://github.com/onnx/turnkeyml/tree/main/examples/cli/plugins/example_combined) for an example.
 
 ### Execute Method
 
@@ -204,17 +204,17 @@ If you wish to contribute to any other part of the repository such as examples o
 
 ## Issues
 
-Please file any bugs or feature requests you have as an [Issue](https://github.com/aig-bench/onnxmodelzoo/issues) and we will take a look.
+Please file any bugs or feature requests you have as an [Issue](https://github.com/onnx/turnkeyml/issues) and we will take a look.
 
 ## Pull Requests
 
-Contribute code by creating a pull request (PR). Your PR will be reviewed by one of the [repo maintainers](https://github.com/aig-bench/onnxmodelzoo/blob/main/CODEOWNERS).
+Contribute code by creating a pull request (PR). Your PR will be reviewed by one of the [repo maintainers](https://github.com/onnx/turnkeyml/blob/main/CODEOWNERS).
 
-Please have a discussion with the team before making major changes. The best way to start such a discussion is to file an [Issue](https://github.com/aig-bench/onnxmodelzoo/issues) and seek a response from one of the [repo maintainers](https://github.com/aig-bench/onnxmodelzoo/blob/main/CODEOWNERS).
+Please have a discussion with the team before making major changes. The best way to start such a discussion is to file an [Issue](https://github.com/onnx/turnkeyml/issues) and seek a response from one of the [repo maintainers](https://github.com/onnx/turnkeyml/blob/main/CODEOWNERS).
 
 ## Testing
 
-Tests are defined in `tests/` and run automatically on each PR, as defined in our [testing action](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/.github/workflows/test.yml). This action performs both linting and unit testing and must succeed before code can be merged.
+Tests are defined in `tests/` and run automatically on each PR, as defined in our [testing action](https://github.com/onnx/turnkeyml/blob/main/.github/workflows/test.yml). This action performs both linting and unit testing and must succeed before code can be merged.
 
 We don't have any fancy testing framework set up yet. If you want to run tests locally:
 - Activate a `conda` environment that has `turnkey` (this package) installed.
@@ -224,4 +224,4 @@ We don't have any fancy testing framework set up yet. If you want to run tests l
 
 ## Versioning
 
-We use semantic versioning, as described in [versioning.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/versioning.md).
+We use semantic versioning, as described in [versioning.md](https://github.com/onnx/turnkeyml/blob/main/docs/versioning.md).
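To make the plugin interface above concrete, here is a minimal sketch of the `implements` dictionary for a hypothetical runtime plugin. Only `supported_devices`, `build_required`, and `docker_required` are documented above; the top-level `runtimes` key, the `RuntimeClass` field, and the `ExampleRT` class are assumptions modeled on the `example_rt` plugin, which should be treated as the authoritative reference.

```python
# Hypothetical plugin __init__.py (a sketch, not the actual example_rt source).
# ExampleRT is assumed to be a BaseRT subclass defined in the plugin's runtime.py.
from .runtime import ExampleRT

implements = {
    "runtimes": {
        "example-rt": {
            # Device family -> part -> list of configs, as documented above
            "supported_devices": {"example_family": {"part1": ["config1"]}},
            # Whether build_model() should be called on the model and inputs
            "build_required": False,
            # Whether benchmarking runs inside a docker container
            "docker_required": False,
            # Assumed field name: the BaseRT subclass that executes benchmarking
            "RuntimeClass": ExampleRT,
        }
    }
}
```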
diff --git a/docs/coverage.md b/docs/coverage.md
index 4b1a862b..9cf1fc34 100644
--- a/docs/coverage.md
+++ b/docs/coverage.md
@@ -10,7 +10,7 @@ On your main `tkml` environment, run `pip install coverage`.
 
 ### Gathering Results
 
-To gather results, cd into the test folder on `toolchain\test` and call `coverage run` on each of the tests as shown below.
+To gather results, cd into the test folder at `REPO_ROOT/test` and call `coverage run` on each of the tests as shown below.
 
 ```
 coverage run --data-file=.coverage_unit -m unittest unit.py
diff --git a/docs/install.md b/docs/install.md
index 149d0188..020f2b1f 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -32,13 +32,13 @@ conda activate tkml
 
 First, make sure you have a copy of the repository locally:
 
 ```
-git clone https://github.com/aig-bench/onnxmodelzoo.git
+git clone https://github.com/onnx/turnkeyml.git
 ```
 
 Then, simply pip install the TurnkeyML package:
 
 ```
-pip install -e onnxmodelzoo/toolchain
+pip install -e turnkeyml
 ```
 
 You are now done installing TurnkeyML!
@@ -47,15 +47,15 @@ If you are planning to use the `turnkey` tools with the TurnkeyML models or Slur
 
 ## TurnkeyML Models Requirements
 
-The TurnkeyML models are located at `install_path/toolchain/models`, which we refer to as `models/` in most of the guides.
+The TurnkeyML models are located at `install_path/models`, which we refer to as `models/` in most of the guides.
 
 > _Note_: The `turnkey models location` command and `turnkey.common.filesystem.MODELS_DIR` are useful ways to locate the `models` directory. If you perform PyPI installation, we recommend that you take an additional step like this:
 
 ```
 (tkml) jfowers:~$ turnkey models location
-Info: The TurnkeyML models directory is: ~/onnxmodelzoo/toolchain/models
-(tkml) jfowers:~$ export models=~/onnxmodelzoo/toolchain/models
+Info: The TurnkeyML models directory is: ~/turnkeyml/models
+(tkml) jfowers:~$ export models=~/turnkeyml/models
 ```
 
 The `turnkeyml` package only requires the packages to run the tools. If you want to run the models as well, you will also have to install the models' requirements.
@@ -72,11 +72,11 @@ Slurm is an open source workload manager for clusters. If you would like to use
 
 ### Setup your Slurm environment
 
-Ensure that your onnxmodelzoo folder and your conda installation are both inside a shared volume that can be accessed by Slurm.
+Ensure that your turnkeyml clone and your conda installation are both inside a shared volume that can be accessed by Slurm.
 Then, run the following command and wait for the Slurm job to finish:
 
 ```
-sbatch --mem=128000 toolchain/src/turnkeyml/cli/setup_venv.sh
+sbatch --mem=128000 src/turnkeyml/cli/setup_venv.sh
 ```
 
 ### Get an API token from Huggingface.co (optional)
diff --git a/docs/readme.md b/docs/readme.md
index fd70d595..55ec96c0 100644
--- a/docs/readme.md
+++ b/docs/readme.md
@@ -1,11 +1,11 @@
 # TurnkeyML Documentation
 
 This directory contains documentation for the TurnkeyML project:
-- [code.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/code.md): Code organization for the benchmark and tools.
-- [install.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md): Installation instructions for the tools.
-- [tools_user_guide.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md): User guide for the tools: `turnkey` CLI, `benchmark_files()`, and `benchmark_model()`.
-- [versioning.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/versioning.md): Defines the semantic versioning rules for the `turnkey` package. +- [code.md](https://github.com/onnx/turnkeyml/blob/main/docs/code.md): Code organization for the benchmark and tools. +- [install.md](https://github.com/onnx/turnkeyml/blob/main/docs/install.md): Installation instructions for the tools. +- [tools_user_guide.md](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md): User guide for the tools: `turnkey` CLI, `benchmark_files()`, and `benchmark_model()`. +- [versioning.md](https://github.com/onnx/turnkeyml/blob/main/docs/versioning.md): Defines the semantic versioning rules for the `turnkey` package. There is more useful documentation available in: -- [examples/cli/readme.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md): Tutorial series for learning the `turnkey` CLI. -- [models/readme.md](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/models/readme.md): Tutorial for understanding the models and how to use `turnkey` to evaluate the models. \ No newline at end of file +- [examples/cli/readme.md](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md): Tutorial series for learning the `turnkey` CLI. +- [models/readme.md](https://github.com/onnx/turnkeyml/blob/main/models/readme.md): Tutorial for understanding the models and how to use `turnkey` to evaluate the models. \ No newline at end of file diff --git a/docs/tools_user_guide.md b/docs/tools_user_guide.md index 7fb76c3a..d9819979 100644 --- a/docs/tools_user_guide.md +++ b/docs/tools_user_guide.md @@ -1,8 +1,8 @@ # Tools User Guide -The TurnkeyML package provides a CLI, `turnkey`, and Python API for benchmarking machine learning and deep learning models. This document reviews the functionality provided by the package. If you are looking for repo and code organization, you can find that [here](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/code.md). +The TurnkeyML package provides a CLI, `turnkey`, and Python API for benchmarking machine learning and deep learning models. This document reviews the functionality provided by the package. If you are looking for repo and code organization, you can find that [here](https://github.com/onnx/turnkeyml/blob/main/docs/code.md). -For a hands-on learning approach, check out the [`turnkey` CLI tutorials](#https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md). +For a hands-on learning approach, check out the [`turnkey` CLI tutorials](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md). The tools currently support the following combinations of runtimes and devices: @@ -54,11 +54,11 @@ The `turnkey` CLI performs the following steps: 2. [Build](#build): call the `benchmark_files()` [API](#the-turnkey-api) to prepare each model for benchmarking 3. [Benchmark](#benchmark): call the `benchmark_model()` [API](#the-turnkey-api) on each model to gather performance statistics -_Note_: The benchmarking methodology is defined [here](#benchmark). If you are looking for more detailed instructions on how to install turnkey, you can find that [here](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md). +_Note_: The benchmarking methodology is defined [here](#benchmark). If you are looking for more detailed instructions on how to install turnkey, you can find that [here](https://github.com/onnx/turnkeyml/blob/main/docs/install.md). 
-> For a detailed example, see the [CLI Hello World tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#hello-world). +> For a detailed example, see the [CLI Hello World tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#hello-world). -> `turnkey` can also benchmark ONNX files with a command like `turnkey your_model.onnx`. See the [CLI ONNX tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#onnx-benchmarking) for details. However, the majority of this document focuses on the use case of passing .py scripts as input to `turnkey`. +> `turnkey` can also benchmark ONNX files with a command like `turnkey your_model.onnx`. See the [CLI ONNX tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#onnx-benchmarking) for details. However, the majority of this document focuses on the use case of passing .py scripts as input to `turnkey`. # The TurnkeyML API @@ -68,7 +68,7 @@ Most of the functionality provided by the `turnkey` CLI is also available in the - The main difference is that `benchmark_model()` does not include the [Analysis](#analysis) feature, and `benchmark_files()` does. - `turnkey.build_model(model, inputs)` is used to programmatically [build](#build) a model instance through a sequence of model-to-model transformations (e.g., starting with an fp32 PyTorch model and ending with an fp16 ONNX model). -Generally speaking, the `turnkey` CLI is a command line interface for the `benchmark_files()` API, which internally calls `benchmark_model()`, which in turn calls `build_model()`. You can read more about this code organization [here](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/code.md). +Generally speaking, the `turnkey` CLI is a command line interface for the `benchmark_files()` API, which internally calls `benchmark_model()`, which in turn calls `build_model()`. You can read more about this code organization [here](https://github.com/onnx/turnkeyml/blob/main/docs/code.md). For an example of `benchmark_model()`, the following script: @@ -139,7 +139,7 @@ A **runtime** is a piece of software that executes a model on a device. > _Note_: the `turnkey` CLI and `benchmark_files()` API both run your entire python script(s) whenever python script(s) are passed as input files. Please ensure that these scripts are safe to run, especially if you got them from the internet. -> See the [Multiple Models per Script tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#multiple-models-per-script) for a detailed example of how analysis can discover multiple models from a single script. +> See the [Multiple Models per Script tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#multiple-models-per-script) for a detailed example of how analysis can discover multiple models from a single script. ## Model Hashes @@ -255,7 +255,7 @@ Also available as API arguments: - `benchmark_files(device=...)` - `benchmark_model(device=...)`. -> For a detailed example, see the [CLI Nvidia tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#nvidia-benchmarking). +> For a detailed example, see the [CLI Nvidia tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#nvidia-benchmarking). ### Runtimes @@ -324,7 +324,7 @@ Examples: - `turnkey selected_models.txt` - Benchmark all models listed inside the text file. 
-> See the [Benchmark Multiple Scripts tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md#benchmark-multiple-scripts) for a detailed example. +> See the [Benchmark Multiple Scripts tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md#benchmark-multiple-scripts) for a detailed example. You can also leverage model hashes (see [Model Hashes](#model-hashes)) to filter which models in a script will be acted on, in the following manner: - `turnkey example.py::hash_0` will only benchmark the model corresponding to `hash_0`. @@ -332,7 +332,7 @@ You can also leverage model hashes (see [Model Hashes](#model-hashes)) to filter > _Note_: Using bash regular expressions and filtering models by hashes are mutually exclusive. To filter models by hashes, provide the full path of the Python script rather than a regular expression. -> See the [Filtering Model Hashes tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md#filtering-model-hashes) for a detailed example. +> See the [Filtering Model Hashes tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md#filtering-model-hashes) for a detailed example. Additionally, you can leverage labels (see [Labels](#labels)) to filter which models in a script will be acted on, in the following manner: - `turnkey *.py --labels test_group::a` will only benchmark the scripts labeled with `test_group::a`. @@ -355,9 +355,9 @@ Usage: Available as an API argument: - `benchmark_files(use_slurm=True/False)` (default False) -> _Note_: Requires setting up Slurm as shown [here](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md). +> _Note_: Requires setting up Slurm as shown [here](https://github.com/onnx/turnkeyml/blob/main/docs/install.md). -> _Note_: while `--use-slurm` is implemented, and we use it for our own purposes, it has some limitations and we do not recommend using it. Currently, `turnkey` has some Slurm to be configuration assumptions that we have not documented yet. Please contact the developers by [filing an issue](https://github.com/aig-bench/onnxmodelzoo/issues/new) if you need Slurm support for your project. +> _Note_: while `--use-slurm` is implemented, and we use it for our own purposes, it has some limitations and we do not recommend using it. Currently, `turnkey` makes some Slurm configuration assumptions that we have not documented yet. Please contact the developers by [filing an issue](https://github.com/onnx/turnkeyml/issues/new) if you need Slurm support for your project. > _Note_: Slurm mode applies a timeout to each job, and will cancel the job if the timeout is exceeded. See [Set the Timeout](#set-the-timeout) @@ -384,7 +384,7 @@ Also available as API arguments: - `benchmark_model(cache_dir=...)` - `build_model(cache_dir=...)` -> See the [Cache Directory tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md#cache-directory) for a detailed example. +> See the [Cache Directory tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md#cache-directory) for a detailed example. ### Lean Cache @@ -396,7 +396,7 @@ Also available as API arguments: > _Note_: useful for benchmarking many models, since the `build` artifacts from the models can take up a significant amount of hard drive space. 
-> See the [Lean Cache tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md#lean-cache) for a detailed example. +> See the [Lean Cache tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md#lean-cache) for a detailed example. ### Rebuild Policy @@ -435,7 +435,7 @@ Usage: Also available as an API argument: - `benchmark_files(script_args=...)` -> See the [Parameters documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/models/readme.md#parameters) for a detailed example. +> See the [Parameters documentation](https://github.com/onnx/turnkeyml/blob/main/models/readme.md#parameters) for a detailed example. ### Maximum Analysis Depth @@ -449,7 +449,7 @@ Also available as an API argument: > _Note_: `--max-depth` values greater than 0 are only supported for PyTorch models. -> See the [Maximum Analysis Depth tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md#maximum-analysis-depth) for a detailed example. +> See the [Maximum Analysis Depth tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md#maximum-analysis-depth) for a detailed example. ### ONNX Opset @@ -489,7 +489,7 @@ Usage: Also available as an API argument: - `benchmark_files(analyze_only=True/False)` (default False) -> See the [Analyze Only tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md#analyze-only) for a detailed example. +> See the [Analyze Only tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md#analyze-only) for a detailed example. ### Build Only @@ -505,13 +505,13 @@ Also available as API arguments: - `benchmark_files(build_only=True/False)` (default False) - `benchmark_model(build_only=True/False)` (default False) -> See the [Build Only tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md#build-only) for a detailed example. +> See the [Build Only tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md#build-only) for a detailed example. ### Custom Runtime Arguments Users can pass arbitrary arguments into a runtime, as long as the target runtime supports those arguments, by using the `--rt-args` argument. -None of the built-in runtimes support such arguments, however plugin contributors can use this interface to add arguments to their custom runtimes. See [plugins contribution guideline](https://github.com/aigdat/onnxmodelzoo/blob/main/toolchain/docs/contribute.md#contributing-a-plugin) for details. +None of the built-in runtimes support such arguments; however, plugin contributors can use this interface to add arguments to their custom runtimes. See [plugins contribution guideline](https://github.com/onnx/turnkeyml/blob/main/docs/contribute.md#contributing-a-plugin) for details. Also available as API arguments: - `benchmark_files(rt_args=Dict)` (default None) @@ -529,7 +529,7 @@ The `cache` commands help you manage the `turnkey cache` and get information abo > _Note_: `cache list` is not available as an API. -> See the [Cache Commands tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md#cache-commands) for a detailed example. +> See the [Cache Commands tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md#cache-commands) for a detailed example. 
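Before moving on to the `cache` subcommands, here is a minimal sketch of the Build Only API arguments described above. It assumes `benchmark_files` is importable from the top-level `turnkeyml` package and that the keyword names match the argument lists quoted in this guide.

```python
from turnkeyml import benchmark_files

# Build the selftest model without benchmarking it; the build artifacts
# land in the turnkey cache for later inspection or reuse.
benchmark_files(
    input_files=["models/selftest/linear.py"],
    build_only=True,
)
```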
### `cache stats` Command @@ -540,7 +540,7 @@ The `cache` commands help you manage the `turnkey cache` and get information abo > _Note_: `cache stats` is not available as an API. -> See the [Cache Commands tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md#cache-commands) for a detailed example. +> See the [Cache Commands tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md#cache-commands) for a detailed example. ### `cache delete` Command @@ -552,7 +552,7 @@ The `cache` commands help you manage the `turnkey cache` and get information abo > _Note_: `cache delete` is not available as an API. -> See the [Cache Commands tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md#cache-commands) for a detailed example. +> See the [Cache Commands tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md#cache-commands) for a detailed example. ### `cache clean` Command @@ -585,7 +585,7 @@ The `models` commands help you work with the turnkey models provided in the pack ### `models location` Command -`turnkey models location` prints out the location of the [models directory](https://github.com/aig-bench/onnxmodelzoo/tree/main/models) with over 1000 models. It presents the following options: +`turnkey models location` prints out the location of the [models directory](https://github.com/onnx/turnkeyml/tree/main/models) with over 1000 models. It presents the following options: - `--quiet` Command output will only include the directory path @@ -677,7 +677,7 @@ The tools support a variety of built-in build sequences, runtimes, and devices ( A turnkey plugin is a pip-installable package that implements support for building a model using a custom sequence and/or benchmarking a model on a [device](#device) with a [runtime](#runtime). These packages must adhere to a specific plugin template. -For more details on implementing a plugin, please refer to the [plugins contribution guideline](https://github.com/aigdat/onnxmodelzoo/blob/main/toolchain/docs/contribute.md#contributing-a-plugin) +For more details on implementing a plugin, please refer to the [plugins contribution guideline](https://github.com/onnx/turnkeyml/blob/main/docs/contribute.md#contributing-a-plugin) # Build API Arguments diff --git a/examples/cli/build.md b/examples/cli/build.md index 2c46d7b2..f8997217 100644 --- a/examples/cli/build.md +++ b/examples/cli/build.md @@ -5,9 +5,9 @@ This chapter of the `turnkey` CLI tutorial focuses on techniques to customize th - [How to customize the build process with Sequences](#sequence-file) The tutorial chapters are: -1. [Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` CLI arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` CLI arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. +1. [Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` CLI arguments that customize the model discovery process to help streamline your workflow. +1. 
[Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` CLI arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. 1. Customizing Builds (this document): `turnkey` CLI arguments that customize build behavior to unlock new workflows. # Build Tutorials @@ -33,7 +33,7 @@ hello_world.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/hello_world.py, line 29 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/hello_world.py, line 29 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully built! @@ -45,12 +45,12 @@ Woohoo! The 'benchmark' command is complete. You can see that the model is discovered and built, but no benchmark took place. -> See the [Build Only documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#build-only) for more details. +> See the [Build Only documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#build-only) for more details. # Thanks! Now that you have completed this tutorial, make sure to check out the other tutorials if you want to learn more: -1. [Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. +1. [Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. +1. [Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. 1. Customizing Builds (this document): `turnkey` arguments that customize build behavior to unlock new workflows. \ No newline at end of file diff --git a/examples/cli/cache.md b/examples/cli/cache.md index 2ebe7e70..fe890c2e 100644 --- a/examples/cli/cache.md +++ b/examples/cli/cache.md @@ -9,10 +9,10 @@ This chapter of the `turnkey` CLI tutorials is focused on understanding, inspect - [How to keep your filesystem from filling up with build artifacts](#lean-cache) The tutorial chapters are: -1. [Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. +1. [Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. 1. 
Working with the Cache (this document): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. # Cache Tutorials @@ -20,7 +20,7 @@ All of the tutorials assume that your current working directory is in the same l ## Cache Directory -By default, the tools use `~/.cache/turnkey/` as the location for the cache (see the [Build documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#build) for more details). +By default, the tools use `~/.cache/turnkey/` as the location for the cache (see the [Build documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#build) for more details). However, you might want to set the cache location for any number of reasons. For example, you might want to keep the results from benchmarking one corpus of models separate from the results from another corpus. @@ -34,11 +34,11 @@ When that command completes, you can use the `ls` command to see that `tmp_cache See the Cache Commands tutorials below to see what you can do with the cache. -> See the [Cache Directory documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#cache-directory) for more details. +> See the [Cache Directory documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#cache-directory) for more details. ## Cache List Command -This tutorial assumes you have completed the [Cache Directory](#cache-directory) and [Benchmark Multiple Scripts documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#benchmark-multiple-scripts) tutorials, and that the `tmp_cache` directory exists at your command line location. +This tutorial assumes you have completed the [Cache Directory](#cache-directory) and [Benchmark Multiple Scripts documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#benchmark-multiple-scripts) tutorials, and that the `tmp_cache` directory exists at your command line location. You can use the `cache list` command to see what builds are available in your cache: @@ -113,7 +113,7 @@ Info: Builds available in cache tmp_cache: hello_world_479b1332 ``` -> See the [Cache Commands documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#cache-commands) for more details. +> See the [Cache Commands documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#cache-commands) for more details. ## Lean Cache @@ -170,14 +170,14 @@ total 20K 0 -rw-r--r-- 1 jfowers 0 Feb 16 08:14 log_set_success.txt ``` -> See the [Lean Cache documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#lean-cache) for more details. +> See the [Lean Cache documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#lean-cache) for more details. > _Note_: If you want to get rid of build artifacts after the build is done, you can run `turnkey cache clean build_name`. # Thanks! Now that you have completed this tutorial, make sure to check out the other tutorials if you want to learn more: -1. 
[Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. +1. [Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. 1. Working with the Cache (this document): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. \ No newline at end of file +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. \ No newline at end of file diff --git a/examples/cli/discovery.md b/examples/cli/discovery.md index 6fb14586..ef434d8e 100644 --- a/examples/cli/discovery.md +++ b/examples/cli/discovery.md @@ -7,10 +7,10 @@ This chapter of the `turnkey` CLI tutorial is focused on how to guide the tool a - [How to filter which models are passed to the build and benchmark operations](#filtering-model-hashes) The tutorial chapters are: -1. [Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) +1. [Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) 1. Guiding Model Discovery (this document): `turnkey` arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. +1. [Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. # Model Discovery Tutorials @@ -35,7 +35,7 @@ hello_world.py: pytorch_model (executed 1x - 0.00s) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/hello_world.py, line 29 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/hello_world.py, line 29 Parameters: 55 (<0.1 MB) Hash: 479b1332 @@ -46,7 +46,7 @@ Woohoo! The 'benchmark' command is complete. You can see that the model is discovered, and some stats are printed, but no build or benchmark took place. -> See the [Analyze Only documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#analyze-only) for more details. +> See the [Analyze Only documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#analyze-only) for more details. 
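The same behavior is available from Python. A minimal sketch follows, assuming `benchmark_files` is importable from the top-level `turnkeyml` package as described in the Tools User Guide.

```python
from turnkeyml import benchmark_files

# Discover and profile the models in the script, then stop before any
# build or benchmark takes place (mirrors the --analyze-only flag).
benchmark_files(
    input_files=["scripts/hello_world.py"],
    analyze_only=True,
)
```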
## Benchmark Multiple Scripts @@ -74,7 +74,7 @@ hello_world.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/hello_world.py, line 29 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/hello_world.py, line 29 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -85,7 +85,7 @@ two_models.py: another_pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/two_models.py, line 40 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/two_models.py, line 40 Parameters: 510 (<0.1 MB) Hash: 215ca1e3 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -96,7 +96,7 @@ max_depth.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: TwoLayerModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/max_depth.py, line 41 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/max_depth.py, line 41 Parameters: 85 (<0.1 MB) Hash: 80b93950 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -119,7 +119,7 @@ two_models.py max_depth.py ``` -> See the [Benchmark Multiple Scripts documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#benchmark-multiple-scripts) for more details. +> See the [Benchmark Multiple Scripts documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#benchmark-multiple-scripts) for more details. ## Maximum Analysis Depth @@ -133,7 +133,7 @@ For example, if you run this command: turnkey benchmark scripts/max_depth.py ``` -You will get a result that looks very similar to the [Hello World tutorial](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#hello-world) tutorial. However, if you peek into `max_depth.py`, you can see that there are two `torch.nn.Linear` modules that make up the top-level model. +You will get a result that looks very similar to the [Hello World tutorial](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#hello-world). However, if you peek into `max_depth.py`, you can see that there are two `torch.nn.Linear` modules that make up the top-level model. You can analyze and benchmark those `torch.nn.Linear` modules with this command: @@ -150,7 +150,7 @@ max_depth.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: TwoLayerModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/max_depth.py, line 41 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/max_depth.py, line 41 Parameters: 85 (<0.1 MB) Hash: 80b93950 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -179,15 +179,15 @@ max_depth.py: You can see that the two instances of `torch.nn.Linear`, `fc` and `fc2`, are benchmarked in addition to the top-level model, `pytorch_model`. -> See the [Max Depth documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#maximum-analysis-depth) for more details. +> See the [Max Depth documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#maximum-analysis-depth) for more details. 
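Analysis depth can also be set from Python. The sketch below assumes the API argument mirrors the `--max-depth` CLI flag under the name `max_depth`; treat the exact keyword as an assumption.

```python
from turnkeyml import benchmark_files

# Analyze and benchmark the torch.nn.Linear submodules in addition to
# the top-level model; depth > 0 is only supported for PyTorch models.
benchmark_files(
    input_files=["scripts/max_depth.py"],
    max_depth=1,
)
```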
## Filtering Model Hashes -When you ran the example from the [Multiple Models per Script](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#multiple-models-per-script) tutorial, you saw that `turnkey` discovered, built, and benchmarked two models. What if you only wanted to build and benchmark one of the models? +When you ran the example from the [Multiple Models per Script](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#multiple-models-per-script) tutorial, you saw that `turnkey` discovered, built, and benchmarked two models. What if you only wanted to build and benchmark one of the models? -You can leverage the model hashes feature of `turnkey` to filter which models are acted on. You can see in the result from [Multiple Models per Script](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md#multiple-models-per-script) that the two models, `pytorch_model` and `another_pytorch_model`, have hashes `479b1332` and `215ca1e3`, respectively. +You can leverage the model hashes feature of `turnkey` to filter which models are acted on. You can see in the result from [Multiple Models per Script](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md#multiple-models-per-script) that the two models, `pytorch_model` and `another_pytorch_model`, have hashes `479b1332` and `215ca1e3`, respectively. If you wanted to only build and benchmark `another_pytorch_model`, you could use this command, which filters `two_models.py` with the hash `215ca1e3`: @@ -204,14 +204,14 @@ two_models.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/two_models.py, line 32 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/two_models.py, line 32 Parameters: 55 (<0.1 MB) Hash: 479b1332 another_pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/two_models.py, line 40 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/two_models.py, line 40 Parameters: 510 (<0.1 MB) Hash: 215ca1e3 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -227,7 +227,7 @@ Woohoo! The 'benchmark' command is complete. You can see that both models are discovered, but only `another_pytorch_model` was built and benchmarked. -> See the [Input Script documentation](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#input-script) for more details. +> See the [Input Script documentation](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#input-script) for more details. ## Filtering Model Labels @@ -248,7 +248,7 @@ hello_world.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/hello_world.py, line 30 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/hello_world.py, line 30 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -263,7 +263,7 @@ Woohoo! The 'benchmark' command is complete. # Thanks! Now that you have completed this tutorial, make sure to check out the other tutorials if you want to learn more: -1. [Getting Started](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/readme.md) +1. 
[Getting Started](https://github.com/onnx/turnkeyml/blob/main/examples/cli/readme.md) 1. Guiding Model Discovery (this document): `turnkey` arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. +1. [Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `cache`. +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. diff --git a/examples/cli/plugins/example_combined/turnkeyml_plugin_example_combined/runtime.py b/examples/cli/plugins/example_combined/turnkeyml_plugin_example_combined/runtime.py index 00d8f1a9..ee10c557 100644 --- a/examples/cli/plugins/example_combined/turnkeyml_plugin_example_combined/runtime.py +++ b/examples/cli/plugins/example_combined/turnkeyml_plugin_example_combined/runtime.py @@ -7,12 +7,13 @@ import turnkeyml.common.exceptions as exp import turnkeyml.common.build as build from turnkeyml.run.onnxrt.within_conda import dummy_inputs -from turnkeyml.common.performance import MeasuredPerformance, Device +from turnkeyml.common.performance import MeasuredPerformance from turnkeyml.common.filesystem import Stats combined_rt_name = "example-combined-rt" + class CombinedExampleRT(BaseRT): def __init__( self, @@ -46,6 +47,9 @@ def __init__( inputs=inputs, ) + self.throughput_ips = None + self.mean_latency_ms = None + def _setup(self): # The BaseRT abstract base class requires us to overload this function, # however our simple example runtime does not require any additional @@ -109,4 +113,7 @@ def throughput(self) -> float: @property def device_name(self) -> str: - return f"Device Family {self.device_type.family}, Device Part {self.device_type.part}, Device Configuration {self.device_type.config}" + return ( + f"Device Family {self.device_type.family}, Device Part {self.device_type.part}, " + f"Device Configuration {self.device_type.config}" + ) diff --git a/examples/cli/plugins/readme.md b/examples/cli/plugins/readme.md index fd092d94..dc5b06db 100644 --- a/examples/cli/plugins/readme.md +++ b/examples/cli/plugins/readme.md @@ -5,4 +5,4 @@ This directory contains plugins that can be installed to demonstrate how turnkey - `example_seq`: Example of a sequence plugin. Install with `pip install -e example_seq` to add the `example-seq` sequence to your turnkey CLI. - `example_combined`: Example of a plugin that includes both a sequence and a runtime. Install with `pip install -e example_combined` to add the `example-combined-rt` runtime and `example-combined-seq` sequence to your turnkey CLI. -See the [Tools User Guide plugins section](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md#plugins) for information about how to create plugins. \ No newline at end of file +See the [Tools User Guide plugins section](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md#plugins) for information about how to create plugins. 
\ No newline at end of file diff --git a/examples/cli/readme.md b/examples/cli/readme.md index 40657a21..3787a86b 100644 --- a/examples/cli/readme.md +++ b/examples/cli/readme.md @@ -1,16 +1,16 @@ # Learning the `turnkey` CLI -This document is a tutorial for exploring the different features of the `turnkey` command line interface (CLI). You can learn the details of those features in the [Tools User Guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md) and learn about their implementation in the [Code Organization Guide](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/code.md). +This document is a tutorial for exploring the different features of the `turnkey` command line interface (CLI). You can learn the details of those features in the [Tools User Guide](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md) and learn about their implementation in the [Code Organization Guide](https://github.com/onnx/turnkeyml/blob/main/docs/code.md). We've created this tutorial document because `turnkey` is a CLI that benchmarks the contents of `.py` scripts. So all of the `.py` scripts in the `examples/cli/scripts` directory are meant to be fed into `turnkey` to demonstrate some specific functionality. -Once you've familiarized yourself with these features, head over to the [`models` directory](https://github.com/aig-bench/onnxmodelzoo/tree/main/models) to learn how to use `turnkey` with real world machine learning models. +Once you've familiarized yourself with these features, head over to the [`models` directory](https://github.com/onnx/turnkeyml/tree/main/models) to learn how to use `turnkey` with real world machine learning models. The tutorials are organized into a few chapters: 1. Getting Started (this document) -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. +1. [Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `cache`. +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. In this tutorial you will learn things such as: - [How to benchmark BERT with one command](#just-benchmark-bert) @@ -29,7 +29,7 @@ models=$(turnkey models location --quiet) turnkey $models/transformers/bert.py ``` -> _Note_: You will need to [install the models benchmark requirements](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md#turnkeyml-models-requirements), if you haven't already. 
+> _Note_: You will need to [install the models benchmark requirements](https://github.com/onnx/turnkeyml/blob/main/docs/install.md#turnkeyml-models-requirements), if you haven't already. This will produce a result that looks like this, which shows you the performance of BERT-Base on your CPU: @@ -40,7 +40,7 @@ bert.py: model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: BertModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/models/transformers/bert.py, line 18 + Location: /home/jfowers/turnkeyml/models/transformers/bert.py, line 18 Parameters: 109,482,240 (208.8 MB) Hash: d59172a2 Status: Successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz (ort v1.14.1) @@ -53,7 +53,7 @@ bert.py: All of the following tutorials assume that your current working directory is in the same location as this readme file (`examples/cli`). -These tutorials assume you have used the [cloning install](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/install.md#cloning-install) since that provides you with the required tutorial files in `examples/cli/scripts`. +These tutorials also assume you have [installed from source](https://github.com/onnx/turnkeyml/blob/main/docs/install.md) since that provides you with the required tutorial files in `examples/cli/scripts`. ## Hello World @@ -80,7 +80,7 @@ hello_world.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/hello_world.py, line 29 + Location: /home/jfowers/turnkeyml/examples/cli/hello_world.py, line 29 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -113,7 +113,7 @@ hello_world.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/hello_world.py, line 29 + Location: /home/jfowers/turnkeyml/examples/cli/hello_world.py, line 29 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully benchmarked on NVIDIA A100-SXM4-40GB @@ -146,7 +146,7 @@ two_models.py: pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/two_models.py, line 32 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/two_models.py, line 32 Parameters: 55 (<0.1 MB) Hash: 479b1332 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -156,7 +156,7 @@ two_models.py: another_pytorch_model (executed 1x) Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/jfowers/onnxmodelzoo/toolchain/examples/cli/scripts/two_models.py, line 40 + Location: /home/jfowers/turnkeyml/examples/cli/scripts/two_models.py, line 40 Parameters: 510 (<0.1 MB) Hash: 215ca1e3 Status: Model successfully benchmarked on Intel(R) Xeon(R) CPU @ 2.20GHz @@ -194,7 +194,7 @@ multiple_invocations.py: pytorch_model Model Type: Pytorch (torch.nn.Module) Class: SmallModel () - Location: /home/dhnoronha/onnxmodelzoo/toolchain/examples/cli/scripts/multiple_invocations.py, line 40 + Location: /home/dhnoronha/turnkeyml/examples/cli/scripts/multiple_invocations.py, line 40 Parameters: 60 (<0.1 MB) With input shape 1 (executed 2x) @@ -229,7 +229,7 @@ Building "sample" ✓ Receiving ONNX Model ✓ Finishing up -Woohoo! Saved to ~/onnxmodelzoo/toolchain/examples/cli/onnx/tmp_cache/sample +Woohoo! Saved to ~/turnkeyml/examples/cli/onnx/tmp_cache/sample Info: Benchmarking on local x86... 
@@ -241,6 +241,6 @@ Info: Performance of build sample on x86 device Intel(R) Xeon(R) CPU @ 2.20GHz i # Thanks! Now that you have completed this tutorial, make sure to check out the other tutorials if you want to learn more: -1. [Guiding Model Discovery](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. -1. [Working with the Cache](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. -1. [Customizing Builds](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/examples/cli/cache.md): `turnkey` arguments that customize build behavior to unlock new workflows. \ No newline at end of file +1. [Guiding Model Discovery](https://github.com/onnx/turnkeyml/blob/main/examples/cli/discovery.md): `turnkey` arguments that customize the model discovery process to help streamline your workflow. +1. [Working with the Cache](https://github.com/onnx/turnkeyml/blob/main/examples/cli/cache.md): `turnkey` arguments and commands that help you understand, inspect, and manipulate the `turnkey cache`. +1. [Customizing Builds](https://github.com/onnx/turnkeyml/blob/main/examples/cli/build.md): `turnkey` arguments that customize build behavior to unlock new workflows. \ No newline at end of file diff --git a/models/readme.md b/models/readme.md index d67150cc..2917b57b 100644 --- a/models/readme.md +++ b/models/readme.md @@ -1,6 +1,6 @@ # TurnkeyML Models -This directory contains the TurnkeyML models, which is a large collection of models that can be evaluated using the [`turnkey` CLI tool](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md). +This directory contains the TurnkeyML models, a large collection of models that can be evaluated using the [`turnkey` CLI tool](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md). ## Table of Contents @@ -21,9 +21,7 @@ This directory contains the TurnkeyML models, which is a large collection of mod The TurnkeyML collection is made up of several corpora of models (_corpora_ is the plural of _corpus_... we had to look it up too). Each corpus is named after the online repository that the models were sourced from. Each corpus gets its own subdirectory in the `models` directory. The corpora are: -- `diffusers`: models from the [Huggingface `diffusers` library](https://huggingface.co/docs/diffusers/index), including the models that make up Stable Diffusion. - `graph_convolutions`: Graph Neural Network (GNN) models from a variety of publications. See the docstring on each .py file for the source. -- `popular_on_huggingface`: hundreds of the most-downloaded models from the [Huggingface models repository](https://huggingface.co/models). - `selftest`: a small corpus with small models that can be used for testing out the tools. - `torch_hub`: a variety of models, including many image classification models, from the [Torch Hub repository](https://github.com/pytorch/hub). - `torchvision`: image recognition models from the [`torchvision` library](https://pytorch.org/vision/stable/index.html). @@ -36,9 +34,9 @@ The corpora are: ### Prerequisites Before running the benchmark we suggest you: -1. Install the `turnkey` package by following the [install instructions](https://github.com/aig-bench/onnxmodelzoo/tree/main/docs/install.md). -1. 
Go through the [`turnkey` CLI tutorials](https://github.com/aig-bench/onnxmodelzoo/tree/main/examples/cli/readme.md). -1. Familiarize yourself with the [`turnkey` CLI tool](https://github.com/aig-bench/onnxmodelzoo/blob/main/toolchain/docs/turnkey_user_guide.md) documentation. +1. Install the `turnkey` package by following the [install instructions](https://github.com/onnx/turnkeyml/tree/main/docs/install.md). +1. Go through the [`turnkey` CLI tutorials](https://github.com/onnx/turnkeyml/tree/main/examples/cli/readme.md). +1. Familiarize yourself with the [`turnkey` CLI tool](https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md) documentation. You must also run the following command to install all of the models' dependencies into your Python environment. @@ -49,13 +47,13 @@ You must also run the following command to install all of the models' dependenci Once you have fulfilled the prerequisites, you can evaluate one model from the benchmark with a command like this: ``` -cd OMZ_ROOT/toolchain/models # OMZ_ROOT is where you cloned onnxmodelzoo +cd REPO_ROOT/models # REPO_ROOT is where you cloned turnkeyml turnkey selftest/linear.py ``` -You can also run the entire all models in one shot with: +You can also run all models in one shot with: ``` -cd OMZ_ROOT/toolchain/models # OMZ_ROOT is where you cloned onnxmodelzoo +cd REPO_ROOT/models # REPO_ROOT is where you cloned turnkeyml turnkey */*.py ``` diff --git a/models/torchvision/skip/ssdlite320_mobilenet_v3_large.py b/models/torchvision/skip/ssdlite320_mobilenet_v3_large.py index b019830f..7cb539af 100644 --- a/models/torchvision/skip/ssdlite320_mobilenet_v3_large.py +++ b/models/torchvision/skip/ssdlite320_mobilenet_v3_large.py @@ -6,7 +6,7 @@ # Skip reason: triggers a bug in analysis where two models are discovered during # profiling instead of one. 
-# Reinstate this model once https://github.com/aigdat/onnxmodelzoo/issues/239 is fixed +# Reinstate this model once https://github.com/onnx/turnkeyml/issues/239 is fixed # Models discovered during profiling: diff --git a/setup.py b/setup.py index b7204610..381b340a 100644 --- a/setup.py +++ b/setup.py @@ -21,11 +21,7 @@ "turnkeyml.cli", "turnkeyml.common", "turnkeyml_models", - "turnkeyml_models.diffusers", "turnkeyml_models.graph_convolutions", - "turnkeyml_models.llm", - "turnkeyml_models.llm_layer", - "turnkeyml_models.popular_on_huggingface", "turnkeyml_models.selftest", "turnkeyml_models.timm", "turnkeyml_models.torch_hub", @@ -68,7 +64,6 @@ long_description_content_type="text/markdown", include_package_data=True, package_data={ - "turnkeyml.api": ["Dockerfile"], "turnkeyml_models": ["requirements.txt", "readme.md"], }, ) diff --git a/src/turnkeyml/analyze/script.py b/src/turnkeyml/analyze/script.py index 8af916f4..27c13efb 100644 --- a/src/turnkeyml/analyze/script.py +++ b/src/turnkeyml/analyze/script.py @@ -203,7 +203,7 @@ def explore_invocation( relative_path = tracer_args.input.replace( fs.MODELS_DIR, - f"https://github.com/aigdat/onnxmodelzoo/tree/{git_hash}/toolchain/models", + f"https://github.com/onnx/turnkeyml/tree/{git_hash}/models", ).replace("\\", "/") stats.save_stat(fs.Keys.MODEL_SCRIPT, relative_path) diff --git a/src/turnkeyml/analyze/util.py b/src/turnkeyml/analyze/util.py index e485ee2a..d5dc7c6e 100644 --- a/src/turnkeyml/analyze/util.py +++ b/src/turnkeyml/analyze/util.py @@ -132,7 +132,7 @@ def populate_onnx_model_info(onnx_model) -> Dict: ) except ValueError: # Models >2GB on disk cannot have their model size measured this - # way and will throw a ValueError https://github.com/aig-bench/onnxmodelzoo/issues/318 + # way and will throw a ValueError https://github.com/onnx/turnkeyml/issues/318 pass return result_dict diff --git a/src/turnkeyml/build/stage.py b/src/turnkeyml/build/stage.py index cb943db7..cd2ec537 100644 --- a/src/turnkeyml/build/stage.py +++ b/src/turnkeyml/build/stage.py @@ -268,7 +268,7 @@ def launch(self, state: build.State) -> build.State: build_model() is running a build on a model that already built successfully, which should not happen because the build should have loaded from cache or rebuilt from scratch. If you are using custom Stages and Sequences then you have some debugging to do. Otherwise, - please file an issue at https://github.com/aig-bench/onnxmodelzoo/issues + please file an issue at https://github.com/onnx/turnkeyml/issues """ raise exp.Error(msg) diff --git a/src/turnkeyml/build_api.py b/src/turnkeyml/build_api.py index 0da17efc..5cf681b2 100644 --- a/src/turnkeyml/build_api.py +++ b/src/turnkeyml/build_api.py @@ -60,7 +60,7 @@ def build_model( part within a family, or configuration of a part model, respectively. 
More information is available in the Tools User Guide: - https://github.com/aigdat/onnxmodelzoo/blob/main/toolchain/docs/tools_user_guide.md + https://github.com/onnx/turnkeyml/blob/main/docs/tools_user_guide.md """ # Allow monitor to be globally disabled by an environment variable diff --git a/src/turnkeyml/cli/cli.py b/src/turnkeyml/cli/cli.py index c00b9d73..25db55e9 100644 --- a/src/turnkeyml/cli/cli.py +++ b/src/turnkeyml/cli/cli.py @@ -433,7 +433,7 @@ def check_extension(choices, file_name): ) # Design note: the `models` command is simple right now, however some additional ideas - # are documented in https://github.com/aig-bench/onnxmodelzoo/issues/247 + # are documented in https://github.com/onnx/turnkeyml/issues/247 models_subparsers = models_parser.add_subparsers( title="models", diff --git a/src/turnkeyml/cli/setup_venv.sh b/src/turnkeyml/cli/setup_venv.sh index 1edba27d..8d2856d4 100644 --- a/src/turnkeyml/cli/setup_venv.sh +++ b/src/turnkeyml/cli/setup_venv.sh @@ -32,9 +32,9 @@ fi # Install turnkey and model requirements cd "$TURNKEY_PATH" || exit -pip install -e toolchain +pip install -e . if [[ "$SKIP_REQUIREMENTS_INSTALL" != "True" ]] then - cd toolchain/models || exit + cd models || exit pip install -r requirements.txt fi \ No newline at end of file diff --git a/src/turnkeyml/common/labels.py b/src/turnkeyml/common/labels.py index e27f46ba..a3f28078 100644 --- a/src/turnkeyml/common/labels.py +++ b/src/turnkeyml/common/labels.py @@ -16,7 +16,7 @@ def to_dict(label_list: List[str]) -> Dict[str, List[str]]: except ValueError: # FIXME: Create a proper warning for this once we have the right # infrastructure for doing so. - # https://github.com/aig-bench/onnxmodelzoo/issues/55 + # https://github.com/onnx/turnkeyml/issues/55 printing.log_warning( ( f"Malformed label {item} found. " diff --git a/test/cli.py b/test/cli.py index 671246f5..b38cedab 100644 --- a/test/cli.py +++ b/test/cli.py @@ -138,7 +138,7 @@ def assert_success_of_builds( # Figure out the build name by surveying the build cache # for a build that includes test_script_name in the name # TODO: simplify this code when - # https://github.com/aig-bench/onnxmodelzoo/issues/16 + # https://github.com/onnx/turnkeyml/issues/16 # is done builds = filesystem.get_all(cache_dir) builds_found = 0 diff --git a/trackers/huggingface/app.py b/trackers/huggingface/app.py index fb00369b..c9b62ac7 100644 --- a/trackers/huggingface/app.py +++ b/trackers/huggingface/app.py @@ -61,7 +61,7 @@ def add_faq() -> None: ( "All TurnkeyML results have been generated using the turnkey tool v1.0.0, which is part " "of the TurnkeyML Github Repository. You can learn more about it " - 'here.' + 'here.' ), ) faq.add_section( @@ -71,7 +71,7 @@ def add_faq() -> None: "nvidia: NVIDIA A100 40GB on Google Cloud (a2-highgpu-1g) and TensorRT version 22.12-py3.", ( "You can find more details about the methodology " - 'here.' + 'here.' ), ], ) @@ -86,7 +86,7 @@ def add_faq() -> None: ( "Results are currently being validated. You can have a look at our current validation " "tasks and other limitations " - 'here.' + 'here.' ), ], ) @@ -117,7 +117,7 @@ def add_faq() -> None: "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, " "OUT OF OR IN CONNECTION WITH THE BENCHMARK OR THE USE OR OTHER DEALINGS IN THE " "BENCHMARK. Read more about it " - 'here.' + 'here.' 
), ) @@ -126,7 +126,6 @@ def add_faq() -> None: # Add all filters to sidebar with st.sidebar: - st.markdown("# Filters") # Get all reports of a given test type diff --git a/trackers/report_plots.py b/trackers/report_plots.py index a71ba5de..5d64b7a5 100644 --- a/trackers/report_plots.py +++ b/trackers/report_plots.py @@ -4,7 +4,7 @@ import plotly.express as px import numpy as np -df = pd.read_csv(r'C:\Users\danie\onnxmodelzoo\toolchain\models\timm\2023-08-30.csv') +df = pd.read_csv(r"C:\Users\danie\turnkeyml\models\timm\2023-08-30.csv") colors = { "blue": "#5470c6", @@ -22,41 +22,67 @@ def throughput_acceleration(df): ort_results = df[df["runtime"] == "ort"] assert len(vitisep_results) == len(ort_results) on_ipu = vitisep_results.ipu_compilation_successful.to_numpy() - ratio = vitisep_results.throughput.to_numpy()/ort_results.throughput.to_numpy() - - y0 = [ratio[idx] for idx in range(len(ratio)) if on_ipu[idx] == 'True'] - y1 = [ratio[idx] for idx in range(len(ratio)) if on_ipu[idx] == 'False'] - y2 = np.concatenate ([y0,y1]) - - y0_label = ["Yes"]*len(y0) - y1_label = ["No"]*len(y1) - y2_label = y0_label+y1_label - - df = pd.DataFrame({'graph_name':['Running on IPU']*len(y0)+['Fallback to CPU']*len(y1)+['All models']*len(y2), - 'value': np.concatenate([y0,y1,y2],0), - 'Actually running on the IPU?':y0_label+y1_label+y2_label} - ) - - fig = px.strip(df, - x='graph_name', - y='value', - color='Actually running on the IPU?', - stripmode='overlay') - - fig.add_trace(go.Box(y=df.query('graph_name == "Running on IPU"')['value'], name='Running on IPU',marker=dict(opacity=0.1))) - fig.add_trace(go.Box(y=df.query('graph_name == "Fallback to CPU"')['value'], name='Fallback to CPU')) - fig.add_trace(go.Box(y=df.query('graph_name == "All models"')['value'], name='All models')) - - fig.update_layout(autosize=False, - legend={'traceorder':'normal'}, - ) + ratio = vitisep_results.throughput.to_numpy() / ort_results.throughput.to_numpy() + + y0 = [ratio[idx] for idx in range(len(ratio)) if on_ipu[idx] == "True"] + y1 = [ratio[idx] for idx in range(len(ratio)) if on_ipu[idx] == "False"] + y2 = np.concatenate([y0, y1]) + + y0_label = ["Yes"] * len(y0) + y1_label = ["No"] * len(y1) + y2_label = y0_label + y1_label + + df = pd.DataFrame( + { + "graph_name": ["Running on IPU"] * len(y0) + + ["Fallback to CPU"] * len(y1) + + ["All models"] * len(y2), + "value": np.concatenate([y0, y1, y2], 0), + "Actually running on the IPU?": y0_label + y1_label + y2_label, + } + ) + + fig = px.strip( + df, + x="graph_name", + y="value", + color="Actually running on the IPU?", + stripmode="overlay", + ) + + fig.add_trace( + go.Box( + y=df.query('graph_name == "Running on IPU"')["value"], + name="Running on IPU", + marker=dict(opacity=0.1), + ) + ) + fig.add_trace( + go.Box( + y=df.query('graph_name == "Fallback to CPU"')["value"], + name="Fallback to CPU", + ) + ) + fig.add_trace( + go.Box(y=df.query('graph_name == "All models"')["value"], name="All models") + ) + + fig.update_layout( + autosize=False, + legend={"traceorder": "normal"}, + ) fig.update_yaxes(title_text="Acceleration compared to OnnxRuntime CPU EP") fig.update_xaxes(title_text="") fig.show() + def parameter_histogram(df: pd.DataFrame) -> None: # Add parameters histogram - all_models = [float(x) / 1000000 for x in df[df["runtime"] == "vitisep"]["parameters"] if x != "-"] + all_models = [ + float(x) / 1000000 + for x in df[df["runtime"] == "vitisep"]["parameters"] + if x != "-" + ] hist_data = [] group_labels = [] @@ -65,7 +91,6 @@ def parameter_histogram(df: 
pd.DataFrame) -> None: hist_data.append(all_models) group_labels.append("All models") - if hist_data: fig = ff.create_distplot( hist_data, @@ -82,28 +107,38 @@ def parameter_histogram(df: pd.DataFrame) -> None: fig.show() + def throughput_plot(df): vitisep_results = df[df["runtime"] == "vitisep"] ort_results = df[df["runtime"] == "ort"] - fig = go.Figure(data=[ - go.Bar(name='VitisEP', x=vitisep_results.model_name, y=vitisep_results.throughput), - go.Bar(name='OnnxRuntime CPU EP', x=ort_results.model_name, y=ort_results.throughput) - ]) + fig = go.Figure( + data=[ + go.Bar( + name="VitisEP", + x=vitisep_results.model_name, + y=vitisep_results.throughput, + ), + go.Bar( + name="OnnxRuntime CPU EP", + x=ort_results.model_name, + y=ort_results.throughput, + ), + ] + ) # Set x and y axis labels - fig.update_layout( - barmode='group', - xaxis_title="", - yaxis_title="Throughput" - ) + fig.update_layout(barmode="group", xaxis_title="", yaxis_title="Throughput") fig.show() - def compilation_time(df): # Add compilation time histogram - all_models = [float(x) for x in df[df["runtime"] == "vitisep"]["ipu_compilation_seconds"] if x != "-"] + all_models = [ + float(x) + for x in df[df["runtime"] == "vitisep"]["ipu_compilation_seconds"] + if x != "-" + ] hist_data = [] group_labels = [] @@ -126,7 +161,8 @@ def compilation_time(df): fig.show() + parameter_histogram(df) throughput_plot(df) throughput_acceleration(df) -compilation_time(df) \ No newline at end of file +compilation_time(df)
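As a closing note on `trackers/report_plots.py`: the CSV path above is hard-coded to one developer's machine. A minimal, hypothetical sketch of a more portable entry point is shown below; it takes the report CSV path as a command-line argument and reuses the plotting functions defined in the script.

```python
# Hypothetical portable entry point for trackers/report_plots.py
import sys

import pandas as pd

if __name__ == "__main__":
    # Usage: python report_plots.py path/to/report.csv
    df = pd.read_csv(sys.argv[1])
    parameter_histogram(df)
    throughput_plot(df)
    throughput_acceleration(df)
    compilation_time(df)
```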