diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b43d3fadf0..346ae60941 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,16 +1,19 @@ ### Description of Change(s) +### Link to proposal ([if applicable](https://openusd.org/release/contributing_to_usd.html#step-1-get-consensus-for-major-changes)) + ### Fixes Issue(s) -- - - -- [ ] I have verified that all unit tests pass with the proposed changes - -- [ ] I have submitted a signed Contributor License Agreement + +### Checklist + +[ ] I have created this PR based on the dev branch + +[ ] I have followed the [coding conventions](https://openusd.org/release/api/_page__coding__guidelines.html) + +[ ] I have added unit tests that exercise this functionality (Reference: +[testing guidelines](https://openusd.org/release/api/_page__testing__guidelines.html)) + +[ ] I have verified that all unit tests pass with the proposed changes + +[ ] I have submitted a signed Contributor License Agreement (Reference: +[Contributor License Agreement instructions](https://openusd.org/release/contributing_to_usd.html#contributor-license-agreement)) diff --git a/.github/workflows/buildusd.yml b/.github/workflows/buildusd.yml new file mode 100644 index 0000000000..a90a903aa5 --- /dev/null +++ b/.github/workflows/buildusd.yml @@ -0,0 +1,165 @@ +name: BuildUSD + +on: + push: + branches: + - dev + - release + issue_comment: + types: [created] + +env: + PYTHON_VERSION: "3.9" + PYTHON_VERSION_MAC: "3.11" + +jobs: + GetUser: + runs-on: ubuntu-20.04 + timeout-minutes: 5 + outputs: + require-result: ${{ steps.check.outputs.require-result }} + steps: + - uses: actions-cool/check-user-permission@v2 + id: check + with: + require: 'write' + username: ${{ github.event.comment.user.login }} + Linux: + needs: [GetUser] + if: ${{ (github.event.issue.pull_request && contains(github.event.comment.body, '/AzurePipelines run') && needs.GetUser.outputs.require-result == 'true' ) || github.event_name == 'push' }} + runs-on: ubuntu-20.04 + timeout-minutes: 120 + steps: + - run: echo ${{ needs.GetUser.outputs.require-result }} + - name: Checkout code + uses: actions/checkout@v4 + - name: Restore cached artifacts + id: cache-usd-build-dependency + uses: actions/cache/restore@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION }}-${{ hashFiles('build_scripts/**/*') }} + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + check-latest: false + - name: Install dependencies + run: | + sudo apt-get -qq update + sudo apt-get install -y python3-setuptools libglew-dev libxrandr-dev libxcursor-dev libxinerama-dev libxi-dev + pip install --upgrade pip + pip install PySide2 PyOpenGL + - name: Build USD + run: | + python3 build_scripts/build_usd.py --no-materialx --build USDgen/build --src USDgen/src USDinst -v + - name: Save build artifacts to cache + if: steps.cache-usd-build-dependency.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION }}-${{ hashFiles('build_scripts/**/*') }} + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: usd-linux + path: USDinst + + macOS: + needs: [GetUser] + if: ${{ (github.event.issue.pull_request && contains(github.event.comment.body, '/AzurePipelines run') && needs.GetUser.outputs.require-result == 'true' ) || github.event_name == 'push' }} + runs-on: macos-12 + timeout-minutes: 120 + 
steps: + - run: echo ${{ needs.GetUser.outputs.require-result }} + - name: Checkout code + uses: actions/checkout@v4 + - name: Restore cached artifacts + id: cache-usd-build-dependency + uses: actions/cache/restore@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION_MAC }}-${{ hashFiles('build_scripts/**/*') }} + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION_MAC }} + check-latest: false + - name: Install dependencies + run: | + export PATH=/Applications/CMake.app/Contents/bin:$PATH + sudo xcode-select -s /Applications/Xcode_13.3.app/Contents/Developer + # Set SYSTEM_VERSION_COMPAT while installing Python packages to + # accommodate the macOS version numbering change from 10.x to 11 + export SYSTEM_VERSION_COMPAT=1 + pip install PySide6 PyOpenGL setuptools + export -n SYSTEM_VERSION_COMPAT + - name: Build USD + run: | + export PATH=/Applications/CMake.app/Contents/bin:$PATH + python3 build_scripts/build_usd.py --no-materialx --generator Xcode --build USDgen/build --src USDgen/src USDinst -v + - name: Save build artifacts to cache + if: steps.cache-usd-build-dependency.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION_MAC }}-${{ hashFiles('build_scripts/**/*') }} + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: usd-macOS + path: USDinst + + Windows: + needs: [GetUser] + if: ${{ (github.event.issue.pull_request && contains(github.event.comment.body, '/AzurePipelines run') && needs.GetUser.outputs.require-result == 'true' ) || github.event_name == 'push' }} + runs-on: windows-2019 + timeout-minutes: 120 + steps: + - run: echo ${{ needs.GetUser.outputs.require-result }} + - name: Checkout code + uses: actions/checkout@v4 + - name: Restore cached artifacts + id: cache-usd-build-dependency + uses: actions/cache/restore@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION }}-${{ hashFiles('build_scripts/**/*') }} + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + check-latest: false + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install PyOpenGL PySide2 + - name: Build USD + run: | + REM Unset BOOST_ROOT on Windows. The VS2017-Win2016 image + REM sets this env var to a pre-installed boost which causes + REM the USD build to favor this over the boost built by + REM build_usd.py. 
This causes the build to fail, because + REM the pre-installed boost does not include boost_python27 + call set BOOST_ROOT= + python build_scripts/build_usd.py --no-materialx --generator "Visual Studio 16 2019" --build USDgen/build --src USDgen/src USDinst --build-args USD,"-DPXR_ENABLE_PRECOMPILED_HEADERS=OFF" -v + shell: cmd + - name: Save build artifacts to cache + if: steps.cache-usd-build-dependency.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + USDinst + key: ${{ runner.os }}-BuildUSD-py${{ env.PYTHON_VERSION }}-${{ hashFiles('build_scripts/**/*') }} + - name: Upload artifacts + # use v3 because actions/upload-artifact@v4 fails + # see https://github.com/actions/upload-artifact/issues/485 + uses: actions/upload-artifact@v3 + with: + name: usd-win64 + path: USDinst \ No newline at end of file diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml new file mode 100644 index 0000000000..7342f27792 --- /dev/null +++ b/.github/workflows/pypi.yml @@ -0,0 +1,342 @@ +name: PyPiPackaging + +# Trigger this build whenever the dev or release branches are updated +# or on-demand. +# +# Ideally we'd run this pipeline for all pull requests, but doing so consumes +# our limited number of slots and almost always just duplicates the +# build done in the main pipeline. +on: + push: + branches: + - dev + - release + workflow_dispatch: + inputs: + post_release_tag: + description: 'post release tag' + default: '' + +env: + POST_RELEASE_TAG: ${{ github.event.inputs.post_release_tag == '' && ' ' || format('--post-release-tag {0}', github.event.inputs.post_release_tag) }} + +jobs: + Linux: + strategy: + matrix: + PYTHON: + - TAG: cp37 + INTERPRETER: /opt/python/cp37-cp37m/bin/python + VERSION_SPEC: '3.7' + - TAG: cp38 + INTERPRETER: /opt/python/cp38-cp38/bin/python + VERSION_SPEC: '3.8' + - TAG: cp39 + INTERPRETER: /opt/python/cp39-cp39/bin/python + VERSION_SPEC: '3.9' + - TAG: cp310 + INTERPRETER: /opt/python/cp310-cp310/bin/python + VERSION_SPEC: '3.10' + - TAG: cp311 + INTERPRETER: /opt/python/cp311-cp311/bin/python + VERSION_SPEC: '3.11' + runs-on: ubuntu-20.04 + timeout-minutes: 60 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.PYTHON.VERSION_SPEC }} + check-latest: false + - name: Setting up docker + run: | + docker build -t manylinuxwithcmake build_scripts/pypi/docker + docker run --name usdmanylinux --rm -id -v ./:/opt/USD -v /home/vsts/dist:/opt/USD-dist manylinuxwithcmake + - name: Building USD + run: | + # Terrible, terrible hack. The manylinux Docker image used to build the + # Python wheel does not include the corresponding Python shared library + # to link against. https://peps.python.org/pep-0513/#libpythonx-y-so-1 + # describes why this is so. However, the FindPython CMake module used + # by USD's build system requires that the library exists and will error + # out otherwise, even though we explicitly avoid linking against Python + # via the PXR_PY_UNDEFINED_DYNAMIC_LOOKUP flag. + # + # To work around this, we create a dummy file for the library using + # the same logic as build_usd.py to determine where the library should + # exist (see GetPythonInfo). FindPython will see that the library exists + # and allow the build to continue. The file is 100% bogus, but the + # PXR_PY_UNDEFINED_DYNAMIC_LOOKUP flag will ensure that we never try to + # link against this library anyway, so it doesn't matter. 
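+                #
+                # (Illustrative note: the path touched below is $LIBDIR/$LDLIBRARY as
+                # reported by sysconfig. To see what it resolves to for a given
+                # interpreter, you can run, for example:
+                #   python -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY'))"
+                # inside the manylinux container.)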
+ docker exec usdmanylinux ${{ matrix.PYTHON.INTERPRETER }} -c "import pathlib,sysconfig; pathlib.Path(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')).touch()" + docker exec usdmanylinux ${{ matrix.PYTHON.INTERPRETER }} build_scripts/build_usd.py --build-args USD,"-DPXR_PY_UNDEFINED_DYNAMIC_LOOKUP=ON -DPXR_BUILD_USD_TOOLS=OFF -DPXR_INSTALL_LOCATION=../pxr/pluginfo" --no-materialx --no-imaging --no-examples --no-tutorials --build /opt/USD/gen/build --src /opt/USD/gen/src /opt/USD/inst -v + - name: Creating packaging directory + run: | + docker exec usdmanylinux mkdir ./packaging + docker exec usdmanylinux cp -R /opt/USD/inst ./packaging + docker exec usdmanylinux sh -c 'cp build_scripts/pypi/package_files/* ./packaging' + docker exec usdmanylinux sh -c 'cp LICENSE.txt ./packaging' + - name: Running setup.py + run: | + docker exec -w /opt/USD/packaging usdmanylinux ${{ matrix.PYTHON.INTERPRETER }} setup.py ${{ env.POST_RELEASE_TAG }} bdist_wheel --python-tag ${{ matrix.PYTHON.TAG }} + - name: Running auditwheel repair (moves .so files into package) + run: | + docker exec usdmanylinux /bin/bash -c 'PYTHONPATH=/opt/USD/packaging/pypi/lib/python LD_LIBRARY_PATH=/opt/USD/packaging/pypi/lib:$LD_LIBRARY_PATH auditwheel repair packaging/dist/*.whl' + - name: Updating pluginfo paths + run: | + WHEEL_PACKAGE_NAME=`docker exec usdmanylinux ls wheelhouse` + docker exec usdmanylinux ${{ matrix.PYTHON.INTERPRETER }} build_scripts/pypi/updatePluginfos.py "wheelhouse/$WHEEL_PACKAGE_NAME" "/opt/USD-dist/$WHEEL_PACKAGE_NAME" + - name: Stopping docker container + run: | + docker stop usdmanylinux + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist-linux-${{ matrix.PYTHON.TAG }} + path: /home/vsts/dist + macOS: + strategy: + matrix: + PYTHON: + - VERSION_SPEC: '3.7' + INTERPRETER: python3.7 + TAG: cp37 + - VERSION_SPEC: '3.8' + INTERPRETER: python3.8 + TAG: cp38 + - VERSION_SPEC: '3.9' + INTERPRETER: python3.9 + TAG: cp39 + - VERSION_SPEC: '3.10' + INTERPRETER: python3.10 + TAG: cp310 + - VERSION_SPEC: '3.11' + INTERPRETER: python3.11 + TAG: cp311 + runs-on: macos-12 + timeout-minutes: 120 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.PYTHON.VERSION_SPEC }} + check-latest: false + - name: Install dependencies + run: | + ${{ matrix.PYTHON.INTERPRETER }} -m pip install --upgrade pip + ${{ matrix.PYTHON.INTERPRETER }} -m pip install delocate~=0.10.2 wheel + - name: Build USD + run: | + sudo xcode-select -s /Applications/Xcode_13.3.app/Contents/Developer + ${{ matrix.PYTHON.INTERPRETER }} build_scripts/build_usd.py --build-args USD,"-DPXR_PY_UNDEFINED_DYNAMIC_LOOKUP=ON -DPXR_BUILD_USD_TOOLS=OFF -DPXR_INSTALL_LOCATION=../pluginfo" --no-materialx --no-imaging --no-examples --no-tutorials --generator Xcode --build-target universal --build $GITHUB_WORKSPACE/USDgen/build --src $GITHUB_WORKSPACE/USDgen/src $GITHUB_WORKSPACE/USDinst -v + - name: Packaging USD + run: | + pwd + ls -la + mkdir ./packaging + mkdir ./packaging/inst + cp -R $GITHUB_WORKSPACE/USDinst/* ./packaging/inst + cp build_scripts/pypi/package_files/* ./packaging + cp LICENSE.txt ./packaging + ls -la ./packaging + ls -la ./packaging/inst + - name: Running setup.py + run: | + cd ./packaging + ${{ matrix.PYTHON.INTERPRETER }} setup.py ${{ env.POST_RELEASE_TAG }} bdist_wheel --python-tag ${{ matrix.PYTHON.TAG }} --plat-name macosx_10_9_universal2 + - name: Running delocate + run: | + # set 
DYLD_FALLBACK_LIBRARY_PATH for delocate-wheel to resolve libs + # https://github.com/pypa/cibuildwheel/issues/816 + export DYLD_FALLBACK_LIBRARY_PATH=`readlink -f USDinst/lib` + echo $DYLD_FALLBACK_LIBRARY_PATH + ls -la $DYLD_FALLBACK_LIBRARY_PATH + delocate-wheel -vv -w dist-delocated packaging/dist/* + ls -la packaging/dist + ls -la dist-delocated + - name: Updating pluginfo paths + run: | + WHEEL_PACKAGE_NAME=`ls ./packaging/dist` + mkdir -p ./dist + ls -la ./packaging/dist + ls -la ./dist + ${{ matrix.PYTHON.INTERPRETER }} build_scripts/pypi/updatePluginfos.py "./dist-delocated/$WHEEL_PACKAGE_NAME" "./dist/$WHEEL_PACKAGE_NAME" + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist-mac-${{ matrix.PYTHON.TAG }} + path: ./dist + Windows: + strategy: + matrix: + PYTHON: + - VERSION_SPEC: '3.7' + TAG: cp37 + - VERSION_SPEC: '3.8' + TAG: cp38 + - VERSION_SPEC: '3.9' + TAG: cp39 + - VERSION_SPEC: '3.10' + TAG: cp310 + - VERSION_SPEC: '3.11' + TAG: cp311 + runs-on: windows-2019 + timeout-minutes: 60 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.PYTHON.VERSION_SPEC }} + check-latest: false + - name: Install dependencies + run: | + python -m pip install wheel + shell: cmd + - name: Build USD + run: | + call C:\"Program Files (x86)"\"Microsoft Visual Studio"\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat + set BOOST_ROOT= + python --version + python build_scripts/build_usd.py --build-args USD,"-DPXR_ENABLE_PRECOMPILED_HEADERS=OFF -DPXR_PY_UNDEFINED_DYNAMIC_LOOKUP=ON -DPXR_BUILD_USD_TOOLS=OFF -DPXR_INSTALL_LOCATION=../pxr/pluginfo" --no-materialx --no-imaging --no-examples --no-tutorials --build USDgen/build --src USDgen/src USDinst -v + shell: cmd + - name: Packaging USD + run: | + dir + mkdir D:\packaging + xcopy /E /I USDinst D:\packaging\inst + copy build_scripts\pypi\package_files\* D:\packaging + copy LICENSE.txt D:\packaging + dir D:\packaging + dir D:\packaging\inst + shell: cmd + - name: Running setup.py + run: | + D: + cd D:\packaging + python setup.py ${{ env.POST_RELEASE_TAG }} bdist_wheel --python-tag ${{ matrix.PYTHON.TAG }} --plat-name win_amd64 + dir + shell: cmd + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist-windows-${{ matrix.PYTHON.TAG }} + path: D:\packaging\dist + + CollectPackages: + if: ${{ always() }} + needs: [Linux, macOS, Windows] + timeout-minutes: 5 + runs-on: ubuntu-24.04 + steps: + - uses: actions/download-artifact@v4 + with: + path: dist-final + pattern: dist-*-* + merge-multiple: true + - name: Display structure of downloaded files + run: ls -R dist-final + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist + path: dist-final + + Test: + needs: [CollectPackages] + timeout-minutes: 5 + strategy: + matrix: + BUILD_CONFIG: + - NAME: Linux_Python37 + PYTHON_VERSION_SPEC: '3.7' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 + - NAME: Linux_Python38 + PYTHON_VERSION_SPEC: '3.8' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 + - NAME: Linux_Python39 + PYTHON_VERSION_SPEC: '3.9' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 + - NAME: Linux_Python310 + PYTHON_VERSION_SPEC: '3.10' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 + - NAME: Linux_Python311 + PYTHON_VERSION_SPEC: '3.11' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 + - NAME: Mac_Python37 + PYTHON_VERSION_SPEC: '3.7' + IMAGE: macos-12 + PYTHON_INTERPRETER: python3 + - NAME: Mac_Python38 
+ PYTHON_VERSION_SPEC: '3.8' + IMAGE: macos-12 + PYTHON_INTERPRETER: python3 + - NAME: Mac_Python39 + PYTHON_VERSION_SPEC: '3.9' + IMAGE: macos-12 + PYTHON_INTERPRETER: python3 + - NAME: Mac_Python310 + PYTHON_VERSION_SPEC: '3.10' + IMAGE: macos-12 + PYTHON_INTERPRETER: python3 + - NAME: Mac_Python311 + PYTHON_VERSION_SPEC: '3.11' + IMAGE: macos-12 + PYTHON_INTERPRETER: python3 + - NAME: Windows_Python37 + PYTHON_VERSION_SPEC: '3.7' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 + - NAME: Windows_Python38 + PYTHON_VERSION_SPEC: '3.8' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 + - NAME: Windows_Python39 + PYTHON_VERSION_SPEC: '3.9' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 + - NAME: Windows_Python310 + PYTHON_VERSION_SPEC: '3.10' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 + - NAME: Windows_Python311 + PYTHON_VERSION_SPEC: '3.11' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 + runs-on: ${{ matrix.BUILD_CONFIG.IMAGE }} + steps: + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.BUILD_CONFIG.PYTHON_VERSION_SPEC }} + check-latest: false + - name: Checkout code + uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + name: dist + merge-multiple: true + - name: Packaging USD + run: | + which python3 + ${{ matrix.BUILD_CONFIG.PYTHON_INTERPRETER }} --version + ${{ matrix.BUILD_CONFIG.PYTHON_INTERPRETER }} -m pip install pytest + ${{ matrix.BUILD_CONFIG.PYTHON_INTERPRETER }} -m pip install --no-index --find-links=file://${{ github.workspace }} usd-core + py.test --junitxml TEST-usdinstall-${{ matrix.BUILD_CONFIG.NAME }}.xml build_scripts/pypi/test.py + - name: Upload pytest test results + uses: actions/upload-artifact@v4 + with: + name: TEST-usdinstall-${{ matrix.BUILD_CONFIG.NAME }} + path: TEST-usdinstall-${{ matrix.BUILD_CONFIG.NAME }}.xml + # Use always() to always run this step to publish test results when there are test failures + if: ${{ always() }} \ No newline at end of file diff --git a/LICENSE.txt b/LICENSE.txt index a2cdda092e..06a5b356ee 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -508,6 +508,23 @@ Redistributions in binary form must reproduce the above copyright notice, this l THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +============================================================ +pbrt (sampling functions in hdEmbree/pxrPbrt/pbrUtils.h) +============================================================ + +Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. ============================================================ Draco @@ -850,3 +867,31 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ +boost +================================================================ + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index 19ad9ce782..1458206dde 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,8 @@ visit our [forum](https://groups.google.com/forum/#!forum/usd-interest). If you are experiencing undocumented problems with the software, please [file a bug](https://github.com/PixarAnimationStudios/OpenUSD/issues/new). +If you need to report a security issue with the software, please review the +[Security Policy](SECURITY.md). Supported Platforms ------------------- diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..6e1b0d8c2f --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +We appreciate your efforts to responsibly disclose your findings and will make +every effort to acknowledge your contributions. + +## Reporting a Vulnerability + +Please use the GitHub Security Advisory +["Report a Vulnerability" tab](https://github.com/PixarAnimationStudios/OpenUSD/security/advisories/new) +to report a security issue. Please do not report security vulnerabilities +through public issues, discussions, or change requests. + +The team will send a response indicating the next steps in handling your report. 
+After the initial reply to your report, the security team will keep you informed +of the progress towards a fix and may ask for additional information or guidance. + +Report security bugs in third-party plugins/applications to the team maintaining +the application. diff --git a/build_scripts/build_usd.py b/build_scripts/build_usd.py index f31eecd19f..aa1feab9a0 100644 --- a/build_scripts/build_usd.py +++ b/build_scripts/build_usd.py @@ -2578,6 +2578,7 @@ def _JoinVersion(v): OpenVDB support: {enableOpenVDB} OpenImageIO support: {buildOIIO} OpenColorIO support: {buildOCIO} + Embree support: {buildEmbree} PRMan support: {buildPrman} UsdImaging {buildUsdImaging} usdview: {buildUsdview} @@ -2641,6 +2642,7 @@ def FormatBuildArguments(buildArgs): enableOpenVDB=("On" if context.enableOpenVDB else "Off"), buildOIIO=("On" if context.buildOIIO else "Off"), buildOCIO=("On" if context.buildOCIO else "Off"), + buildEmbree=("On" if context.buildEmbree else "Off"), buildPrman=("On" if context.buildPrman else "Off"), buildUsdImaging=("On" if context.buildUsdImaging else "Off"), buildUsdview=("On" if context.buildUsdview else "Off"), diff --git a/cmake/defaults/CXXDefaults.cmake b/cmake/defaults/CXXDefaults.cmake index efcf54f099..c215d46a00 100644 --- a/cmake/defaults/CXXDefaults.cmake +++ b/cmake/defaults/CXXDefaults.cmake @@ -83,6 +83,12 @@ else() set(PXR_PYTHON_SUPPORT_ENABLED "0") endif() +if (PXR_USE_BOOST_PYTHON) + set(PXR_USE_INTERNAL_BOOST_PYTHON "0") +else() + set(PXR_USE_INTERNAL_BOOST_PYTHON "1") +endif() + # Set safety/performance configuration if (PXR_PREFER_SAFETY_OVER_SPEED) set(PXR_PREFER_SAFETY_OVER_SPEED "1") diff --git a/cmake/defaults/Options.cmake b/cmake/defaults/Options.cmake index 6c118a9479..739a8a7937 100644 --- a/cmake/defaults/Options.cmake +++ b/cmake/defaults/Options.cmake @@ -26,6 +26,7 @@ option(PXR_BUILD_PYTHON_DOCUMENTATION "Generate Python documentation" OFF) option(PXR_BUILD_HTML_DOCUMENTATION "Generate HTML documentation if PXR_BUILD_DOCUMENTATION is ON" ON) option(PXR_ENABLE_PYTHON_SUPPORT "Enable Python based components for USD" ON) option(PXR_USE_DEBUG_PYTHON "Build with debug python" OFF) +option(PXR_USE_BOOST_PYTHON "Use boost::python for Python bindings" ON) option(PXR_ENABLE_HDF5_SUPPORT "Enable HDF5 backend in the Alembic plugin for USD" OFF) option(PXR_ENABLE_OSL_SUPPORT "Enable OSL (OpenShadingLanguage) based components" OFF) option(PXR_ENABLE_PTEX_SUPPORT "Enable Ptex support" OFF) diff --git a/cmake/defaults/Packages.cmake b/cmake/defaults/Packages.cmake index fe60570b34..cf03d33b91 100644 --- a/cmake/defaults/Packages.cmake +++ b/cmake/defaults/Packages.cmake @@ -23,6 +23,10 @@ set(PXR_THREAD_LIBS "${CMAKE_THREAD_LIBS_INIT}") if(PXR_ENABLE_PYTHON_SUPPORT OR PXR_ENABLE_OPENVDB_SUPPORT) # Find Boost package before getting any boost specific components as we need to # disable boost-provided cmake config, based on the boost version found. + # + # XXX: + # Boost is currently required even when PXR_USE_BOOST_PYTHON is OFF, since + # pxr_boost::python still relies on header-only boost libraries. 
find_package(Boost REQUIRED) # Boost provided cmake files (introduced in boost version 1.70) result in @@ -95,31 +99,33 @@ if(PXR_ENABLE_PYTHON_SUPPORT) # USD builds only work with Python3 setup_python_package(Python3) - if(WIN32 AND PXR_USE_DEBUG_PYTHON) - set(Boost_USE_DEBUG_PYTHON ON) - endif() + if(PXR_USE_BOOST_PYTHON) + if(WIN32 AND PXR_USE_DEBUG_PYTHON) + set(Boost_USE_DEBUG_PYTHON ON) + endif() - # Manually specify VS2022, 2019, and 2017 as USD's supported compiler versions - if(WIN32) - set(Boost_COMPILER "-vc143;-vc142;-vc141") - endif() + # Manually specify VS2022, 2019, and 2017 as USD's supported compiler versions + if(WIN32) + set(Boost_COMPILER "-vc143;-vc142;-vc141") + endif() - # As of boost 1.67 the boost_python component name includes the - # associated Python version (e.g. python27, python36). - # XXX: After boost 1.73, boost provided config files should be able to - # work without specifying a python version! - # https://github.com/boostorg/boost_install/blob/master/BoostConfig.cmake - - # Find the component under the versioned name and then set the generic - # Boost_PYTHON_LIBRARY variable so that we don't have to duplicate this - # logic in each library's CMakeLists.txt. - set(python_version_nodot "${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}") - find_package(Boost - COMPONENTS - python${python_version_nodot} - REQUIRED - ) - set(Boost_PYTHON_LIBRARY "${Boost_PYTHON${python_version_nodot}_LIBRARY}") + # As of boost 1.67 the boost_python component name includes the + # associated Python version (e.g. python27, python36). + # XXX: After boost 1.73, boost provided config files should be able to + # work without specifying a python version! + # https://github.com/boostorg/boost_install/blob/master/BoostConfig.cmake + + # Find the component under the versioned name and then set the generic + # Boost_PYTHON_LIBRARY variable so that we don't have to duplicate this + # logic in each library's CMakeLists.txt. + set(python_version_nodot "${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}") + find_package(Boost + COMPONENTS + python${python_version_nodot} + REQUIRED + ) + set(Boost_PYTHON_LIBRARY "${Boost_PYTHON${python_version_nodot}_LIBRARY}") + endif() # --Jinja2 find_package(Jinja2) @@ -180,14 +186,6 @@ if (PXR_BUILD_DOCUMENTATION) endif() endif() -if (PXR_VALIDATE_GENERATED_CODE) - find_package(BISON 2.4.1 EXACT) - # Flex 2.5.39+ is required, generated API is generated incorrectly in - # 2.5.35, at least. scan_bytes generates with (..., int len, ...) instead of - # the correct (..., yy_size_t len, ...). Lower at your own peril. - find_package(FLEX 2.5.39 EXACT) -endif() - # Imaging Components Package Requirements # ---------------------------------------------- @@ -243,10 +241,7 @@ if (PXR_BUILD_IMAGING) endforeach() # Find the OS specific libs we need - if (APPLE) - find_library(MVK_LIBRARIES NAMES MoltenVK PATHS $ENV{VULKAN_SDK}/lib) - list(APPEND VULKAN_LIBS ${MVK_LIBRARIES}) - elseif (UNIX AND NOT APPLE) + if (UNIX AND NOT APPLE) find_package(X11 REQUIRED) list(APPEND VULKAN_LIBS ${X11_LIBRARIES}) elseif (WIN32) diff --git a/cmake/defaults/msvcdefaults.cmake b/cmake/defaults/msvcdefaults.cmake index 78bde7a77e..13d77d0ae7 100644 --- a/cmake/defaults/msvcdefaults.cmake +++ b/cmake/defaults/msvcdefaults.cmake @@ -87,10 +87,6 @@ _add_define("_SCL_SECURE_NO_WARNINGS") # will conflict with std::min() and std::max(). 
_add_define("NOMINMAX") -# Needed to prevent YY files trying to include unistd.h -# (which doesn't exist on Windows) -_add_define("YY_NO_UNISTD_H") - # Forces all libraries that have separate source to be linked as # DLL's rather than static libraries on Microsoft Windows, unless # explicitly told otherwise. diff --git a/cmake/macros/Private.cmake b/cmake/macros/Private.cmake index 48fe107dd7..775cf7e1c4 100644 --- a/cmake/macros/Private.cmake +++ b/cmake/macros/Private.cmake @@ -224,7 +224,7 @@ function(_install_python LIBRARY_NAME) add_custom_target(${LIBRARY_NAME}_pythonfiles DEPENDS ${files_copied} ) - add_dependencies(python ${LIBRARY_NAME}_pythonfiles) + add_dependencies(python_modules ${LIBRARY_NAME}_pythonfiles) _get_folder("_python" folder) set_target_properties(${LIBRARY_NAME}_pythonfiles @@ -254,6 +254,7 @@ function(_install_resource_files NAME pluginInstallPrefix pluginToLibraryPath) # A resource file may be specified like : to # indicate that it should be installed to a different location in # the resources area. Check if this is the case. + set(plugInfoNoSubstitution) string(REPLACE ":" ";" resourceFile "${resourceFile}") list(LENGTH resourceFile n) if (n EQUAL 1) @@ -330,7 +331,7 @@ function(_install_pyside_ui_files LIBRARY_NAME) add_custom_target(${LIBRARY_NAME}_pysideuifiles DEPENDS ${uiFiles} ) - add_dependencies(python ${LIBRARY_NAME}_pythonfiles) + add_dependencies(python_modules ${LIBRARY_NAME}_pythonfiles) _get_folder("_pysideuifiles" folder) set_target_properties( @@ -991,7 +992,7 @@ function(_pxr_python_module NAME) SHARED ${args_CPPFILES} ) - add_dependencies(python ${LIBRARY_NAME}) + add_dependencies(python_modules ${LIBRARY_NAME}) if(args_PYTHON_FILES) add_dependencies(${LIBRARY_NAME} ${LIBRARY_NAME}_pythonfiles) endif() diff --git a/cmake/macros/Public.cmake b/cmake/macros/Public.cmake index 9f3d358377..1b809e738d 100644 --- a/cmake/macros/Public.cmake +++ b/cmake/macros/Public.cmake @@ -76,7 +76,7 @@ function(pxr_python_bin BIN_NAME) ) # If we can't build Python modules then do nothing. - if(NOT TARGET python) + if(NOT TARGET python_modules) message(STATUS "Skipping Python program ${BIN_NAME}, Python modules required") return() endif() @@ -141,7 +141,7 @@ function(pxr_python_bin BIN_NAME) add_custom_target(${BIN_NAME}_script DEPENDS ${outputs} ${pb_DEPENDENCIES} ) - add_dependencies(python ${BIN_NAME}_script) + add_dependencies(python_modules ${BIN_NAME}_script) _get_folder("" folder) set_target_properties(${BIN_NAME}_script @@ -207,6 +207,7 @@ endfunction() function(pxr_library NAME) set(options DISABLE_PRECOMPILED_HEADERS + INCLUDE_SCHEMA_FILES ) set(oneValueArgs TYPE @@ -242,23 +243,90 @@ function(pxr_library NAME) # If python support is enabled, merge the python specific categories # with the more general before setting up compilation. 
if(PXR_ENABLE_PYTHON_SUPPORT) + set(libraryRequiresPython 0) if(args_PYTHON_PUBLIC_CLASSES) list(APPEND args_PUBLIC_CLASSES ${args_PYTHON_PUBLIC_CLASSES}) + set(libraryRequiresPython 1) endif() if(args_PYTHON_PUBLIC_HEADERS) list(APPEND args_PUBLIC_HEADERS ${args_PYTHON_PUBLIC_HEADERS}) + set(libraryRequiresPython 1) endif() if(args_PYTHON_PRIVATE_CLASSES) list(APPEND args_PRIVATE_CLASSES ${args_PYTHON_PRIVATE_CLASSES}) + set(libraryRequiresPython 1) endif() if(args_PYTHON_PRIVATE_HEADERS) list(APPEND args_PRIVATE_HEADERS ${args_PYTHON_PRIVATE_HEADERS}) + set(libraryRequiresPython 1) endif() if(args_PYTHON_CPPFILES) list(APPEND args_CPPFILES ${args_PYTHON_CPPFILES}) + set(libraryRequiresPython 1) + endif() + + if(libraryRequiresPython) + list(APPEND args_LIBRARIES ${PYTHON_LIBRARIES} python) + list(APPEND args_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) endif() endif() + # If this is a schema library, add schema classes + if (args_INCLUDE_SCHEMA_FILES) + set(filePath "generatedSchema.classes.txt") + + # Register a dependency so that cmake will regenerate the build + # system if generatedSchema.classes.txt changes + set_property( + DIRECTORY + APPEND + PROPERTY CMAKE_CONFIGURE_DEPENDS + ${filePath} + ) + + # Read the generated classes + file(STRINGS ${filePath} fileContents) + + # fileType potential values: + # -1: Skip line + # 0: Public Classes + # 1: Python Module Files + # 2: Resource Files + set(fileType -1) + + foreach(line ${fileContents}) + # Determine which section of the generated file we are in. + if (${fileType} EQUAL -1) + string(FIND ${line} "# Public Classes" found) + if (NOT ${found} EQUAL -1) + set(fileType 0) + continue() + endif() + elseif(${fileType} EQUAL 0) + string(FIND ${line} "# Python Module Files" found) + if (NOT ${found} EQUAL -1) + set(fileType 1) + continue() + endif() + elseif(${fileType} EQUAL 1) + string(FIND ${line} "# Resource Files" found) + if (NOT ${found} EQUAL -1) + set(fileType 2) + continue() + endif() + endif() + + # Depending on the file type, append to the appropriate list. + if (${fileType} EQUAL 0) + list(APPEND args_PUBLIC_CLASSES ${line}) + elseif(${fileType} EQUAL 1) + list(APPEND args_PYMODULE_CPPFILES ${line}) + elseif(${fileType} EQUAL 2) + list(APPEND args_RESOURCE_FILES ${line}) + endif() + endforeach() + endif() + # Collect libraries. if(NOT args_TYPE STREQUAL "PLUGIN") get_property(help CACHE PXR_ALL_LIBS PROPERTY HELPSTRING) @@ -336,13 +404,15 @@ function(pxr_library NAME) ) if(PXR_ENABLE_PYTHON_SUPPORT AND (args_PYMODULE_CPPFILES OR args_PYMODULE_FILES OR args_PYSIDE_UI_FILES)) + list(APPEND pythonModuleIncludeDirs ${PYTHON_INCLUDE_DIRS}) + _pxr_python_module( ${NAME} WRAPPED_LIB_INSTALL_PREFIX "${libInstallPrefix}" PYTHON_FILES ${args_PYMODULE_FILES} PYSIDE_UI_FILES ${args_PYSIDE_UI_FILES} CPPFILES ${args_PYMODULE_CPPFILES} - INCLUDE_DIRS ${args_INCLUDE_DIRS} + INCLUDE_DIRS "${args_INCLUDE_DIRS};${pythonModuleIncludeDirs}" PRECOMPILED_HEADERS ${pch} PRECOMPILED_HEADER_NAME ${args_PRECOMPILED_HEADER_NAME} ) @@ -388,7 +458,7 @@ endfunction() # pxr_setup_python function (pxr_create_test_module MODULE_NAME) # If we can't build Python modules then do nothing. - if(NOT TARGET python) + if(NOT TARGET python_modules) return() endif() @@ -573,7 +643,7 @@ endfunction() # pxr_build_test function(pxr_test_scripts) # If we can't build Python modules then do nothing. 
- if(NOT TARGET python) + if(NOT TARGET python_modules) return() endif() @@ -666,7 +736,7 @@ function(pxr_register_test TEST_NAME) endif() endif() - if(NOT TARGET python) + if(NOT TARGET python_modules) # Implicit requirement. Python modules require shared USD # libraries. If the test runs python it's certainly going # to load USD modules. If the test uses C++ to load USD @@ -1030,9 +1100,9 @@ function(pxr_toplevel_prologue) endif() # Create a target for targets that require Python. Each should add - # itself as a dependency to the "python" target. + # itself as a dependency to the "python_modules" target. if(TARGET shared_libs AND PXR_ENABLE_PYTHON_SUPPORT) - add_custom_target(python ALL) + add_custom_target(python_modules ALL) endif() endfunction() # pxr_toplevel_prologue @@ -1317,4 +1387,22 @@ function(pxr_docs_only_dir NAME) ${args_DOXYGEN_FILES} ) endif() -endfunction() # pxr_docs_only_dir \ No newline at end of file +endfunction() # pxr_docs_only_dir + +# Sets rpaths for the specified TARGET to the given RPATHS. The target's +# runtime destination directory is given by ORIGIN. If ORIGIN is not +# absolute it is assumed to be relative to CMAKE_INSTALL_PREFIX. +function(pxr_set_rpaths_for_target TARGET) + set(oneValueArgs ORIGIN) + set(multiValueArgs RPATHS) + cmake_parse_arguments(args "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + _pxr_init_rpath(rpath ${args_ORIGIN}) + + foreach(path IN LISTS args_RPATHS) + _pxr_add_rpath(rpath ${path}) + endforeach() + + _pxr_install_rpath(rpath ${TARGET}) + +endfunction() # pxr_set_rpaths_for_target diff --git a/docs/contributing_to_usd.rst b/docs/contributing_to_usd.rst index 98bf013555..5d96be6114 100644 --- a/docs/contributing_to_usd.rst +++ b/docs/contributing_to_usd.rst @@ -7,6 +7,9 @@ Contributing to USD **We're excited to collaborate with the community and look forward to the many improvements you can make to USD!** +.. _contributor_license_agreement: + +***************************** Contributor License Agreement ***************************** @@ -24,47 +27,68 @@ CLAs: : please sign this one if you're an individual contributor Once your CLA is signed, send it to `usd-cla@pixar.com -`_ (please make sure to include your github username) -and wait for confirmation that we've received it. After that, you can submit -pull requests. +`__. **Please make sure to include your GitHub username** +so we can grant your GitHub account appropriate permissions to the OpenUSD repo. + +You can start to make code contributions once you’ve received confirmation that +we've received your CLA. If you are planning on making a major change (for +example, adding a new feature, or making a code change that modifies dozens of +lines of code across several different files), see +:ref:`planning_major_changes` for recommended next steps. Otherwise, for +smaller changes, such as bugfixes, you can submit GitHub pull requests for +consideration, using the +:ref:`pull request guidelines`. +.. _coding_conventions: + +****************** Coding Conventions ****************** -Please follow the coding convention and style in each file and in each library -when adding new files. +Please review the coding conventions described in the +`Coding Guidelines `__ and +`Testing Guidelines `__ and follow the +coding conventions and styles in each file and library when making changes. + +.. 
_pull_request_guidelines: +*********************** Pull Request Guidelines *********************** - * All development on USD should happen against the "**dev**" branch of the + * All development should happen against the "**dev**" branch of the repository. Please make sure the base branch of your pull request is set to the "**dev**" branch when filing your pull request. - * Please make sure all tests are passing with your change prior to - submitting a pull request. Keep in mind the current github CI pipeline - does not run any tests, however tests will be run when reviewing your - submitted change for consideration. + * Please make pull requests that are small and atomic. In general, it is + easier for us to merge pull requests that serve a single + purpose than those that combine several functional pieces. * Please make sure that your pull requests are clean. Use the rebase and squash git facilities as needed to ensure that the pull request is as clean as possible. - * Please make pull requests that are small and atomic. In general, it is - easier for us to merge pull requests that serve a single - purpose than those that combine several functional pieces. + * Please make sure all tests are passing with your change prior to + submitting a pull request. Keep in mind the current GitHub CI pipeline + does not run any tests, however tests will be run when reviewing your + submitted change for consideration. + + * Please search through + `existing open GitHub issues `__ + and associate your PR with issues that your change addresses. If there are + no issues related to your change, you do not need to create a new issue. + However, if your change requires multiple pull requests, it can be helpful + to create a single issue to link together and organize related PRs. - * Please search through existing open github issues and associate your PR - with issues that your change addresses, as described in :ref:`github_issues`. - If there are no issues related to your change, you do not need to create - a new issue. +.. _git_workflow: +************ Git Workflow ************ -Here is the workflow we recommend for contributing changes to USD: +Here is the workflow we recommend for contributing changes to OpenUSD: - #. Use the github website to fork your own private repository. + #. Use the GitHub website to fork your own private repository. .. | space | @@ -76,7 +100,7 @@ Here is the workflow we recommend for contributing changes to USD: - #. Add Pixar's USD repo as upstream to make it easier to update your remote + #. Add Pixar's OpenUSD repo as upstream to make it easier to update your remote and local repos with the latest changes: .. code-block:: sh @@ -86,7 +110,7 @@ Here is the workflow we recommend for contributing changes to USD: - #. Now fetch the latest changes from Pixar's USD repo like this: + #. Now fetch the latest changes from Pixar's OpenUSD repo like this: .. code-block:: sh @@ -105,7 +129,7 @@ Here is the workflow we recommend for contributing changes to USD: - #. Now you can work in your branch locally. Please review the USD + #. Now you can work in your branch locally. Please review the `Coding Guidelines `__ and `Testing Guidelines `__ when making code changes. @@ -131,25 +155,135 @@ Here is the workflow we recommend for contributing changes to USD: #. Now your remote branch will have your dev_mybugfix branch, which you can - now pull request (to USD's dev branch) using the github UI. + now pull request (to OpenUSD's dev branch) using the GitHub UI. 
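+
+A condensed sketch of the workflow above (the fork URL and branch name are
+illustrative; substitute your own):
+
+.. code-block:: sh
+
+   git clone https://github.com/<your-username>/OpenUSD.git
+   cd OpenUSD
+   git remote add upstream https://github.com/PixarAnimationStudios/OpenUSD.git
+   git fetch upstream
+   git checkout -b dev_mybugfix upstream/dev
+   # ...make your changes and commit them...
+   git push origin dev_mybugfix
+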
When your pull request is merged, it will be available in the next dev and full -release. For USD release schedules, see :ref:`release_schedule` +release. For OpenUSD release schedules, see :ref:`release_schedule`. .. _github_issues: -Github Issues -############# - -Use github issues to report problems or suggestions that need discussion, or -that you might not be able to address yourself. - -You do not need to log an issue for contributing changes, or if your change -fixes an unreported issue. However, if your change requires multiple pull -requests, a single issue can be created and referenced from those PRs to -organize them. - -Before sending your change in for consideration, search through the list of -`open issues on github `_ -and check if your change addresses any issue. If so, associate your pull -request with that issue. +GitHub Issues +============= + +Use GitHub issues to report problems or suggestions that need discussion, or +that you might not be able to address yourself. Please check that your issue +does not already exist in the list of +`open issues on GitHub `__ +before submitting to avoid duplicates. + +When new issues are filed in GitHub, Pixar makes a copy in our internal issue +tracker. When "Filed as internal issue USD-XXXX" is added to an issue, this is +an automated acknowledgment that the issue has been captured in our tracker and +will be triaged for review. It does not mean work has started on the issue yet. +Some GitHub issues may be tagged with labels following triage to invite +contributions from the community. See the definitions of each label in GitHub +`here `__. + +.. _planning_major_changes: + +******************** +Making Major Changes +******************** + +Please communicate your intent to make major changes with Pixar before starting +work, to ensure your changes align with the OpenUSD strategy and reduce rework +later. Below is the recommended workflow, with each step described in more detail +afterwards. + +.. image:: contributing_workflow_diagram.svg + +Step 1. Get consensus for major changes +======================================= + +If you would like to propose a major change that is not an architectural change, +please give us a heads up by finding and commenting on the existing +:ref:`GitHub issue ` that represents the problem, or creating a +new one if an appropriate issue doesn't already exist. Please briefly explain +your high-level approach so the community and Pixar engineers can comment if +there is already related work in progress or if the approach raises any concerns. + +If you are proposing architectural changes such as schema changes/additions, +major C++ API changes/additions, or new build dependencies for OpenUSD, we +recommend writing and posting a proposal to build consensus with the broader +OpenUSD community. Proposals should be posted in the +`OpenUSD-proposals GitHub repo `__. +See the `repo README `__ +for more details on the OpenUSD proposals process. + +Proposals are often brought up and discussed in +`ASWF USD Working Group meetings `__, +which are typically held on Zoom every other Wednesday at 1pm PST. + +Proposals are ready to move forward when they've moved to the +`Published `__ +phase. + +Step 2. Make code changes +========================= + +Make code changes using the :ref:`git workflow ` described +above, following the `Coding Guidelines `__. + +Don't forget to +`add API and user documentation `__ +as needed. + +Where possible, break up your changes into smaller commits that are +functionally complete. 
This makes it easier for Pixar to review and, if +necessary, to troubleshoot any regressions, when you submit your changes for +review (Step 4 below). See +`Make Small Atomic Changes `__ +for some approaches on splitting changes into separate commits. + +Step 3. Test code changes +========================= + +Test your code changes, following the +`Testing Guidelines `__ as needed. +With major changes, make sure to extend existing test coverage or provide new +tests for any added functionality. + +Step 4. Submit code for review +============================== + +Once your code is ready for review, submit a GitHub PR using the +:ref:`pull request guidelines `. + +For your PR description, begin with a brief summary of the change in 50 +characters or less, followed by a blank line and then a more detailed +description of the change. Some questions to consider when drafting your +description: + +* How did this behave prior to your change? +* How was that behavior problematic? +* How does your change modify the behavior? +* What are the benefits of the modified behavior? +* Are there direct impacts to users? +* Are there follow up changes or asset changes required? + +Be clear and concise. Include related PRs and issue identifiers, if +applicable. **Please keep PR descriptions up-to-date with any code iterations.** + +Pixar will do a code review of your pull request. Use GitHub PR review comments +to discuss and review suggestions and questions that come up during the code +review. Any code review comments will need to be either addressed or further +discussed before the change can be merged. If reviewers use +`GitHub suggested changes `__, +you can use the web UI to automatically apply those changes if you agree with +the suggestion. + +Code submitted for review is expected to be buildable and testable. There may be +exceptions if you're actively engaged in discussions with a Pixar engineer about +specific parts of code. + +Step 5. Pixar will test and land your changes +============================================= + +GitHub PRs are not landed directly into the GitHub dev branch, but rather into +Pixar's internal development tree. We do this to facilitate the automated +correctness and performance testing using production assets prior to merging the +change. The open source branch is then extracted from Pixar's internal +development tree and pushed to the OpenUSD GitHub **dev** branch on a +:ref:`regular cadence `. Once your PR has been incorporated +internally and the OpenUSD repo dev branch has been updated, Pixar will +automatically close your PR. \ No newline at end of file diff --git a/docs/contributing_workflow_diagram.svg b/docs/contributing_workflow_diagram.svg new file mode 100644 index 0000000000..215db5c680 --- /dev/null +++ b/docs/contributing_workflow_diagram.svg @@ -0,0 +1,4 @@ + + + +
[SVG markup not preserved in this view. The diagram depicts the contributing workflow: Start → (1) "Consensus on proposed work?" (No: continue discussing; Yes: proceed) → (2) "Write code & draft documentation" → (3) "Test code" / "Did the tests pass?" (consult as needed) → (4) "Submit for code review" / "Are there comments to address?" → (5) "Pixar runs internal tests" / "Did the tests pass?" → "Code lands in GitHub dev branch" → Finish.]
\ No newline at end of file diff --git a/docs/dl_downloads.rst b/docs/dl_downloads.rst index 26ce384d47..948b0470d6 100644 --- a/docs/dl_downloads.rst +++ b/docs/dl_downloads.rst @@ -4,6 +4,11 @@ Downloads and Videos ==================== +SIGGRAPH 2024 Notes +=================== + +| `SIGGRAPH 2024 USD, Hydra, and OpenSubdiv Birds of a Feather Notes `__ + SIGGRAPH 2023 Notes =================== diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in index d8c444a718..26767ecd16 100644 --- a/docs/doxygen/Doxyfile.in +++ b/docs/doxygen/Doxyfile.in @@ -1,4 +1,4 @@ -# Doxyfile 1.8.6 +# Doxyfile 1.9.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -12,6 +12,16 @@ # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). +# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] #--------------------------------------------------------------------------- # Project related configuration options @@ -71,6 +81,25 @@ OUTPUT_DIRECTORY = CREATE_SUBDIRS = NO +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# number of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. +# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. @@ -172,6 +201,16 @@ SHORT_NAMES = NO JAVADOC_AUTOBRIEF = YES +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus @@ -192,6 +231,14 @@ QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. 
+ +PYTHON_DOCSTRING = YES + # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. @@ -227,12 +274,6 @@ ALIASES = "scriptableClass=\todo" \ "scriptableEnum=" \ "factoryFunc=" -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all @@ -261,6 +302,14 @@ OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it @@ -287,6 +336,15 @@ EXTENSION_MAPPING = h=C++ MARKDOWN_SUPPORT = YES +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 5 + # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by by putting a % sign in front of the word @@ -337,14 +395,21 @@ IDL_PROPERTY_SUPPORT = YES DISTRIBUTE_GROUP_DOC = NO +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. -SUBGROUPING = YES +SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) @@ -391,6 +456,19 @@ TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 3 +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. 
Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- @@ -411,6 +489,12 @@ EXTRACT_ALL = NO EXTRACT_PRIVATE = NO +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. # The default value is: NO. @@ -448,6 +532,13 @@ EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation @@ -461,15 +552,15 @@ HIDE_UNDOC_MEMBERS = NO # to NO these classes will be included in the various overviews. This option has # no effect if EXTRACT_ALL is enabled. # The default value is: NO. -HIDE_UNDOC_CLASSES = YES +HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO these declarations will be # included in the documentation. # The default value is: NO. -HIDE_FRIEND_COMPOUNDS = NO +HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO these @@ -501,6 +592,19 @@ CASE_SENSE_NAMES = NO HIDE_SCOPE_NAMES = YES +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -714,6 +818,13 @@ WARN_IF_UNDOCUMENTED = NO WARN_IF_DOC_ERROR = NO +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = NO + # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. 
If set to NO doxygen will only warn about wrong or incomplete parameter @@ -722,6 +833,23 @@ WARN_IF_DOC_ERROR = NO WARN_NO_PARAMDOC = NO +# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about +# undocumented enumeration values. If set to NO, doxygen will accept +# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: NO. + +WARN_IF_UNDOC_ENUM_VAL = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated @@ -732,6 +860,16 @@ WARN_NO_PARAMDOC = NO WARN_FORMAT = "$file:$line: $text" +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). @@ -774,6 +912,16 @@ INPUT = docs/doxygen/externalOverview.dox \ INPUT_ENCODING = UTF-8 +# This tag can be used to specify the character encoding of the source files +# that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify +# character encoding on a per file pattern basis. Doxygen will compare the file +# name with each pattern and apply the encoding instead of the default +# INPUT_ENCODING) if there is a match. The character encodings are a list of the +# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding +# "INPUT_ENCODING" for further information on supported encodings. + +INPUT_FILE_ENCODING = + # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the @@ -1018,6 +1166,46 @@ USE_HTAGS = NO VERBATIM_HEADERS = YES +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If the CLANG_ASSISTED_PARSING tag is set to YES and the CLANG_ADD_INC_PATHS +# tag is set to YES then doxygen will add the directory of each input to the +# include path. +# The default value is: YES. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. 
+ +CLANG_ADD_INC_PATHS = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the directory containing a file called compile_commands.json. This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. + +CLANG_DATABASE_PATH = + #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- @@ -1029,13 +1217,6 @@ VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored @@ -1118,7 +1299,7 @@ HTML_STYLESHEET = # Doxygen will copy the style sheet file to the output directory. For an example # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -# !!NOTE: We override HTML_EXTRA_STYLESHEET in the doxygen awesome settings below +# NOTE: We override HTML_EXTRA_STYLESHEET in the Doxygen Awesome settings below # HTML_EXTRA_STYLESHEET = docs/doxygen/usd_style.css @@ -1129,10 +1310,23 @@ HTML_STYLESHEET = # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. -# NOTE: See also Doxygen Awesome additions below +# NOTE: We also override HTML_EXTRA_FILES in Doxygen Awesome additions below HTML_EXTRA_FILES = +# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output +# should be rendered with a dark or light theme. +# Possible values are: LIGHT always generate light mode output, DARK always +# generate dark mode output, AUTO_LIGHT automatically set the mode according to +# the user preference, use light mode if no preference is set (the default), +# AUTO_DARK automatically set the mode according to the user preference, use +# dark mode if no preference is set and TOGGLE allow to user to switch between +# light and dark mode via a button. +# The default value is: AUTO_LIGHT. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE = LIGHT + # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
Doxygen # will adjust the colors in the stylesheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see @@ -1171,6 +1365,17 @@ HTML_COLORSTYLE_GAMMA = 80 HTML_TIMESTAMP = YES +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. @@ -1214,6 +1419,13 @@ GENERATE_DOCSET = NO DOCSET_FEEDNAME = "Doxygen generated docs" +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. @@ -1234,7 +1446,7 @@ DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. -DOCSET_PUBLISHER_NAME = Pixar Animation Studios +DOCSET_PUBLISHER_NAME = "Pixar Animation Studios" # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The @@ -1407,6 +1619,18 @@ DISABLE_INDEX = NO GENERATE_TREEVIEW = YES +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # @@ -1431,6 +1655,24 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML @@ -1440,16 +1682,11 @@ EXT_LINKS_IN_WINDOW = NO FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. -FORMULA_TRANSPARENT = YES +FORMULA_MACROFILE = # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering @@ -1462,6 +1699,17 @@ FORMULA_TRANSPARENT = YES USE_MATHJAX = NO +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_VERSION = MathJax_2 + # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. @@ -1713,16 +1961,6 @@ LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source -# code with syntax highlighting in the LaTeX output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_SOURCE_CODE = NO - # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # http://en.wikipedia.org/wiki/BibTeX and \cite for more info. @@ -1851,6 +2089,13 @@ XML_OUTPUT = docs/doxy_xml XML_PROGRAMLISTING = YES +# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include +# namespace members in file scope as well, matching the HTML output. +# The default value is: NO. +# This tag requires that the tag GENERATE_XML is set to YES. + +XML_NS_MEMB_FILE_SCOPE = NO + #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- @@ -2047,34 +2292,10 @@ EXTERNAL_GROUPS = NO EXTERNAL_PAGES = NO -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. 
- -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram -# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to -# NO turns the diagrams off. Note that this option also works with HAVE_DOT -# disabled, but it is recommended to install and use dot, since it yields more -# powerful graphs. -# The default value is: YES. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. @@ -2107,22 +2328,35 @@ HAVE_DOT = @DOXYGEN_GENERATE_HTML@ DOT_NUM_THREADS = 0 -# When you want a differently looking font n the dot files that doxygen -# generates you can specify the font name using DOT_FONTNAME. You need to make -# sure dot is able to find the font, which can be done by putting it in a -# standard location or by setting the DOTFONTPATH environment variable or by -# setting DOT_FONTPATH to the directory containing the font. -# The default value is: Helvetica. +# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of +# subgraphs. When you want a differently looking font in the dot files that +# doxygen generates you can specify fontname, fontcolor and fontsize attributes. +# For details please see Node, +# Edge and Graph Attributes specification You need to make sure dot is able +# to find the font, which can be done by putting it in a standard location or by +# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. Default graphviz fontsize is 14. +# The default value is: fontname=Helvetica,fontsize=10. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTNAME = Helvetica +DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10" -# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of -# dot graphs. -# Minimum value: 4, maximum value: 24, default value: 10. +# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can +# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. Complete documentation about +# arrows shapes. +# The default value is: labelfontname=Helvetica,labelfontsize=10. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTSIZE = 10 +DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10" + +# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes +# around nodes set 'shape=plain' or 'shape=plaintext' Shapes specification +# The default value is: shape=box,height=0.2,width=0.4. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4" # By default doxygen will tell dot to use the default font as specified with # DOT_FONTNAME. 
If you specify a different font using DOT_FONTNAME you can set @@ -2176,6 +2410,28 @@ UML_LOOK = NO UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. The +# class diagrams will look similar to the default class diagrams but using UML +# notation for the relationships. +# Possible values are: NO, YES and NONE. +# The default value is: NO. +# This tag requires that the tag UML_LOOK is set to YES. + +DOT_UML_DETAILS = NO + +# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters +# to display on a single line. If the actual line length exceeds this threshold +# significantly it will wrapped across multiple lines. Some heuristics are apply +# to avoid ugly line breaks. +# Minimum value: 0, maximum value: 1000, default value: 17. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_WRAP_THRESHOLD = 17 + # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. @@ -2240,6 +2496,13 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES +# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels +# of child directories generated in directory dependency graphs by dot. +# Minimum value: 1, maximum value: 25, default value: 1. +# This tag requires that the tag DIRECTORY_GRAPH is set to YES. + +DIR_GRAPH_MAX_DEPTH = 1 + # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order @@ -2288,6 +2551,24 @@ MSCFILE_DIRS = DIAFILE_DIRS = +# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the +# path where java can find the plantuml.jar file or to the filename of jar file +# to be used. If left blank, it is assumed PlantUML is not used or called during +# a preprocessing step. Doxygen will generate a warning when it encounters a +# \startuml command in this case and will not generate output for the diagram. + +PLANTUML_JAR_PATH = + +# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a +# configuration file for plantuml. + +PLANTUML_CFG_FILE = + +# When using plantuml, the specified paths are searched for files specified by +# the !include statement in a plantuml block. + +PLANTUML_INCLUDE_PATH = + # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized @@ -2312,18 +2593,6 @@ DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not seem -# to support this out of the box. -# -# Warning: Depending on the platform used, enabling this option may lead to -# badly anti-aliased labels on the edges of a graph (i.e. they become hard to -# read). -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. 
- -DOT_TRANSPARENT = YES - # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support @@ -2348,15 +2617,10 @@ GENERATE_LEGEND = YES DOT_CLEANUP = YES -#CLANG_ASSISTED_PARSING = YES - # Setup for Doxygen Awesome CSS -FULL_SIDEBAR = NO HTML_EXTRA_STYLESHEET = docs/doxygen/doxygen-awesome-css/doxygen-awesome.css \ docs/doxygen/usd_style.css HTML_EXTRA_FILES = docs/doxygen/doxygen-awesome-css/doxygen-awesome-darkmode-toggle.js \ docs/doxygen/doxygen-awesome-css/doxygen-awesome-fragment-copy-button.js \ docs/doxygen/doxygen-awesome-css/doxygen-awesome-paragraph-link.js -# HTML_COLORSTYLE is a Doxygen 1.9.x config required for Doxygen Awesome -HTML_COLORSTYLE = LIGHT \ No newline at end of file diff --git a/docs/spec_usdz.rst b/docs/spec_usdz.rst index b7603d0322..1d2ef2f6fc 100644 --- a/docs/spec_usdz.rst +++ b/docs/spec_usdz.rst @@ -60,15 +60,15 @@ Usdz Specification A usdz package is an uncompressed zip archive that is allowed to contain the following file types: - +-------------+--------------------------------+ - | Kind | Allowed File Types | - +=============+================================+ - | USD | **usda**, **usdc**, **usd** | - +-------------+--------------------------------+ - | Image | **png**, **jpeg**, **exr** | - +-------------+--------------------------------+ - | Audio | **M4A**, **MP3**, **WAV** | - +-------------+--------------------------------+ + +-------------+----------------------------------------+ + | Kind | Allowed File Types | + +=============+========================================+ + | USD | **usda**, **usdc**, **usd** | + +-------------+----------------------------------------+ + | Image | **png**, **jpeg**, **exr**, **avif** | + +-------------+----------------------------------------+ + | Audio | **M4A**, **MP3**, **WAV** | + +-------------+----------------------------------------+ The rest of the section goes into more detail about the specification. @@ -211,8 +211,8 @@ currently. Allowable file types are currently: OS updates) * **png**, **jpeg** (any of the multiple common extensions for - jpeg), and **OpenEXR** files for images/textures. See - :ref:`Working With Image File Formats` for more + jpeg), **OpenEXR** and **AV1 Image (AVIF)** files for images/textures. + See :ref:`Working With Image File Formats` for more details on supported image file formats. * **M4A, MP3, WAV** files for embedded audio (given in order of preferred diff --git a/docs/tut_helloworld.rst b/docs/tut_helloworld.rst index aba9a314b5..2576a1e0c3 100644 --- a/docs/tut_helloworld.rst +++ b/docs/tut_helloworld.rst @@ -45,7 +45,7 @@ Use :ref:`toolset:usdview` to visualize and inspect the stage. .. image:: http://openusd.org/images/tut_helloworld_sphere_1.png -#. You can refine the geometry with the :menuselection:`View --> Complexity` +#. You can refine the geometry with the :menuselection:`Display --> Complexity` menu item or use the hotkeys :kbd:`Ctrl-+` and :kbd:`Ctrl--` to increase or decrease the refinement. diff --git a/docs/tut_simple_shading.rst b/docs/tut_simple_shading.rst index 37d68c2555..3f49e7f800 100644 --- a/docs/tut_simple_shading.rst +++ b/docs/tut_simple_shading.rst @@ -33,7 +33,8 @@ text :filename:`.usda` outputs. The relevant scripts and data files reside in the USD distribution in :filename:`USD/extras/usd/tutorials/simpleShading`. 
Run :filename:`generate_simpleShading.py` in that directory to generate all of the -snippets for each step shown below. +snippets for each step shown below, or examine the pre-generated +`simpleShading.usda `__ file. Making a Model ============== diff --git a/docs/tut_usdview_plugin.rst b/docs/tut_usdview_plugin.rst index e552738b10..8b62eeeaf7 100644 --- a/docs/tut_usdview_plugin.rst +++ b/docs/tut_usdview_plugin.rst @@ -135,7 +135,7 @@ the Interpreter window in :program:`usdview` * :python:`viewSettings` - A collection of settings which only affect the viewport. Most of these settings are normally controlled using - usdview's 'View' menu. Some examples are listed below. + usdview's 'Display' menu. Some examples are listed below. * :python:`complexity` - The scene's subdivision complexity. diff --git a/docs/usd_products.rst b/docs/usd_products.rst index 7d6856c024..cfd6ed44f9 100644 --- a/docs/usd_products.rst +++ b/docs/usd_products.rst @@ -111,13 +111,6 @@ Motion -------- -AnimVR -====== - -`AnimVR `_ is a virtual reality animation tool. - --------- - ArcGIS CityEngine ================= @@ -286,14 +279,6 @@ Intel OSPRay `Intel's OSPRay renderer `_ is a path-traced renderer with an `open source Hydra delegate `_. - --------- - -Isotropix Clarisse -================== - -`Clarisse `_ is a 3D application for look development, lighting and rendering. - -------- Dreamworks Moonray @@ -352,8 +337,7 @@ ZBrush `ZBrush `_ is a 3D sculpting and painting application. -`ZBrush USD documenation `_ - +`ZBrush USD documenation `_ -------- diff --git a/docs/usdfaq.rst b/docs/usdfaq.rst index 0bde721d62..0dcd79a07c 100644 --- a/docs/usdfaq.rst +++ b/docs/usdfaq.rst @@ -694,7 +694,7 @@ Build and Runtime Issues ======================== How do I use the ``TF_DEBUG`` mechanism? -#################################### +######################################## The ``TF_DEBUG`` mechanism is a powerful tool for debugging USD applications. It allows you to enable and disable debugging output at runtime, and to control diff --git a/docs/user_guides/render_user_guide.rst b/docs/user_guides/render_user_guide.rst index bf190db1e8..c3810b7b65 100644 --- a/docs/user_guides/render_user_guide.rst +++ b/docs/user_guides/render_user_guide.rst @@ -626,7 +626,7 @@ from RenderMan. These are: interpolated over each face of the mesh. Bilinear interpolation is used for interpolation between the four values. -For a graphical illustration of these modes, see `Primvar Interpolation `__ +For a graphical illustration of these modes, see `Primvar Interpolation `__ As :usda:`faceVarying` allows for per-vertex-per-face values, you can use this interpolation to create discontinuous vertex UVs or normals. For example, with @@ -1141,11 +1141,29 @@ compression is applied. It is expected that more complex treatment of OpenEXR files including the construction of multilayer files will be completed by pipeline tools. +AV1 Image File Format (AVIF) +============================ + +The AV1 Image Format is a royalty-free open-source picture format, with modern +compression, flexible color specification, high dynamic range values, depth +images and alpha channels, and support for layered and sequential images. + +The supported feature set in Hydra's builtin texture manager is currently +restricted to single frame images, which are decoded to linear Rec709 RGB or +RGBA if an alpha channel is present. + +Reading is implemented through the use of libaom, and libavif. +YUV to RGB decoding is accomplished by libyuv. 
+ +libaom is the reference codec library created by the Alliance for Open Media. +libavif is a portable C implementation of the AV1 Image File Format. + See also: - `OpenEXR reference `__ - `Rec709 standard `__ - `Wikipedia entry on Rec709 standard `__ +- `AV1 Image File Format specification `__ .. _render_camera: diff --git a/docs/wp_ar2.rst b/docs/wp_ar2.rst index b8427433b0..e9657c3320 100644 --- a/docs/wp_ar2.rst +++ b/docs/wp_ar2.rst @@ -51,10 +51,7 @@ used to override behavior. The API on :cpp:`ArResolver` will all be marked const to aid with writing const-correct code and as an indicator that these functions may be called concurrently and must be thread-safe (following the semantics used by the -Standard Library, see -`here -`__ -for more details) +Standard Library). Add Documentation and Examples ****************************** diff --git a/docs/wp_usdaudio.rst b/docs/wp_usdaudio.rst index c9fd626176..1cd726775a 100644 --- a/docs/wp_usdaudio.rst +++ b/docs/wp_usdaudio.rst @@ -202,7 +202,7 @@ values. Note that the only non-uniform (i.e. animatable) property is :bi:`gain`. ratios of signal, not an absolute scale factor, except theoretically as :math:`- inf` dB. Further, given the intended use of SpatialAudio for content delivery in usdz assets, the commonality with the `Web Audio API - `_ seems relevant. + `_ seems relevant. USD Sample ========== diff --git a/extras/imaging/docs/CMakeLists.txt b/extras/imaging/docs/CMakeLists.txt index 6245a6244d..5713608714 100644 --- a/extras/imaging/docs/CMakeLists.txt +++ b/extras/imaging/docs/CMakeLists.txt @@ -8,6 +8,7 @@ pxr_docs_only_dir(${PXR_PACKAGE} hydra_prim_schemas.dox examples_hydra_getting_started.cpp app_renderer_filters.png + HdSceneIndexPrimMaterialDiagram.drawio.svg hdschema.png legacy_scene_delegate_emulation.png nested_datasources.png diff --git a/extras/imaging/docs/HdSceneIndexPrimMaterialDiagram.drawio.svg b/extras/imaging/docs/HdSceneIndexPrimMaterialDiagram.drawio.svg new file mode 100644 index 0000000000..567b05c3e7 --- /dev/null +++ b/extras/imaging/docs/HdSceneIndexPrimMaterialDiagram.drawio.svg @@ -0,0 +1,4 @@ + + + +
"Plastic_Material" [scene index prim name]
"Plastic_Material" [scene index prim name]
materialOverride
materialOverride
interfaceValues

*globalVal = 0.2

*globalSpecularKface = 0.666
interfaceValues...
material
material
ri [materialNetwork for Renderman render context]
ri [materialNetwork for Renderman render context]
interfaceMappings

*globalVal o

*globalSpecularKface o
interfaceMappings...
nodes
nodes
"Color_Manipulate" [materialNode]

*adjustVal
"Color_Manipulate" [m...
*out
*out
"Color_RetargetLayer" [materialNode]

*valRemapAmount
"Color_RetargetLayer"...
*out
*out
"MaterialLayer"  [materialNode]

 *specularKface

 *diffuseK = 0.12

o*someInput_A

o*someInput_B
"MaterialLayer"  [mat...
*out
*out
"PxrSurface" [materialNode]

o*materialIn
"PxrSurface" [materia...
*out
*out
terminals

o*surface
terminals...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/extras/imaging/docs/hydra_prim_schemas.dox b/extras/imaging/docs/hydra_prim_schemas.dox index 823e18f048..20ac1fdede 100644 --- a/extras/imaging/docs/hydra_prim_schemas.dox +++ b/extras/imaging/docs/hydra_prim_schemas.dox @@ -136,8 +136,39 @@ have these. node output providing the material terminal - upstreamNodePath (TfToken) - upstreamNodeOutputName (TfToken) + - interfaceMappings (HdContainerDataSource) + - (HdVectorDataSource) + - (HdMaterialInterfaceMappingsSchema) + - nodePath (TfToken) + - inputName (TfToken) +- materialOverride (HdMaterialOverrideSchema) + - interfaceValues (HdContainerDataSource) + - (HdMaterialNodeParameterSchema) + - value (HdSampledDataSource) + - _Optional_ - colorSpace (TfToken) - primvars (HdPrimvarsSchema) +The following diagram depicts an example scene index prim "Plastic_Material". +"Plastic_Material" is a scene index prim that has multiple container data +sources, like 'material' (HdMaterialSchema) and 'materialOverride' +(HdMaterialOverrideSchema). "Plastic_Material" as a scene index prim also has +a scene index prim type, which is type 'material'. (See HdPrimTypeTokens). + +The diagram intends to show how the two sibling container data sources +"materialOverride" and "material" relate to each other. + +Note the following: +1. The data flows from left to right. +2. The dotted lines between "materialOverride" and "interfaceMappings" are not +true connections and are not backed by the MaterialConnection schema. Each item +within "materialOverride" and "interfaceMappings" is loosely coupled by their +matching names. +3. The connections in the diagram are drawn with an 'o' to indicate the source +where the connection was authored, and these connections are backed by the +MaterialConnection schema. + +\image html HdSceneIndexPrimMaterialDiagram.drawio.svg + Light - material (HdMaterialSchema) - we expect a "light" terminal diff --git a/extras/usd/examples/usdDancingCubesExample/dataImpl.cpp b/extras/usd/examples/usdDancingCubesExample/dataImpl.cpp index 14625acc89..219e810ce5 100644 --- a/extras/usd/examples/usdDancingCubesExample/dataImpl.cpp +++ b/extras/usd/examples/usdDancingCubesExample/dataImpl.cpp @@ -247,7 +247,7 @@ UsdDancingCubesExample_DataImpl::List(const SdfPath &path) const const _LeafPrimPropertyInfo *propInfo = TfMapLookupPtr(*_LeafPrimProperties, path.GetNameToken()); if (propInfo &&_leafPrimDataMap.count(path.GetAbsoluteRootOrPrimPath())) { - // Include time sample field in the property is animated. + // Include time sample field if the property is animated. 
if (propInfo->isAnimated) { static std::vector animPropFields( {SdfFieldKeys->TypeName, diff --git a/extras/usd/examples/usdResolverExample/wrapResolverContext.cpp b/extras/usd/examples/usdResolverExample/wrapResolverContext.cpp index 4a15c18483..453550d1e8 100644 --- a/extras/usd/examples/usdResolverExample/wrapResolverContext.cpp +++ b/extras/usd/examples/usdResolverExample/wrapResolverContext.cpp @@ -11,14 +11,14 @@ #include "pxr/usd/ar/pyResolverContext.h" #include "pxr/base/tf/pyUtils.h" -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/return_value_policy.hpp" #include PXR_NAMESPACE_USING_DIRECTIVE -using namespace boost::python; +using namespace pxr_boost::python; static size_t diff --git a/extras/usd/examples/usdSchemaExamples/CMakeLists.txt b/extras/usd/examples/usdSchemaExamples/CMakeLists.txt index 0f471627d8..c19bf3e204 100644 --- a/extras/usd/examples/usdSchemaExamples/CMakeLists.txt +++ b/extras/usd/examples/usdSchemaExamples/CMakeLists.txt @@ -1,6 +1,8 @@ set(PXR_PACKAGE usdSchemaExamples) pxr_plugin(${PXR_PACKAGE} + INCLUDE_SCHEMA_FILES + LIBRARIES tf sdf @@ -14,28 +16,10 @@ pxr_plugin(${PXR_PACKAGE} PUBLIC_HEADERS api.h - PUBLIC_CLASSES - simple - complex - paramsAPI - tokens - PYTHON_CPPFILES moduleDeps.cpp PYMODULE_FILES __init__.py - - PYMODULE_CPPFILES - module.cpp - wrapComplex.cpp - wrapParamsAPI.cpp - wrapSimple.cpp - wrapTokens.cpp - - RESOURCE_FILES - generatedSchema.usda - plugInfo.json - schema.usda:usdSchemaExamples/schema.usda ) diff --git a/extras/usd/examples/usdSchemaExamples/generatedSchema.classes.txt b/extras/usd/examples/usdSchemaExamples/generatedSchema.classes.txt new file mode 100644 index 0000000000..77ecf17751 --- /dev/null +++ b/extras/usd/examples/usdSchemaExamples/generatedSchema.classes.txt @@ -0,0 +1,19 @@ +# WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT. + +# Public Classes +complex +paramsAPI +simple +tokens + +# Python Module Files +module.cpp +wrapComplex.cpp +wrapParamsAPI.cpp +wrapSimple.cpp +wrapTokens.cpp + +# Resource Files +generatedSchema.usda +plugInfo.json +schema.usda:usdSchemaExamples/schema.usda diff --git a/extras/usd/examples/usdSchemaExamples/generatedSchema.module.h b/extras/usd/examples/usdSchemaExamples/generatedSchema.module.h new file mode 100644 index 0000000000..51366ecbd1 --- /dev/null +++ b/extras/usd/examples/usdSchemaExamples/generatedSchema.module.h @@ -0,0 +1,12 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// + +// WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT. +TF_WRAP(UsdSchemaExamplesSimple); +TF_WRAP(UsdSchemaExamplesComplex); +TF_WRAP(UsdSchemaExamplesParamsAPI); +TF_WRAP(UsdSchemaExamplesTokens); diff --git a/extras/usd/examples/usdSchemaExamples/module.cpp b/extras/usd/examples/usdSchemaExamples/module.cpp index 64e79a44d5..c1a6ea4c51 100644 --- a/extras/usd/examples/usdSchemaExamples/module.cpp +++ b/extras/usd/examples/usdSchemaExamples/module.cpp @@ -12,8 +12,6 @@ PXR_NAMESPACE_USING_DIRECTIVE TF_WRAP_MODULE { - TF_WRAP(UsdSchemaExamplesSimple); - TF_WRAP(UsdSchemaExamplesComplex); - TF_WRAP(UsdSchemaExamplesParamsAPI); - TF_WRAP(UsdSchemaExamplesTokens); + // Generated Schema classes. Do not remove or edit the following line. 
+ #include "generatedSchema.module.h" } diff --git a/extras/usd/examples/usdSchemaExamples/plugInfo.json b/extras/usd/examples/usdSchemaExamples/plugInfo.json index 8323f81965..fda9a91d82 100644 --- a/extras/usd/examples/usdSchemaExamples/plugInfo.json +++ b/extras/usd/examples/usdSchemaExamples/plugInfo.json @@ -14,6 +14,7 @@ "bases": [ "UsdSchemaExamplesSimple" ], + "schemaIdentifier": "ComplexPrim", "schemaKind": "concreteTyped" }, "UsdSchemaExamplesParamsAPI": { @@ -24,6 +25,7 @@ "bases": [ "UsdAPISchemaBase" ], + "schemaIdentifier": "ParamsAPI", "schemaKind": "singleApplyAPI" }, "UsdSchemaExamplesSimple": { @@ -34,6 +36,7 @@ "bases": [ "UsdTyped" ], + "schemaIdentifier": "SimplePrim", "schemaKind": "abstractTyped" } } diff --git a/extras/usd/examples/usdSchemaExamples/wrapComplex.cpp b/extras/usd/examples/usdSchemaExamples/wrapComplex.cpp index 3ffb511948..877a0895a3 100644 --- a/extras/usd/examples/usdSchemaExamples/wrapComplex.cpp +++ b/extras/usd/examples/usdSchemaExamples/wrapComplex.cpp @@ -15,14 +15,14 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include +#include "pxr/external/boost/python.hpp" #include -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { #define WRAP_CUSTOM \ diff --git a/extras/usd/examples/usdSchemaExamples/wrapParamsAPI.cpp b/extras/usd/examples/usdSchemaExamples/wrapParamsAPI.cpp index f9d528b799..0768f1b25d 100644 --- a/extras/usd/examples/usdSchemaExamples/wrapParamsAPI.cpp +++ b/extras/usd/examples/usdSchemaExamples/wrapParamsAPI.cpp @@ -16,14 +16,14 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include +#include "pxr/external/boost/python.hpp" #include -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { #define WRAP_CUSTOM \ diff --git a/extras/usd/examples/usdSchemaExamples/wrapSimple.cpp b/extras/usd/examples/usdSchemaExamples/wrapSimple.cpp index fba73f4a7f..f2aea5b623 100644 --- a/extras/usd/examples/usdSchemaExamples/wrapSimple.cpp +++ b/extras/usd/examples/usdSchemaExamples/wrapSimple.cpp @@ -15,14 +15,14 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include +#include "pxr/external/boost/python.hpp" #include -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { #define WRAP_CUSTOM \ diff --git a/extras/usd/examples/usdSchemaExamples/wrapTokens.cpp b/extras/usd/examples/usdSchemaExamples/wrapTokens.cpp index 5ced1826e2..08df8db562 100644 --- a/extras/usd/examples/usdSchemaExamples/wrapTokens.cpp +++ b/extras/usd/examples/usdSchemaExamples/wrapTokens.cpp @@ -5,55 +5,25 @@ // https://openusd.org/license. // // GENERATED FILE. DO NOT EDIT. -#include +#include "pxr/external/boost/python/class.hpp" #include "./tokens.h" PXR_NAMESPACE_USING_DIRECTIVE -namespace { - -// Helper to return a static token as a string. We wrap tokens as Python -// strings and for some reason simply wrapping the token using def_readonly -// bypasses to-Python conversion, leading to the error that there's no -// Python type for the C++ TfToken type. So we wrap this functor instead. 
-class _WrapStaticToken { -public: - _WrapStaticToken(const TfToken* token) : _token(token) { } - - std::string operator()() const - { - return _token->GetString(); - } - -private: - const TfToken* _token; -}; - -template -void -_AddToken(T& cls, const char* name, const TfToken& token) -{ - cls.add_static_property(name, - boost::python::make_function( - _WrapStaticToken(&token), - boost::python::return_value_policy< - boost::python::return_by_value>(), - boost::mpl::vector1())); -} - -} // anonymous +#define _ADD_TOKEN(cls, name) \ + cls.add_static_property(#name, +[]() { return UsdSchemaExamplesTokens->name.GetString(); }); void wrapUsdSchemaExamplesTokens() { - boost::python::class_ - cls("Tokens", boost::python::no_init); - _AddToken(cls, "complexString", UsdSchemaExamplesTokens->complexString); - _AddToken(cls, "intAttr", UsdSchemaExamplesTokens->intAttr); - _AddToken(cls, "paramsMass", UsdSchemaExamplesTokens->paramsMass); - _AddToken(cls, "paramsVelocity", UsdSchemaExamplesTokens->paramsVelocity); - _AddToken(cls, "paramsVolume", UsdSchemaExamplesTokens->paramsVolume); - _AddToken(cls, "target", UsdSchemaExamplesTokens->target); - _AddToken(cls, "ComplexPrim", UsdSchemaExamplesTokens->ComplexPrim); - _AddToken(cls, "ParamsAPI", UsdSchemaExamplesTokens->ParamsAPI); - _AddToken(cls, "SimplePrim", UsdSchemaExamplesTokens->SimplePrim); + pxr_boost::python::class_ + cls("Tokens", pxr_boost::python::no_init); + _ADD_TOKEN(cls, complexString); + _ADD_TOKEN(cls, intAttr); + _ADD_TOKEN(cls, paramsMass); + _ADD_TOKEN(cls, paramsVelocity); + _ADD_TOKEN(cls, paramsVolume); + _ADD_TOKEN(cls, target); + _ADD_TOKEN(cls, ComplexPrim); + _ADD_TOKEN(cls, ParamsAPI); + _ADD_TOKEN(cls, SimplePrim); } diff --git a/extras/usd/tutorials/simpleShading/simpleShading.usda b/extras/usd/tutorials/simpleShading/simpleShading.usda new file mode 100644 index 0000000000..0582102349 --- /dev/null +++ b/extras/usd/tutorials/simpleShading/simpleShading.usda @@ -0,0 +1,56 @@ +#usda 1.0 +( + upAxis = "Y" +) + +def Xform "TexModel" ( + kind = "component" +) +{ + def Mesh "card" ( + prepend apiSchemas = ["MaterialBindingAPI"] + ) + { + float3[] extent = [(-430, -145, 0), (430, 145, 0)] + int[] faceVertexCounts = [4] + int[] faceVertexIndices = [0, 1, 2, 3] + rel material:binding = + point3f[] points = [(-430, -145, 0), (430, -145, 0), (430, 145, 0), (-430, 145, 0)] + texCoord2f[] primvars:st = [(0, 0), (2, 0), (2, 2), (0, 2)] ( + interpolation = "varying" + ) + } + + def Material "boardMat" + { + token inputs:frame:stPrimvarName = "st" + token outputs:surface.connect = + + def Shader "PBRShader" + { + uniform token info:id = "UsdPreviewSurface" + color3f inputs:diffuseColor.connect = + float inputs:metallic = 0 + float inputs:roughness = 0.4 + token outputs:surface + } + + def Shader "stReader" + { + uniform token info:id = "UsdPrimvarReader_float2" + string inputs:varname.connect = + float2 outputs:result + } + + def Shader "diffuseTexture" + { + uniform token info:id = "UsdUVTexture" + asset inputs:file = @USDLogoLrg.png@ + float2 inputs:st.connect = + token inputs:wrapS = "repeat" + token inputs:wrapT = "repeat" + float3 outputs:rgb + } + } +} + diff --git a/pxr/CMakeLists.txt b/pxr/CMakeLists.txt index 00cafd3d98..4c7301b871 100644 --- a/pxr/CMakeLists.txt +++ b/pxr/CMakeLists.txt @@ -1,8 +1,13 @@ pxr_core_prologue() +add_subdirectory(external) add_subdirectory(base) add_subdirectory(usd) +if (EXISTS "${PROJECT_SOURCE_DIR}/pxr/exec") + add_subdirectory(exec) +endif() + if (${PXR_BUILD_IMAGING}) 
add_subdirectory(imaging) if (${PXR_BUILD_USD_IMAGING}) diff --git a/pxr/base/arch/stackTrace.cpp b/pxr/base/arch/stackTrace.cpp index 1d1e92bc57..b93ec6fda4 100644 --- a/pxr/base/arch/stackTrace.cpp +++ b/pxr/base/arch/stackTrace.cpp @@ -94,7 +94,7 @@ using namespace std; typedef int (*ForkFunc)(void); ForkFunc Arch_nonLockingFork = #if defined(ARCH_OS_LINUX) - (ForkFunc)dlsym(RTLD_NEXT, "__libc_fork"); + (ForkFunc)dlsym(RTLD_DEFAULT, "_Fork"); #elif defined(ARCH_OS_DARWIN) NULL; #else @@ -579,7 +579,7 @@ nonLockingLinux__execve (const char *file, /* * We make a direct system call here, because we can't find an * execve which corresponds with the non-locking fork we call - * (__libc_fork().) + * (_Fork().) * * This code doesn't mess with other threads, and avoids the bug * that calling regular execv after the nonLockingFork() causes diff --git a/pxr/base/gf/CMakeLists.txt b/pxr/base/gf/CMakeLists.txt index b98922bf47..aa515ab818 100644 --- a/pxr/base/gf/CMakeLists.txt +++ b/pxr/base/gf/CMakeLists.txt @@ -95,6 +95,8 @@ pxr_library(gf module.cpp wrapBBox3d.cpp wrapCamera.cpp + wrapColor.cpp + wrapColorSpace.cpp wrapDualQuatd.cpp wrapDualQuatf.cpp wrapDualQuath.cpp @@ -178,16 +180,17 @@ pxr_build_test(testGfHardToReach testenv/testGfHardToReach.cpp ) -pxr_build_test(testGfColor +pxr_build_test(testGfColorCpp LIBRARIES gf CPPFILES - testenv/testGfColor.cpp + testenv/testGfColorCpp.cpp ) pxr_test_scripts( testenv/testGfBBox3d.py testenv/testGfCamera.py + testenv/testGfColorPy.py testenv/testGfDecomposeRotation.py testenv/testGfDualQuaternion.py testenv/testGfFrustum.py @@ -214,8 +217,12 @@ pxr_register_test(testGfBBox3d PYTHON COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testGfBBox3d" ) -pxr_register_test(testGfColor - COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testGfColor" +pxr_register_test(testGfColorPy + PYTHON + COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testGfColorPy" +) +pxr_register_test(testGfColorCpp + COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testGfColorCpp" ) pxr_register_test(testGfDecomposeRotation PYTHON diff --git a/pxr/base/gf/dualQuat.template.h b/pxr/base/gf/dualQuat.template.h index 617fce23ba..0cd3e26b7e 100644 --- a/pxr/base/gf/dualQuat.template.h +++ b/pxr/base/gf/dualQuat.template.h @@ -47,7 +47,7 @@ struct GfIsGfDualQuat { static const bool value = true; }; /// /// References: /// https://www.cs.utah.edu/~ladislav/kavan06dual/kavan06dual.pdf -/// http://web.cs.iastate.edu/~cs577/handouts/dual-quaternion.pdf +/// https://faculty.sites.iastate.edu/jia/files/inline-files/dual-quaternion.pdf /// class {{ DUALQUAT }} final { diff --git a/pxr/base/gf/dualQuatd.h b/pxr/base/gf/dualQuatd.h index 4aa7aafb1f..9677ea2756 100644 --- a/pxr/base/gf/dualQuatd.h +++ b/pxr/base/gf/dualQuatd.h @@ -44,7 +44,7 @@ double GfDot(const GfDualQuatd& dq1, const GfDualQuatd& dq2); /// /// References: /// https://www.cs.utah.edu/~ladislav/kavan06dual/kavan06dual.pdf -/// http://web.cs.iastate.edu/~cs577/handouts/dual-quaternion.pdf +/// https://faculty.sites.iastate.edu/jia/files/inline-files/dual-quaternion.pdf /// class GfDualQuatd final { diff --git a/pxr/base/gf/dualQuatf.h b/pxr/base/gf/dualQuatf.h index a1bbba2a60..e24ca2ec35 100644 --- a/pxr/base/gf/dualQuatf.h +++ b/pxr/base/gf/dualQuatf.h @@ -44,7 +44,7 @@ float GfDot(const GfDualQuatf& dq1, const GfDualQuatf& dq2); /// /// References: /// https://www.cs.utah.edu/~ladislav/kavan06dual/kavan06dual.pdf -/// http://web.cs.iastate.edu/~cs577/handouts/dual-quaternion.pdf +/// https://faculty.sites.iastate.edu/jia/files/inline-files/dual-quaternion.pdf /// 
class GfDualQuatf final { diff --git a/pxr/base/gf/dualQuath.h b/pxr/base/gf/dualQuath.h index 95a6c1770c..c458c1eaf8 100644 --- a/pxr/base/gf/dualQuath.h +++ b/pxr/base/gf/dualQuath.h @@ -45,7 +45,7 @@ GfHalf GfDot(const GfDualQuath& dq1, const GfDualQuath& dq2); /// /// References: /// https://www.cs.utah.edu/~ladislav/kavan06dual/kavan06dual.pdf -/// http://web.cs.iastate.edu/~cs577/handouts/dual-quaternion.pdf +/// https://faculty.sites.iastate.edu/jia/files/inline-files/dual-quaternion.pdf /// class GfDualQuath final { diff --git a/pxr/base/gf/frustum.cpp b/pxr/base/gf/frustum.cpp index 7fb9664d45..8afc610244 100644 --- a/pxr/base/gf/frustum.cpp +++ b/pxr/base/gf/frustum.cpp @@ -160,7 +160,7 @@ GfFrustum::GetPerspective(bool isFovVertical, } double -GfFrustum::GetFOV(bool isFovVertical /* = false */) +GfFrustum::GetFOV(bool isFovVertical /* = false */) const { double result = 0.0; diff --git a/pxr/base/gf/frustum.h b/pxr/base/gf/frustum.h index 4afbe177bc..49744de490 100644 --- a/pxr/base/gf/frustum.h +++ b/pxr/base/gf/frustum.h @@ -379,7 +379,7 @@ class GfFrustum { /// \note The default value for \c isFovVertical is false so calling \c /// GetFOV without an argument will return the horizontal field of view /// which is compatible with menv2x's old GfFrustum::GetFOV routine. - GF_API double GetFOV(bool isFovVertical = false); + GF_API double GetFOV(bool isFovVertical = false) const; /// Sets up the frustum in a manner similar to \c glOrtho(). /// diff --git a/pxr/base/gf/math.cpp b/pxr/base/gf/math.cpp index 220df9fcb8..ca638f16c6 100644 --- a/pxr/base/gf/math.cpp +++ b/pxr/base/gf/math.cpp @@ -30,4 +30,66 @@ GfMod(float a, float b) return c; } +double +GfSmoothStep(double min, double max, double val, double slope0, double slope1) +{ + // Implements standard hermite formulation: + // p(h) = (2h^3 - 3h^2 + 1)p0 + (h^3 - 2h^2 + h)m0 + + // (-2h^3 + 3h^2)p1 + (h^3 - h^2)m1; + if (val >= max) return 1.0; + if (val < min) return 0.0; + + // Note due to above, if here, max != min + double dv = max - min; + + double h = ((val - min) / (max - min)); + + double h2 = h * h; + double h3 = h2 * h; + + // p1 term + double v = -2.0 * h3 + 3.0 * h2; + + // p0 is always zero + + if (slope0 != 0.0) { + // normalize the slope + slope0 /= dv; + v += (h3 - 2 * h2 + h) * slope0; + } + + if (slope1 != 0.0) { + // normalize the slope + slope1 /= dv; + v += (h3 - h2) * slope1; + } + + return v; +} + +double +GfSmoothRamp(double tmin, double tmax, double t, double w0, double w1) +{ + if (t <= tmin) { + return 0.0; + } + + if (t >= tmax) { + return 1.0; + } + + double x = (t-tmin)/(tmax-tmin); + double xr = 2.0 - w0 - w1; + + if (x < w0) { + return (x*x)/(w0 * xr); + } + + if (x > (1.0 - w1) ) { + return (1.0 - ((1.0 - x) * (1.0 - x))/ (w1 * xr)); + } + + return (2.0 * x - w0)/xr; +} + PXR_NAMESPACE_CLOSE_SCOPE diff --git a/pxr/base/gf/math.h b/pxr/base/gf/math.h index 1d4a2ac4f2..b66a7a1a74 100644 --- a/pxr/base/gf/math.h +++ b/pxr/base/gf/math.h @@ -38,6 +38,130 @@ inline double GfDegreesToRadians(double degrees) { return degrees * (M_PI / 180.0); } +/// Smooth step function using a cubic hermite blend. +/// \ingroup group_gf_BasicMath +/// +/// Returns 0 if \p val <= \p min, and 1 if \p val >= \p max. +/// As \p val varies between \p min and \p max, the return value smoothly +/// varies from 0 to 1 using a cubic hermite blend, with given slopes at the +/// min and max points. The slopes are in the space that min and max are in. 
+GF_API
+double GfSmoothStep(double min, double max, double val,
+                    double slope0 = 0.0, double slope1 = 0.0);
+
+/// Smooth Step with independently controllable shoulders
+/// \ingroup group_gf_BasicMath
+///
+/// Based on an idea and different implementation by Rob Cook. See his
+/// notes attached at the end.
+///
+/// I (whorfin) extended this to have independently controllable shoulders at
+/// either end, and to specify shoulders directly in the domain of the curve.
+/// Rob's derivation frankly confused me, so I proceeded slightly differently.
+/// This derivation has more degrees of freedom but is the same order, so some
+/// tricks must be done.
+///
+/// Summary: This function is similar to "smoothstep" except that instead of
+/// using a Hermite curve, the interpolation is done with a linear ramp with
+/// smooth shoulders (i.e., C1 = continuous first derivatives).
+///
+/// Conceptually, it's a line with variable C1 zero-slope junctures.
+///
+/// Additionally, w0 + w1 <= 1. Otherwise, the curves will take up
+/// more space than is available, and "that would be bad".
+///
+/// A value of 0 for w0 and w1 gives a pure linear ramp.
+/// A reasonable value for a symmetric smooth ramp is .2 for w0 and w1.
+/// this means that the middle 60% of the ramp is linear, and the left
+/// 20% and right 20% are the transition into and out of the linear ramp.
+///
+/// The ramp looks like this:
+///
+///                              smooth ********** <-result = 1
+///                                  ***|
+///                                **   |
+///                               * |   |
+///                      linear  *  |   |
+///                             *   |   |
+///                            *    |   |
+///                   smooth **     |   tmax = end of ramp
+///                       *** |     |
+///    result=0 -> *******    |     tmax - w1*(tmax-tmin) = end of linear region
+///                      |    |
+///                      |    tmin + w0*(tmax-tmin) = start of linear region
+///                      |
+///                      tmin = start of ramp
+///
+///
+/// Derivation:
+///
+/// We're going to splice parabolas onto both ends for the "0 slope smooth"
+/// connectors. So we therefore constrain the parabolic sections to have
+/// a given width and given slope (the slope of the connecting line segment)
+/// at the "w" edge.
+///
+/// We'll first derive the equations for the parabolic splicing segment,
+/// expressed at the origin (but generalizable by flipping).
+///
+/// Given:
+///
+///  f(t) = a t² + b t + c
+///  f(0) = 0
+///  f'(0) = 0
+///  f(w) = y    At the "w" edge of the shoulder, value is y
+///  f'(w) = s   ...what is the slope there? s...
+///
+///  -->
+///      c = 0
+///      b = 0
+///      a = ½ s/w
+///      y = ½ w s
+///  -->
+///      g(t,w,s) = ďż˝ s tďż˝ / w   # Our parabolic segment
+/// 
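(Spelling out the step above, which follows directly from the stated constraints: with c = b = 0 the segment reduces to f(t) = a t², so f(w) = a w² = y and f'(w) = 2 a w = s, giving a = s/(2w) = ½ s/w and y = a w² = ½ w s.)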
+/// +/// Now, in our desired composite curve, the slope is the same at +/// both endpoints (since they're connected by a line). +/// This slope is (1-y0-y1)/(1-w0-w1) [from simple geometry]. +/// +/// More formally, let's express the constraints +/// Given: +///
+///  y(w,s) = w s /2
+///  s = ( 1 - y(w0, s) - y(w1, s) ) / (1 - w0 - w1)
+///
+///  -->
+///      s(w0,w1) = 2 / (2 - w0 - w1)
+/// 
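(Spelled out: substituting y(w, s) = w s / 2 into the slope constraint gives s (1 - w0 - w1) = 1 - s w0/2 - s w1/2, hence s (2 - w0 - w1) = 2 and s(w0, w1) = 2 / (2 - w0 - w1).)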
+/// +/// So now we're done; we splice these two together and connect +/// with a line. +/// +/// The domain and range of this function is [0,1] +///
+///      f(t, w0, w1) =
+///              g(t, w0, s(w0,w1))              t < w0
+///          1 - g(1-t, w1, s(w0,w1))            t > 1-w1
+///
+///          s(w0,w1) t - y(w0, s(w0,w1))    w0 <= t <= 1-w1
+/// 
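Below is a minimal usage sketch (an editorial illustration, not part of this patch). It assumes only the two declarations added to this header, GfSmoothStep and GfSmoothRamp; the expected values in the comments are computed from the math.cpp implementation earlier in this change.

#include "pxr/base/gf/math.h"
#include <cstdio>

PXR_NAMESPACE_USING_DIRECTIVE

int main()
{
    // Cubic hermite blend with the default zero slopes.
    std::printf("%g\n", GfSmoothStep(0.0, 1.0, 0.25));           // 0.15625
    std::printf("%g\n", GfSmoothStep(0.0, 1.0, 0.5));            // 0.5

    // Ramp with 20% parabolic shoulders on each side: the middle 60%
    // is linear, so the midpoint still evaluates to 0.5.
    std::printf("%g\n", GfSmoothRamp(0.0, 1.0, 0.1, 0.2, 0.2));  // 0.03125 (first shoulder)
    std::printf("%g\n", GfSmoothRamp(0.0, 1.0, 0.5, 0.2, 0.2));  // 0.5 (linear segment)
    return 0;
}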
+/// +/// Expanding and collecting terms gives us the result expressed in the +/// code below. We also generalize to tmin/tmax form, in keeping with +/// smoothstep. This simply involves reranging to [0,1] on input. +/// +/// @param tmin where the ramp starts +/// @param tmax where the ramp ends (must be > tmin) +/// @param t location to evaluate in this call +/// @param w0 size of the first smooth section as a fraction of +/// the size of the ramp (tmax-tmin). This value must +/// be in the range 0-1. +/// @param w1 size of the second smooth section as a fraction of +/// the size of the ramp (tmax-tmin). This value must +/// be in the range 0-1. +GF_API +double GfSmoothRamp(double tmin, double tmax, double t, double w0, double w1); + /// Returns the inner product of \c x with itself: specifically, \c x*x. /// Defined for \c int, \c float, \c double, and all \c GfVec types. /// \ingroup group_gf_BasicMath diff --git a/pxr/base/gf/matrix.template.cpp b/pxr/base/gf/matrix.template.cpp index 10dacf1552..444c5d0e94 100644 --- a/pxr/base/gf/matrix.template.cpp +++ b/pxr/base/gf/matrix.template.cpp @@ -190,34 +190,6 @@ operator -(const {{ MAT }}& m) return *this; } -{% if SCL == 'double' %} - -/* - * Define multiplication between floating vector and double matrix. - */ -GfVec{{ DIM }}f -operator *(const GfVec{{ DIM }}f &vec, const {{ MAT }} &m) -{ - return GfVec{{ DIM }}f( -{% for COL in range(DIM) %} - float({{ LIST("vec[%%(i)s] * m._mtx[%%(i)s][%(COL)s]" % {'COL':COL}, - sep=" + ") }}) -{%- if not loop.last %}{{ ",\n" }}{% endif %} -{% endfor %}); -} - -GfVec{{ DIM }}f -operator *(const {{ MAT }}& m, const GfVec{{ DIM }}f &vec) -{ - return GfVec{{ DIM }}f( -{% for ROW in range(DIM) %} - float({{ LIST("vec[%%(i)s] * m._mtx[%(ROW)s][%%(i)s]" % {'ROW':ROW}, - sep=" + ") }}) -{%- if not loop.last %}{{ ",\n" }}{% endif %} -{% endfor %}); -} -{% endif -%} - {% block customXformFunctions %} {% endblock customXformFunctions %} diff --git a/pxr/base/gf/matrix.template.h b/pxr/base/gf/matrix.template.h index d2aebb07e4..fac7fa3f35 100644 --- a/pxr/base/gf/matrix.template.h +++ b/pxr/base/gf/matrix.template.h @@ -328,28 +328,6 @@ class {{ MAT }} {% endfor %}); } -{% if SCL == 'double' %} - /// Returns the product of a matrix \e m and a column vector \e vec. - /// Note that the return type is a \c GfVec{{ DIM }}f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec{{ SUFFIX }} operator *(const {{ MAT }}&, const GfVec{{ SUFFIX }} &) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec{{ DIM }}f operator *(const {{ MAT }}& m, const GfVec{{ DIM }}f& vec); - - /// Returns the product of row vector \e vec and a matrix \e m. - /// Note that the return type is a \c GfVec{{ DIM }}f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec{{ SUFFIX }} operator *(const GfVec{{ SUFFIX }} &, const {{ MAT }}&) - /// instead and explicitly convert the result to GfVec3f, if necessary. 
- GF_API - friend GfVec{{ DIM }}f operator *(const GfVec{{ DIM }}f &vec, const {{ MAT }}& m); - -{% endif %} {% block customXformFunctions %} {% endblock customXformFunctions %} diff --git a/pxr/base/gf/matrix2d.cpp b/pxr/base/gf/matrix2d.cpp index b1a0e1dab9..f578c038dd 100644 --- a/pxr/base/gf/matrix2d.cpp +++ b/pxr/base/gf/matrix2d.cpp @@ -234,25 +234,6 @@ GfMatrix2d::operator*=(const GfMatrix2d &m) return *this; } -/* - * Define multiplication between floating vector and double matrix. - */ -GfVec2f -operator *(const GfVec2f &vec, const GfMatrix2d &m) -{ - return GfVec2f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[1][0]), - float(vec[0] * m._mtx[0][1] + vec[1] * m._mtx[1][1])); -} - -GfVec2f -operator *(const GfMatrix2d& m, const GfVec2f &vec) -{ - return GfVec2f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[0][1]), - float(vec[0] * m._mtx[1][0] + vec[1] * m._mtx[1][1])); -} - bool GfIsClose(GfMatrix2d const &m1, GfMatrix2d const &m2, double tolerance) diff --git a/pxr/base/gf/matrix2d.h b/pxr/base/gf/matrix2d.h index 9b2e7631e7..6fcaf716cb 100644 --- a/pxr/base/gf/matrix2d.h +++ b/pxr/base/gf/matrix2d.h @@ -329,26 +329,6 @@ class GfMatrix2d vec[0] * m._mtx[0][1] + vec[1] * m._mtx[1][1]); } - /// Returns the product of a matrix \e m and a column vector \e vec. - /// Note that the return type is a \c GfVec2f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec2d operator *(const GfMatrix2d&, const GfVec2d &) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec2f operator *(const GfMatrix2d& m, const GfVec2f& vec); - - /// Returns the product of row vector \e vec and a matrix \e m. - /// Note that the return type is a \c GfVec2f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec2d operator *(const GfVec2d &, const GfMatrix2d&) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec2f operator *(const GfVec2f &vec, const GfMatrix2d& m); - private: /// Matrix storage, in row-major order. diff --git a/pxr/base/gf/matrix3d.cpp b/pxr/base/gf/matrix3d.cpp index 6e1995362a..d6e92da385 100644 --- a/pxr/base/gf/matrix3d.cpp +++ b/pxr/base/gf/matrix3d.cpp @@ -378,27 +378,6 @@ GfMatrix3d::operator*=(const GfMatrix3d &m) return *this; } - -/* - * Define multiplication between floating vector and double matrix. - */ -GfVec3f -operator *(const GfVec3f &vec, const GfMatrix3d &m) -{ - return GfVec3f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[1][0] + vec[2] * m._mtx[2][0]), - float(vec[0] * m._mtx[0][1] + vec[1] * m._mtx[1][1] + vec[2] * m._mtx[2][1]), - float(vec[0] * m._mtx[0][2] + vec[1] * m._mtx[1][2] + vec[2] * m._mtx[2][2])); -} - -GfVec3f -operator *(const GfMatrix3d& m, const GfVec3f &vec) -{ - return GfVec3f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[0][1] + vec[2] * m._mtx[0][2]), - float(vec[0] * m._mtx[1][0] + vec[1] * m._mtx[1][1] + vec[2] * m._mtx[1][2]), - float(vec[0] * m._mtx[2][0] + vec[1] * m._mtx[2][1] + vec[2] * m._mtx[2][2])); -} GfMatrix3d & GfMatrix3d::SetScale(double s) { diff --git a/pxr/base/gf/matrix3d.h b/pxr/base/gf/matrix3d.h index b5b6de86f8..fedf96daa2 100644 --- a/pxr/base/gf/matrix3d.h +++ b/pxr/base/gf/matrix3d.h @@ -407,26 +407,6 @@ class GfMatrix3d vec[0] * m._mtx[0][2] + vec[1] * m._mtx[1][2] + vec[2] * m._mtx[2][2]); } - /// Returns the product of a matrix \e m and a column vector \e vec. 
- /// Note that the return type is a \c GfVec3f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec3d operator *(const GfMatrix3d&, const GfVec3d &) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec3f operator *(const GfMatrix3d& m, const GfVec3f& vec); - - /// Returns the product of row vector \e vec and a matrix \e m. - /// Note that the return type is a \c GfVec3f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec3d operator *(const GfVec3d &, const GfMatrix3d&) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec3f operator *(const GfVec3f &vec, const GfMatrix3d& m); - /// Sets matrix to specify a uniform scaling by \e scaleFactor. GF_API GfMatrix3d& SetScale(double scaleFactor); diff --git a/pxr/base/gf/matrix4d.cpp b/pxr/base/gf/matrix4d.cpp index eff71714ae..7642a876ce 100644 --- a/pxr/base/gf/matrix4d.cpp +++ b/pxr/base/gf/matrix4d.cpp @@ -652,29 +652,6 @@ GfMatrix4d::operator*=(const GfMatrix4d &m) return *this; } - -/* - * Define multiplication between floating vector and double matrix. - */ -GfVec4f -operator *(const GfVec4f &vec, const GfMatrix4d &m) -{ - return GfVec4f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[1][0] + vec[2] * m._mtx[2][0] + vec[3] * m._mtx[3][0]), - float(vec[0] * m._mtx[0][1] + vec[1] * m._mtx[1][1] + vec[2] * m._mtx[2][1] + vec[3] * m._mtx[3][1]), - float(vec[0] * m._mtx[0][2] + vec[1] * m._mtx[1][2] + vec[2] * m._mtx[2][2] + vec[3] * m._mtx[3][2]), - float(vec[0] * m._mtx[0][3] + vec[1] * m._mtx[1][3] + vec[2] * m._mtx[2][3] + vec[3] * m._mtx[3][3])); -} - -GfVec4f -operator *(const GfMatrix4d& m, const GfVec4f &vec) -{ - return GfVec4f( - float(vec[0] * m._mtx[0][0] + vec[1] * m._mtx[0][1] + vec[2] * m._mtx[0][2] + vec[3] * m._mtx[0][3]), - float(vec[0] * m._mtx[1][0] + vec[1] * m._mtx[1][1] + vec[2] * m._mtx[1][2] + vec[3] * m._mtx[1][3]), - float(vec[0] * m._mtx[2][0] + vec[1] * m._mtx[2][1] + vec[2] * m._mtx[2][2] + vec[3] * m._mtx[2][3]), - float(vec[0] * m._mtx[3][0] + vec[1] * m._mtx[3][1] + vec[2] * m._mtx[3][2] + vec[3] * m._mtx[3][3])); -} // Leaves the [3][3] element as 1 GfMatrix4d & GfMatrix4d::SetScale(double s) diff --git a/pxr/base/gf/matrix4d.h b/pxr/base/gf/matrix4d.h index 6a7bb2be3d..5f27d355f4 100644 --- a/pxr/base/gf/matrix4d.h +++ b/pxr/base/gf/matrix4d.h @@ -488,26 +488,6 @@ class GfMatrix4d vec[0] * m._mtx[0][3] + vec[1] * m._mtx[1][3] + vec[2] * m._mtx[2][3] + vec[3] * m._mtx[3][3]); } - /// Returns the product of a matrix \e m and a column vector \e vec. - /// Note that the return type is a \c GfVec4f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec4d operator *(const GfMatrix4d&, const GfVec4d &) - /// instead and explicitly convert the result to GfVec3f, if necessary. - GF_API - friend GfVec4f operator *(const GfMatrix4d& m, const GfVec4f& vec); - - /// Returns the product of row vector \e vec and a matrix \e m. - /// Note that the return type is a \c GfVec4f. - /// - /// \deprecated - /// This function is deprecated, as it can result in unintentional loss of - /// precision. Call GfVec4d operator *(const GfVec4d &, const GfMatrix4d&) - /// instead and explicitly convert the result to GfVec3f, if necessary. 
- GF_API - friend GfVec4f operator *(const GfVec4f &vec, const GfMatrix4d& m); - /// Sets matrix to specify a uniform scaling by \e scaleFactor. GF_API GfMatrix4d& SetScale(double scaleFactor); diff --git a/pxr/base/gf/module.cpp b/pxr/base/gf/module.cpp index ab8d7fec54..14eb9254c0 100644 --- a/pxr/base/gf/module.cpp +++ b/pxr/base/gf/module.cpp @@ -13,6 +13,8 @@ PXR_NAMESPACE_USING_DIRECTIVE TF_WRAP_MODULE { TF_WRAP( BBox3d ); + TF_WRAP( Color ); + TF_WRAP( ColorSpace ); TF_WRAP( DualQuatd ); TF_WRAP( DualQuatf ); TF_WRAP( DualQuath ); diff --git a/pxr/base/gf/nc/nanocolor.c b/pxr/base/gf/nc/nanocolor.c index 663ee50837..5860afadb1 100644 --- a/pxr/base/gf/nc/nanocolor.c +++ b/pxr/base/gf/nc/nanocolor.c @@ -28,9 +28,9 @@ struct NcColorSpace { NcM33f rgbToXYZ; }; -static void _NcInitColorSpace(NcColorSpace* cs); +static void _InitColorSpace(NcColorSpace* cs); -static float nc_FromLinear(const NcColorSpace* cs, float t) { +static float _FromLinear(const NcColorSpace* cs, float t) { const float gamma = cs->desc.gamma; if (t < cs->K0 / cs->phi) return t * cs->phi; @@ -38,7 +38,7 @@ static float nc_FromLinear(const NcColorSpace* cs, float t) { return (1.f + a) * powf(t, 1.f / gamma) - a; } -static float nc_ToLinear(const NcColorSpace* cs, float t) { +static float _ToLinear(const NcColorSpace* cs, float t) { const float gamma = cs->desc.gamma; if (t < cs->K0) return t / cs->phi; @@ -83,7 +83,7 @@ const char* Nc_sRGB = _sRGB; static const char _srgb_texture[] = "srgb_texture"; const char* Nc_srgb_texture = _srgb_texture; -NCAPI const char* NcGetDescription(const NcColorSpace* cs) { +NCAPI const char* NcGetDescription(const NcColorSpace* cs) { if (!cs) return NULL; @@ -386,7 +386,7 @@ bool NcColorSpaceEqual(const NcColorSpace* cs1, const NcColorSpace* cs2) { return true; } -static NcM33f NcM3ffInvert(NcM33f m) { +static NcM33f _M3ffInvert(NcM33f m) { NcM33f inv; const int M0 = 0, M1 = 3, M2 = 6, M3 = 1, M4 = 4, M5 = 7, M6 = 2, M7 = 5, M8 = 8; float det = m.m[M0] * (m.m[M4] * m.m[M8] - m.m[M5] * m.m[M7]) - @@ -405,7 +405,7 @@ static NcM33f NcM3ffInvert(NcM33f m) { return inv; } -static NcM33f NcM33fMultiply(NcM33f lh, NcM33f rh) { +static NcM33f _M33fMultiply(NcM33f lh, NcM33f rh) { NcM33f m; m.m[0] = lh.m[0] * rh.m[0] + lh.m[1] * rh.m[3] + lh.m[2] * rh.m[6]; m.m[1] = lh.m[0] * rh.m[1] + lh.m[1] * rh.m[4] + lh.m[2] * rh.m[7]; @@ -419,7 +419,7 @@ static NcM33f NcM33fMultiply(NcM33f lh, NcM33f rh) { return m; } -static void _NcInitColorSpace(NcColorSpace* cs) { +static void _InitColorSpace(NcColorSpace* cs) { if (!cs || cs->rgbToXYZ.m[8] != 0.0) return; @@ -483,7 +483,7 @@ static void _NcInitColorSpace(NcColorSpace* cs) { float W[3] = { white[0] / white[1], white[1] / white[1], white[2] / white[1] }; // compute the coefficients to scale primaries - NcM33f mInv = NcM3ffInvert(m); + NcM33f mInv = _M3ffInvert(m); float C[3] = { mInv.m[0] * W[0] + mInv.m[1] * W[1] + mInv.m[2] * W[2], mInv.m[3] * W[0] + mInv.m[4] * W[1] + mInv.m[5] * W[2], @@ -504,9 +504,9 @@ static void _NcInitColorSpace(NcColorSpace* cs) { cs->rgbToXYZ = m; } -void NcInitColorSpaceLibrary(void) { +void NcInitColorSpaceLibrary(void) { for (size_t i = 0; i < sizeof(_colorSpaces) / sizeof(_colorSpaces[0]); i++) { - _NcInitColorSpace(&_colorSpaces[i]); + _InitColorSpace(&_colorSpaces[i]); } } @@ -517,7 +517,7 @@ const NcColorSpace* NcCreateColorSpace(const NcColorSpaceDescriptor* csd) { NcColorSpace* cs = (NcColorSpace*) calloc(1, sizeof(*cs)); cs->desc = *csd; cs->desc.name = strdup(csd->name); - _NcInitColorSpace(cs); + 
_InitColorSpace(cs); return cs; } @@ -531,7 +531,7 @@ const NcColorSpace* NcCreateColorSpaceM33(const NcColorSpaceM33Descriptor* csd, cs->desc.gamma = csd->gamma; cs->desc.linearBias = csd->linearBias; cs->rgbToXYZ = csd->rgbToXYZ; - _NcInitColorSpace(cs); + _InitColorSpace(cs); // fill in the assumed chromaticities NcXYZ whiteXYZ = NcRGBToXYZ(cs, (NcRGB){ 1, 1, 1 }); @@ -584,13 +584,7 @@ NcM33f NcGetXYZToRGBMatrix(const NcColorSpace* cs) { if (!cs) return (NcM33f) {1,0,0, 0,1,0, 0,0,1}; - return NcM3ffInvert(NcGetRGBToXYZMatrix(cs)); -} - -NcM33f GetRGBtoRGBMatrix(const NcColorSpace* src, const NcColorSpace* dst) { - NcM33f t = NcM33fMultiply(NcM3ffInvert(NcGetRGBToXYZMatrix(src)), - NcGetXYZToRGBMatrix(dst)); - return t; + return _M3ffInvert(NcGetRGBToXYZMatrix(cs)); } NcM33f NcGetRGBToRGBMatrix(const NcColorSpace* src, const NcColorSpace* dst) { @@ -601,7 +595,7 @@ NcM33f NcGetRGBToRGBMatrix(const NcColorSpace* src, const NcColorSpace* dst) { NcM33f toXYZ = NcGetRGBToXYZMatrix(src); NcM33f fromXYZ = NcGetXYZToRGBMatrix(dst); - NcM33f tx = NcM33fMultiply(fromXYZ, toXYZ); + NcM33f tx = _M33fMultiply(fromXYZ, toXYZ); return tx; } @@ -610,13 +604,13 @@ NcRGB NcTransformColor(const NcColorSpace* dst, const NcColorSpace* src, NcRGB r return rgb; } - NcM33f tx = NcM33fMultiply(NcGetRGBToXYZMatrix(src), + NcM33f tx = _M33fMultiply(NcGetRGBToXYZMatrix(src), NcGetXYZToRGBMatrix(dst)); // if the source color space indicates a curve remove it. - rgb.r = nc_ToLinear(src, rgb.r); - rgb.g = nc_ToLinear(src, rgb.g); - rgb.b = nc_ToLinear(src, rgb.b); + rgb.r = _ToLinear(src, rgb.r); + rgb.g = _ToLinear(src, rgb.g); + rgb.b = _ToLinear(src, rgb.b); NcRGB out; out.r = tx.m[0] * rgb.r + tx.m[1] * rgb.g + tx.m[2] * rgb.b; @@ -624,9 +618,9 @@ NcRGB NcTransformColor(const NcColorSpace* dst, const NcColorSpace* src, NcRGB r out.b = tx.m[6] * rgb.r + tx.m[7] * rgb.g + tx.m[8] * rgb.b; // if the destination color space indicates a curve apply it. - out.r = nc_FromLinear(dst, out.r); - out.g = nc_FromLinear(dst, out.g); - out.b = nc_FromLinear(dst, out.b); + out.r = _FromLinear(dst, out.r); + out.g = _FromLinear(dst, out.g); + out.b = _FromLinear(dst, out.b); return out; } @@ -635,15 +629,15 @@ void NcTransformColors(const NcColorSpace* dst, const NcColorSpace* src, NcRGB* if (!dst || !src || !rgb) return; - NcM33f tx = NcM33fMultiply(NcGetRGBToXYZMatrix(src), + NcM33f tx = _M33fMultiply(NcGetRGBToXYZMatrix(src), NcGetXYZToRGBMatrix(dst)); // if the source color space indicates a curve remove it. for (size_t i = 0; i < count; i++) { NcRGB out = rgb[i]; - out.r = nc_ToLinear(src, out.r); - out.g = nc_ToLinear(src, out.g); - out.b = nc_ToLinear(src, out.b); + out.r = _ToLinear(src, out.r); + out.g = _ToLinear(src, out.g); + out.b = _ToLinear(src, out.b); rgb[i] = out; } @@ -714,9 +708,9 @@ void NcTransformColors(const NcColorSpace* dst, const NcColorSpace* src, NcRGB* // if the destination color space indicates a curve apply it. 
for (size_t i = 0; i < count; i++) { NcRGB out = rgb[i]; - out.r = nc_FromLinear(dst, out.r); - out.g = nc_FromLinear(dst, out.g); - out.b = nc_FromLinear(dst, out.b); + out.r = _FromLinear(dst, out.r); + out.g = _FromLinear(dst, out.g); + out.b = _FromLinear(dst, out.b); rgb[i] = out; } } @@ -728,15 +722,15 @@ void NcTransformColorsWithAlpha(const NcColorSpace* dst, const NcColorSpace* src if (!dst || !src || !rgba) return; - NcM33f tx = NcM33fMultiply(NcGetRGBToXYZMatrix(src), + NcM33f tx = _M33fMultiply(NcGetRGBToXYZMatrix(src), NcGetXYZToRGBMatrix(dst)); // if the source color space indicates a curve remove it. for (size_t i = 0; i < count; i++) { NcRGB out = { rgba[i * 4 + 0], rgba[i * 4 + 1], rgba[i * 4 + 2] }; - out.r = nc_ToLinear(src, out.r); - out.g = nc_ToLinear(src, out.g); - out.b = nc_ToLinear(src, out.b); + out.r = _ToLinear(src, out.r); + out.g = _ToLinear(src, out.g); + out.b = _ToLinear(src, out.b); rgba[i * 4 + 0] = out.r; rgba[i * 4 + 1] = out.g; rgba[i * 4 + 2] = out.b; @@ -794,39 +788,22 @@ void NcTransformColorsWithAlpha(const NcColorSpace* dst, const NcColorSpace* src // if the destination color space indicates a curve apply it. for (size_t i = 0; i < count; i++) { NcRGB out = { rgba[i * 4 + 0], rgba[i * 4 + 1], rgba[i * 4 + 2] }; - out.r = nc_FromLinear(dst, out.r); - out.g = nc_FromLinear(dst, out.g); - out.b = nc_FromLinear(dst, out.b); + out.r = _FromLinear(dst, out.r); + out.g = _FromLinear(dst, out.g); + out.b = _FromLinear(dst, out.b); rgba[i * 4 + 0] = out.r; rgba[i * 4 + 1] = out.g; rgba[i * 4 + 2] = out.b; } } -NcRGB NcNormalizeLuminance(const NcColorSpace* cs, NcRGB rgb, float luminance) { - if (!cs) - return rgb; - - NcXYZ xyz = NcRGBToXYZ(cs, rgb); - float sum = xyz.x + xyz.y + xyz.z; - if (sum == 0.f) - return rgb; - - // NcRGBtoXYZ will linearize rgb. 
- NcXYZ XYZ = NcRGBToXYZ(cs, rgb); - rgb.r = nc_FromLinear(cs, luminance * rgb.r / XYZ.y); - rgb.g = nc_FromLinear(cs, luminance * rgb.g / XYZ.y); - rgb.b = nc_FromLinear(cs, luminance * rgb.b / XYZ.y); - return rgb; -} - NcXYZ NcRGBToXYZ(const NcColorSpace* ct, NcRGB rgb) { if (!ct) return (NcXYZ) {0,0,0}; - rgb.r = nc_ToLinear(ct, rgb.r); - rgb.g = nc_ToLinear(ct, rgb.g); - rgb.b = nc_ToLinear(ct, rgb.b); + rgb.r = _ToLinear(ct, rgb.r); + rgb.g = _ToLinear(ct, rgb.g); + rgb.b = _ToLinear(ct, rgb.b); NcM33f m = NcGetRGBToXYZMatrix(ct); return (NcXYZ) { @@ -848,9 +825,9 @@ NcRGB NcXYZToRGB(const NcColorSpace* ct, NcXYZ xyz) { m.m[6] * xyz.x + m.m[7] * xyz.y + m.m[8] * xyz.z }; - rgb.r = nc_FromLinear(ct, rgb.r); - rgb.g = nc_FromLinear(ct, rgb.g); - rgb.b = nc_FromLinear(ct, rgb.b); + rgb.r = _FromLinear(ct, rgb.r); + rgb.g = _FromLinear(ct, rgb.g); + rgb.b = _FromLinear(ct, rgb.b); return rgb; } @@ -874,7 +851,7 @@ const NcColorSpace* NcGetNamedColorSpace(const char* name) if (name) { for (size_t i = 0; i < sizeof(_colorSpaces) / sizeof(_colorSpaces[0]); i++) { if (strcmp(name, _colorSpaces[i].desc.name) == 0) { - _NcInitColorSpace((NcColorSpace*) &_colorSpaces[i]); // ensure initialization + _InitColorSpace((NcColorSpace*) &_colorSpaces[i]); // ensure initialization return &_colorSpaces[i]; } } @@ -884,7 +861,7 @@ const NcColorSpace* NcGetNamedColorSpace(const char* name) return NULL; } -static bool CompareChromaticity(const NcChromaticity* a, const NcChromaticity* b, float threshold) { +static bool _CompareChromaticity(const NcChromaticity* a, const NcChromaticity* b, float threshold) { return fabsf(a->x - b->x) < threshold && fabsf(a->y - b->y) < threshold; } @@ -901,10 +878,10 @@ NcMatchLinearColorSpace(NcChromaticity redPrimary, NcChromaticity greenPrimary, for (size_t i = 0; i < sizeof(_colorSpaces) / sizeof(NcColorSpace); ++i) { if (_colorSpaces[i].desc.gamma != 1.0f) continue; - if (CompareChromaticity(&_colorSpaces[i].desc.redPrimary, &redPrimary, threshold) && - CompareChromaticity(&_colorSpaces[i].desc.greenPrimary, &greenPrimary, threshold) && - CompareChromaticity(&_colorSpaces[i].desc.bluePrimary, &bluePrimary, threshold) && - CompareChromaticity(&_colorSpaces[i].desc.whitePoint, &whitePoint, threshold)) + if (_CompareChromaticity(&_colorSpaces[i].desc.redPrimary, &redPrimary, threshold) && + _CompareChromaticity(&_colorSpaces[i].desc.greenPrimary, &greenPrimary, threshold) && + _CompareChromaticity(&_colorSpaces[i].desc.bluePrimary, &bluePrimary, threshold) && + _CompareChromaticity(&_colorSpaces[i].desc.whitePoint, &whitePoint, threshold)) return _colorSpaces[i].desc.name; } return NULL; @@ -945,7 +922,7 @@ typedef struct { float v; } NcYuvPrime; -NcYxy _NcYuv2Yxy(NcYuvPrime c) { +static NcYxy _NcYuv2Yxy(NcYuvPrime c) { float d = 6.f * c.u - 16.f * c.v + 12.f; return (NcYxy) { c.Y, @@ -969,7 +946,7 @@ NcYxy NcKelvinToYxy(float T, float luminance) { return _NcYuv2Yxy((NcYuvPrime) {luminance, u, 3.f * v / 2.f }); } -NcYxy NcNormalizeYxy(NcYxy c) { +static NcYxy _NormalizeYxy(NcYxy c) { return (NcYxy) { c.Y, c.Y * c.x / c.y, @@ -977,12 +954,12 @@ NcYxy NcNormalizeYxy(NcYxy c) { }; } -static inline float sign_of(float x) { +static inline float _SignOf(float x) { return x > 0 ? 1.f : (x < 0) ? 
-1.f : 0.f; } NcRGB NcYxyToRGB(const NcColorSpace* cs, NcYxy c) { - NcYxy cYxy = NcNormalizeYxy(c); + NcYxy cYxy = _NormalizeYxy(c); NcRGB rgb = NcXYZToRGB(cs, (NcXYZ) { cYxy.x, cYxy.Y, cYxy.y }); NcRGB magRgb = { fabsf(rgb.r), @@ -992,9 +969,9 @@ NcRGB NcYxyToRGB(const NcColorSpace* cs, NcYxy c) { float maxc = (magRgb.r > magRgb.g) ? magRgb.r : magRgb.g; maxc = maxc > magRgb.b ? maxc : magRgb.b; NcRGB ret = (NcRGB) { - sign_of(rgb.r) * rgb.r / maxc, - sign_of(rgb.g) * rgb.g / maxc, - sign_of(rgb.b) * rgb.b / maxc }; + _SignOf(rgb.r) * rgb.r / maxc, + _SignOf(rgb.g) * rgb.g / maxc, + _SignOf(rgb.b) * rgb.b / maxc }; return ret; } diff --git a/pxr/base/gf/nc/nanocolor.h b/pxr/base/gf/nc/nanocolor.h index 2ccee35342..c63f49b77c 100644 --- a/pxr/base/gf/nc/nanocolor.h +++ b/pxr/base/gf/nc/nanocolor.h @@ -7,6 +7,7 @@ #ifndef PXR_BASE_GF_NC_NANOCOLOR_H #define PXR_BASE_GF_NC_NANOCOLOR_H +#include "pxr/base/arch/export.h" #include #include @@ -14,9 +15,6 @@ // multiple libraries can include the nanocolor library without symbol // conflicts. The default is nc_1_0_ to indicate the 1.0 version of Nanocolor. // -// pxr: note that the PXR namespace macros are in pxr/pxr.h which -// is a C++ only header; so the generated namespace prefixes can't be -// used here. #ifndef NCNAMESPACE #define NCNAMESPACE pxr_nc_1_0_ #endif @@ -28,7 +26,7 @@ // NCAPI may be overridden externally to control symbol visibility. #ifndef NCAPI -#define NCAPI +#define NCAPI ARCH_HIDDEN #endif #ifdef __cplusplus diff --git a/pxr/base/gf/testenv/testGfColor.cpp b/pxr/base/gf/testenv/testGfColorCpp.cpp similarity index 100% rename from pxr/base/gf/testenv/testGfColor.cpp rename to pxr/base/gf/testenv/testGfColorCpp.cpp diff --git a/pxr/base/gf/testenv/testGfColorPy.py b/pxr/base/gf/testenv/testGfColorPy.py new file mode 100644 index 0000000000..df748bd8d5 --- /dev/null +++ b/pxr/base/gf/testenv/testGfColorPy.py @@ -0,0 +1,99 @@ +#!/pxrpythonsubst +# +# Copyright 2024 Pixar +# +# Licensed under the terms set forth in the LICENSE.txt file available at +# https://openusd.org/license/. 
+# + +import unittest +from pxr import Gf + +def colorApproxEq(c1, c2): + return Gf.IsClose(c1.GetRGB(), c2.GetRGB(), 1e-5) + +class TestGfColor(unittest.TestCase): + def setUp(self): + self.csSRGB = Gf.ColorSpace(Gf.ColorSpaceNames.SRGB) + self.csLinearSRGB = Gf.ColorSpace(Gf.ColorSpaceNames.LinearSRGB) + self.csLinearRec709 = Gf.ColorSpace(Gf.ColorSpaceNames.LinearRec709) + self.csG22Rec709 = Gf.ColorSpace(Gf.ColorSpaceNames.G22Rec709) + self.csAp0 = Gf.ColorSpace(Gf.ColorSpaceNames.LinearAP0) + self.csSRGBP3 = Gf.ColorSpace(Gf.ColorSpaceNames.SRGBDisplayP3) + self.csLinearRec2020 = Gf.ColorSpace(Gf.ColorSpaceNames.LinearRec2020) + self.csIdentity = Gf.ColorSpace(Gf.ColorSpaceNames.Identity) + + self.mauveLinear = Gf.Color(Gf.Vec3f(0.5, 0.25, 0.125), self.csLinearRec709) + self.mauveGamma = Gf.Color(self.mauveLinear, self.csG22Rec709) + + def test_Repr(self): + c = Gf.Color() + self.assertEqual(c, eval(repr(c))) + cs = Gf.ColorSpace("identity") + self.assertEqual(cs, eval(repr(cs))) + + def test_DefaultConstruction(self): + c = Gf.Color() + self.assertEqual(c.GetColorSpace(), self.csLinearRec709) + self.assertEqual(c.GetRGB(), Gf.Vec3f(0, 0, 0)) + + def test_ConstructionWithColorSpace(self): + c = Gf.Color(self.csSRGB) + self.assertEqual(c.GetColorSpace(), self.csSRGB) + self.assertEqual(c.GetRGB(), Gf.Vec3f(0, 0, 0)) + + def test_ConstructionWithColorSpaceAndRgb(self): + c = Gf.Color(Gf.Vec3f(0.5, 0.5, 0.5), self.csSRGB) + self.assertEqual(c.GetColorSpace(), self.csSRGB) + self.assertEqual(c.GetRGB(), Gf.Vec3f(0.5, 0.5, 0.5)) + + def test_EotfCurveLinear(self): + c1 = Gf.Color(self.mauveLinear, self.csSRGB) # convert linear to SRGB + c2 = Gf.Color(c1, self.csLinearSRGB) + self.assertTrue(Gf.IsClose(self.mauveLinear, c2, 1e-5)) + c3 = Gf.Color(c2, self.csSRGB) + self.assertTrue(Gf.IsClose(c1, c3, 1e-5)) + + def test_RoundTrippingToRec2020(self): + c1 = Gf.Color(self.mauveLinear, self.csLinearRec2020) + c2 = Gf.Color(c1, self.csLinearSRGB) + self.assertTrue(Gf.IsClose(self.mauveLinear, c2, 1e-5)) + + def test_ConstructionWithConversion(self): + colG22Rec709 = Gf.Color(self.mauveLinear, self.csG22Rec709) + self.assertTrue(Gf.IsClose(colG22Rec709, self.mauveGamma, 1e-5)) + colLinRec709 = Gf.Color(colG22Rec709, self.csLinearRec709) + self.assertTrue(Gf.IsClose(colLinRec709, self.mauveLinear, 1e-5)) + + self.assertEqual(colG22Rec709.GetColorSpace(), self.csG22Rec709) + self.assertEqual(colLinRec709.GetColorSpace(), self.csLinearRec709) + + colSRGB_2 = Gf.Color(colLinRec709, self.csSRGB) + colAp0 = Gf.Color(colSRGB_2, self.csAp0) + colSRGB_3 = Gf.Color(colAp0, self.csSRGB) + col_SRGBP3 = Gf.Color(colSRGB_3, self.csSRGBP3) + colLinRec709_2 = Gf.Color(col_SRGBP3, self.csLinearRec709) + self.assertTrue(Gf.IsClose(colLinRec709_2, colLinRec709, 1e-5)) + + def test_MoveConstructor(self): + c1 = Gf.Color(Gf.Vec3f(0.5, 0.25, 0.125), self.csAp0) + c2 = Gf.Color(c1) # Python doesn't have move semantics, but this tests copying + self.assertEqual(c2.GetColorSpace(), self.csAp0) + self.assertTrue(Gf.IsClose(c2.GetRGB(), Gf.Vec3f(0.5, 0.25, 0.125), 1e-5)) + + def test_CopyAssignment(self): + c1 = Gf.Color(Gf.Vec3f(0.5, 0.25, 0.125), self.csAp0) + c2 = Gf.Color() + c2 = c1 + self.assertEqual(c2.GetColorSpace(), self.csAp0) + self.assertTrue(Gf.IsClose(c2.GetRGB(), Gf.Vec3f(0.5, 0.25, 0.125), 1e-5)) + + def test_Comparison(self): + c1 = Gf.Color(Gf.Vec3f(0.5, 0.25, 0.125), self.csAp0) + c2 = Gf.Color(Gf.Vec3f(0.5, 0.25, 0.125), self.csAp0) + self.assertTrue(Gf.IsClose(c1, c2, 1e-5)) + 
self.assertEqual(c1.GetColorSpace(), c2.GetColorSpace()) + self.assertTrue(colorApproxEq(c1, c2)) + +if __name__ == '__main__': + unittest.main() diff --git a/pxr/base/gf/testenv/testGfMath.py b/pxr/base/gf/testenv/testGfMath.py index b3cc13bfc5..f7e62de13c 100644 --- a/pxr/base/gf/testenv/testGfMath.py +++ b/pxr/base/gf/testenv/testGfMath.py @@ -22,6 +22,14 @@ def _AssertListIsClose(self, first, second, delta=1e-6): for (f,s) in zip(first, second): self.assertAlmostEqual(f, s, delta=delta) + def test_SmoothStep(self): + t1 = SmoothStep(0, 1, .25) + t2 = SmoothStep(0, 1, .75) + self.assertEqual(0, SmoothStep(0, 1, 0)) + self.assertEqual(1, SmoothStep(0, 1, 1)) + self.assertEqual(0.5, SmoothStep(0, 1, 0.5)) + self.assertTrue(t1 > 0 and t1 < .5 and t2 > .5 and t2 < 1) + def test_HalfRoundTrip(self): from pxr.Gf import _HalfRoundTrip self.assertEqual(1.0, _HalfRoundTrip(1.0)) diff --git a/pxr/base/gf/wrapBBox3d.cpp b/pxr/base/gf/wrapBBox3d.cpp index db569010b6..b7a5deefe4 100644 --- a/pxr/base/gf/wrapBBox3d.cpp +++ b/pxr/base/gf/wrapBBox3d.cpp @@ -11,19 +11,19 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { string _Repr(GfBBox3d const &self) { diff --git a/pxr/base/gf/wrapCamera.cpp b/pxr/base/gf/wrapCamera.cpp index 389f70f613..88adc73310 100644 --- a/pxr/base/gf/wrapCamera.cpp +++ b/pxr/base/gf/wrapCamera.cpp @@ -12,14 +12,14 @@ #include "pxr/base/gf/frustum.h" #include "pxr/base/tf/pyEnum.h" -#include +#include "pxr/external/boost/python/operators.hpp" #include -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static float diff --git a/pxr/base/gf/wrapColor.cpp b/pxr/base/gf/wrapColor.cpp new file mode 100644 index 0000000000..47e514e253 --- /dev/null +++ b/pxr/base/gf/wrapColor.cpp @@ -0,0 +1,58 @@ +/// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. 
+// + +#include "pxr/pxr.h" +#include "pxr/base/gf/color.h" +#include "pxr/base/gf/vec3f.h" +#include "pxr/base/gf/colorSpace.h" +#include "pxr/base/tf/pyUtils.h" +#include "pxr/base/tf/stringUtils.h" +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include + +PXR_NAMESPACE_USING_DIRECTIVE + +using namespace pxr_boost::python; + +namespace { + +std::string __repr__(GfColor const &self) +{ + return TF_PY_REPR_PREFIX + + TfStringPrintf("Color(%s, %s)", + TfPyRepr(self.GetRGB()).c_str(), + TfPyRepr(self.GetColorSpace()).c_str()); +} + +} + +void wrapColor() +{ + class_("Color", + "A class representing a color, supporting different " + "color spaces.") + .def(init<>()) + .def(init()) + .def(init()) + .def(init()) + .def(init()) + .def("__repr__", &__repr__) + .def("SetFromPlanckianLocus", &GfColor::SetFromPlanckianLocus, + (arg("kelvin"), arg("luminance"))) + .def("GetRGB", &GfColor::GetRGB) + .def("GetColorSpace", &GfColor::GetColorSpace) + .def(self == self) + .def(self != self) + ; + + def("IsClose", + (bool (*)(const GfColor &v1, const GfColor &v2, double)) GfIsClose, + (arg("v1"), arg("v2"), arg("tolerance"))); +} + diff --git a/pxr/base/gf/wrapColorSpace.cpp b/pxr/base/gf/wrapColorSpace.cpp new file mode 100644 index 0000000000..902ae826af --- /dev/null +++ b/pxr/base/gf/wrapColorSpace.cpp @@ -0,0 +1,59 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// + +#include "pxr/pxr.h" +#include "pxr/base/gf/color.h" +#include "pxr/base/gf/colorSpace.h" +#include "pxr/base/tf/pyStaticTokens.h" +#include "pxr/base/tf/pyUtils.h" +#include "pxr/base/tf/stringUtils.h" + +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include + +PXR_NAMESPACE_USING_DIRECTIVE + +using namespace pxr_boost::python; + +namespace { + +static std::string __repr__(GfColorSpace const &self) +{ + return TF_PY_REPR_PREFIX + + TfStringPrintf("ColorSpace(%s)", TfPyRepr(self.GetName()).c_str()); +} + +} // anon + +void wrapColorSpace() +{ + class_("ColorSpace", init()) + .def(init( + (arg("name"), arg("redChroma"), arg("greenChroma"), + arg("blueChroma"), arg("whitePoint"), arg("gamma"), + arg("linearBias")))) + .def(init( + (arg("name"), arg("rgbToXYZ"), arg("gamma"), arg("linearBias")))) + .def("__repr__", &__repr__) + .def("GetName", &GfColorSpace::GetName) + .def("ConvertRGBSpan", &GfColorSpace::ConvertRGBSpan) + .def("ConvertRGBASpan", &GfColorSpace::ConvertRGBASpan) + .def("Convert", &GfColorSpace::Convert) + .def("GetRGBToXYZ", &GfColorSpace::GetRGBToXYZ) + .def("GetGamma", &GfColorSpace::GetGamma) + .def("GetLinearBias", &GfColorSpace::GetLinearBias) + .def("GetTransferFunctionParams", &GfColorSpace::GetTransferFunctionParams) + .def("GetPrimariesAndWhitePoint", &GfColorSpace::GetPrimariesAndWhitePoint) + .def(self == self) + .def(self != self); + + TF_PY_WRAP_PUBLIC_TOKENS("ColorSpaceNames", GfColorSpaceNames, + GF_COLORSPACE_NAME_TOKENS); +} diff --git a/pxr/base/gf/wrapDualQuat.template.cpp b/pxr/base/gf/wrapDualQuat.template.cpp index 6c35de808e..89215a56b7 100644 --- a/pxr/base/gf/wrapDualQuat.template.cpp +++ b/pxr/base/gf/wrapDualQuat.template.cpp @@ -18,23 +18,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include 
-#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__({{ DUALQUAT }} const &self) { @@ -169,7 +169,7 @@ void wrapDualQuat{{ SUFFIX }}() // __itruediv__ not added by .def( self /= {{ SCL }}() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapDualQuatd.cpp b/pxr/base/gf/wrapDualQuatd.cpp index df5f59ae74..e76d3d6021 100644 --- a/pxr/base/gf/wrapDualQuatd.cpp +++ b/pxr/base/gf/wrapDualQuatd.cpp @@ -18,23 +18,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfDualQuatd const &self) { @@ -165,7 +165,7 @@ void wrapDualQuatd() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapDualQuatf.cpp b/pxr/base/gf/wrapDualQuatf.cpp index c402f8085c..b14df3046c 100644 --- a/pxr/base/gf/wrapDualQuatf.cpp +++ b/pxr/base/gf/wrapDualQuatf.cpp @@ -18,23 +18,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfDualQuatf const &self) { @@ -165,7 +165,7 @@ void wrapDualQuatf() // __itruediv__ not added by .def( self /= float() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapDualQuath.cpp b/pxr/base/gf/wrapDualQuath.cpp index a8a2c24f67..5802475967 100644 --- a/pxr/base/gf/wrapDualQuath.cpp +++ b/pxr/base/gf/wrapDualQuath.cpp @@ -18,23 +18,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfDualQuath const &self) { @@ -165,7 +165,7 @@ void wrapDualQuath() // __itruediv__ not added by .def( self /= GfHalf() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapFrustum.cpp b/pxr/base/gf/wrapFrustum.cpp index d01438c9a3..3395c9c607 100644 --- a/pxr/base/gf/wrapFrustum.cpp +++ b/pxr/base/gf/wrapFrustum.cpp @@ -13,18 +13,18 @@ #include "pxr/base/tf/pyResultConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include - -using namespace boost::python; +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/enum.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static std::string _Repr(GfFrustum const &self) @@ -57,7 +57,7 @@ GetPerspectiveHelper( const GfFrustum &self, bool isFovVertical ) { bool result = self.GetPerspective( isFovVertical, &fov, &aspect, &nearDist, &farDist ); return result ? - boost::python::make_tuple( fov, aspect, nearDist, farDist ) : object(); + pxr_boost::python::make_tuple( fov, aspect, nearDist, farDist ) : object(); } static tuple @@ -66,7 +66,7 @@ GetOrthographicHelper( const GfFrustum &self ) { bool result = self.GetOrthographic( &left, &right, &bottom, &top, &near, &far ); return result ? - boost::python:: + pxr_boost::python:: make_tuple( left, right, bottom, top, near, far ) : tuple(); } @@ -74,11 +74,11 @@ static tuple ComputeViewFrameHelper( const GfFrustum &self ) { GfVec3d side, up, view; self.ComputeViewFrame( &side, &up, &view ); - return boost::python::make_tuple( side, up, view ); + return pxr_boost::python::make_tuple( side, up, view ); } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( FitToSphere_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( FitToSphere_overloads, FitToSphere, 2, 3 ); } // anonymous namespace diff --git a/pxr/base/gf/wrapGamma.cpp b/pxr/base/gf/wrapGamma.cpp index 8ebdcd1949..5f590c8977 100644 --- a/pxr/base/gf/wrapGamma.cpp +++ b/pxr/base/gf/wrapGamma.cpp @@ -4,7 +4,7 @@ // Licensed under the terms set forth in the LICENSE.txt file available at // https://openusd.org/license. // -#include +#include "pxr/external/boost/python/def.hpp" #include "pxr/pxr.h" #include "pxr/base/gf/gamma.h" @@ -15,10 +15,10 @@ #include "pxr/base/tf/wrapTypeHelpers.h" -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + void wrapGamma() { def("ApplyGamma", (GfVec3f(*)(GfVec3f const &,double))GfApplyGamma); diff --git a/pxr/base/gf/wrapHalf.cpp b/pxr/base/gf/wrapHalf.cpp index 38deeb4a5f..a252f82456 100644 --- a/pxr/base/gf/wrapHalf.cpp +++ b/pxr/base/gf/wrapHalf.cpp @@ -8,15 +8,15 @@ #include "pxr/pxr.h" #include "pxr/base/gf/half.h" -#include -#include -#include -#include - -using namespace boost::python; +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/handle.hpp" +#include "pxr/external/boost/python/to_python_converter.hpp" +#include "pxr/external/boost/python/converter/from_python.hpp" PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { // Registers to and from python conversions with boost.python for half. 
@@ -27,7 +27,7 @@ struct HalfPythonConversions to_python_converter(); // from-python converter::registry::push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } // to-python @@ -71,5 +71,5 @@ static GfHalf _HalfRoundTrip(GfHalf in) { return in; } void wrapHalf() { HalfPythonConversions::Register(); - boost::python::def("_HalfRoundTrip", _HalfRoundTrip); + pxr_boost::python::def("_HalfRoundTrip", _HalfRoundTrip); } diff --git a/pxr/base/gf/wrapHomogeneous.cpp b/pxr/base/gf/wrapHomogeneous.cpp index 9ad5f62b60..1ea7fc966d 100644 --- a/pxr/base/gf/wrapHomogeneous.cpp +++ b/pxr/base/gf/wrapHomogeneous.cpp @@ -4,15 +4,15 @@ // Licensed under the terms set forth in the LICENSE.txt file available at // https://openusd.org/license. // -#include +#include "pxr/external/boost/python/def.hpp" #include "pxr/pxr.h" #include "pxr/base/gf/homogeneous.h" -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + void wrapHomogeneous() { def("GetHomogenized", (GfVec4d (*)(const GfVec4d &)) GfGetHomogenized); diff --git a/pxr/base/gf/wrapInterval.cpp b/pxr/base/gf/wrapInterval.cpp index a5b83f0c8b..da02aed3cf 100644 --- a/pxr/base/gf/wrapInterval.cpp +++ b/pxr/base/gf/wrapInterval.cpp @@ -11,17 +11,17 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/init.hpp" +#include "pxr/external/boost/python/operators.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string diff --git a/pxr/base/gf/wrapLimits.cpp b/pxr/base/gf/wrapLimits.cpp index 686e62b8cd..974bc343a3 100644 --- a/pxr/base/gf/wrapLimits.cpp +++ b/pxr/base/gf/wrapLimits.cpp @@ -4,15 +4,15 @@ // Licensed under the terms set forth in the LICENSE.txt file available at // https://openusd.org/license. 
// -#include +#include "pxr/external/boost/python/scope.hpp" #include "pxr/pxr.h" #include "pxr/base/gf/limits.h" -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + void wrapLimits() { scope().attr("MIN_VECTOR_LENGTH") = GF_MIN_VECTOR_LENGTH; diff --git a/pxr/base/gf/wrapLine.cpp b/pxr/base/gf/wrapLine.cpp index 14c6266289..ef5ccd526a 100644 --- a/pxr/base/gf/wrapLine.cpp +++ b/pxr/base/gf/wrapLine.cpp @@ -10,21 +10,21 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string _Repr(GfLine const &self) { @@ -39,7 +39,7 @@ FindClosestPointsHelper( const GfLine &l1, const GfLine &l2 ) GfVec3d p1(0), p2(0); double t1 = 0.0, t2 = 0.0; bool result = GfFindClosestPoints( l1, l2, &p1, &p2, &t1, &t2 ); - return boost::python::make_tuple( result, p1, p2, t1, t2 ); + return pxr_boost::python::make_tuple( result, p1, p2, t1, t2 ); } static tuple @@ -47,7 +47,7 @@ FindClosestPointHelper( const GfLine &self, const GfVec3d &point ) { double t; GfVec3d result = self.FindClosestPoint( point, &t ); - return boost::python::make_tuple( result, t ); + return pxr_boost::python::make_tuple( result, t ); } static void diff --git a/pxr/base/gf/wrapLineSeg.cpp b/pxr/base/gf/wrapLineSeg.cpp index 1799ebf27b..db02f5181b 100644 --- a/pxr/base/gf/wrapLineSeg.cpp +++ b/pxr/base/gf/wrapLineSeg.cpp @@ -11,21 +11,21 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string _Repr(GfLineSeg const &self) { @@ -40,7 +40,7 @@ FindClosestPointsHelper1( const GfLine &l1, const GfLineSeg &l2 ) GfVec3d p1(0), p2(0); double t1 = 0, t2 = 0; bool result = GfFindClosestPoints( l1, l2, &p1, &p2, &t1, &t2 ); - return boost::python::make_tuple( result, p1, p2, t1, t2 ); + return pxr_boost::python::make_tuple( result, p1, p2, t1, t2 ); } static tuple @@ -49,7 +49,7 @@ FindClosestPointsHelper2( const GfLineSeg &l1, const GfLineSeg &l2 ) GfVec3d p1(0), p2(0); double t1 = 0, t2 = 0; bool result = GfFindClosestPoints( l1, l2, &p1, &p2, &t1, &t2 ); - return boost::python::make_tuple( result, p1, p2, t1, t2 ); + return pxr_boost::python::make_tuple( result, p1, p2, t1, t2 ); } static tuple @@ -57,7 +57,7 @@ FindClosestPointHelper( const GfLineSeg &self, const GfVec3d &point ) { double t; GfVec3d p1 = self.FindClosestPoint( point, &t ); - return boost::python::make_tuple( p1, t ); + return pxr_boost::python::make_tuple( p1, t ); } } // anonymous namespace diff --git a/pxr/base/gf/wrapMath.cpp b/pxr/base/gf/wrapMath.cpp index 
49aa99a98c..032f6ceb5b 100644 --- a/pxr/base/gf/wrapMath.cpp +++ b/pxr/base/gf/wrapMath.cpp @@ -4,7 +4,7 @@ // Licensed under the terms set forth in the LICENSE.txt file available at // https://openusd.org/license. // -#include +#include "pxr/external/boost/python/def.hpp" #include "pxr/pxr.h" #include "pxr/base/tf/pyUtils.h" @@ -21,11 +21,12 @@ #include "pxr/base/gf/vec3d.h" #include "pxr/base/gf/vec4d.h" -using namespace boost::python; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + void wrapMath() { @@ -33,6 +34,10 @@ void wrapMath() def("RadiansToDegrees", GfRadiansToDegrees); def("DegreesToRadians", GfDegreesToRadians); + def("SmoothStep", GfSmoothStep, + (arg("slope0") = 0.0, + arg("slope1") = 0.0)); + def("Sqr", GfSqr); def("Sqr", GfSqr); diff --git a/pxr/base/gf/wrapMatrix.template.cpp b/pxr/base/gf/wrapMatrix.template.cpp index 58338a9f5f..a0ee873ebb 100644 --- a/pxr/base/gf/wrapMatrix.template.cpp +++ b/pxr/base/gf/wrapMatrix.template.cpp @@ -21,25 +21,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -120,7 +121,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -196,18 +197,18 @@ static {{ MAT }} *__init__() { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct {{ MAT }}_Pickle_Suite : boost::python::pickle_suite +struct {{ MAT }}_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const {{ MAT }} &m) + static pxr_boost::python::tuple getinitargs(const {{ MAT }} &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( {{ MATRIX("m[%(i)s][%(j)s]", indent=12) }}); } }; static size_t __hash__({{ MAT }} const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in @@ -312,6 +313,10 @@ void wrapMatrix{{ SUFFIX }}() .def( self * GfVec{{ SUFFIX }}() ) .def( GfVec{{ SUFFIX }}() * self ) {% if SCL == 'double' %} + + // Provide wrapping that makes up for the fact that, in Python, we + // don't allow implicit conversion from GfVec3f to GfVec3d (which we + // do in C++). 
.def( self * GfVec{{ DIM }}f() ) .def( GfVec{{ DIM }}f() * self ) {% endif %} diff --git a/pxr/base/gf/wrapMatrix2d.cpp b/pxr/base/gf/wrapMatrix2d.cpp index b46a525e0e..c56064eabf 100644 --- a/pxr/base/gf/wrapMatrix2d.cpp +++ b/pxr/base/gf/wrapMatrix2d.cpp @@ -20,25 +20,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -117,7 +118,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -192,11 +193,11 @@ static GfMatrix2d *__init__() { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct GfMatrix2d_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix2d_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix2d &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix2d &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[1][0], m[1][1]); } @@ -204,7 +205,7 @@ struct GfMatrix2d_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix2d const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in @@ -296,6 +297,10 @@ void wrapMatrix2d() .def( self / self ) .def( self * GfVec2d() ) .def( GfVec2d() * self ) + + // Provide wrapping that makes up for the fact that, in Python, we + // don't allow implicit conversion from GfVec3f to GfVec3d (which we + // do in C++). 
.def( self * GfVec2f() ) .def( GfVec2f() * self ) diff --git a/pxr/base/gf/wrapMatrix2f.cpp b/pxr/base/gf/wrapMatrix2f.cpp index a2977acfd4..db42475826 100644 --- a/pxr/base/gf/wrapMatrix2f.cpp +++ b/pxr/base/gf/wrapMatrix2f.cpp @@ -20,25 +20,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -117,7 +118,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -192,11 +193,11 @@ static GfMatrix2f *__init__() { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct GfMatrix2f_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix2f_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix2f &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix2f &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[1][0], m[1][1]); } @@ -204,7 +205,7 @@ struct GfMatrix2f_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix2f const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. 
With boost building for python 3 that results in diff --git a/pxr/base/gf/wrapMatrix3d.cpp b/pxr/base/gf/wrapMatrix3d.cpp index 3010f461a0..b4dc340788 100644 --- a/pxr/base/gf/wrapMatrix3d.cpp +++ b/pxr/base/gf/wrapMatrix3d.cpp @@ -23,25 +23,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -121,7 +122,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -197,11 +198,11 @@ static GfMatrix3d *__init__() { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct GfMatrix3d_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix3d_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix3d &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix3d &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); @@ -210,7 +211,7 @@ struct GfMatrix3d_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix3d const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in @@ -314,6 +315,10 @@ void wrapMatrix3d() .def( self / self ) .def( self * GfVec3d() ) .def( GfVec3d() * self ) + + // Provide wrapping that makes up for the fact that, in Python, we + // don't allow implicit conversion from GfVec3f to GfVec3d (which we + // do in C++). 
.def( self * GfVec3f() ) .def( GfVec3f() * self ) diff --git a/pxr/base/gf/wrapMatrix3f.cpp b/pxr/base/gf/wrapMatrix3f.cpp index a2df5cb737..0f45c11fe7 100644 --- a/pxr/base/gf/wrapMatrix3f.cpp +++ b/pxr/base/gf/wrapMatrix3f.cpp @@ -23,25 +23,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -121,7 +122,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -197,11 +198,11 @@ static GfMatrix3f *__init__() { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct GfMatrix3f_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix3f_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix3f &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix3f &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); @@ -210,7 +211,7 @@ struct GfMatrix3f_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix3f const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in diff --git a/pxr/base/gf/wrapMatrix4.template.cpp b/pxr/base/gf/wrapMatrix4.template.cpp index 592b18c0b0..ebc6a89077 100644 --- a/pxr/base/gf/wrapMatrix4.template.cpp +++ b/pxr/base/gf/wrapMatrix4.template.cpp @@ -7,8 +7,8 @@ // This file is generated by a script. Do not edit directly. Edit the // wrapMatrix4.template.cpp file to make changes. 
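The hunks above and below apply one mechanical pattern per wrapper file: angle-bracket boost/python includes become quoted pxr/external/boost/python includes, BOOST_PYTHON_MAX_ARITY becomes PXR_BOOST_PYTHON_MAX_ARITY, and the using-directive switches to pxr_boost::python and moves below PXR_NAMESPACE_USING_DIRECTIVE. A condensed sketch of what a migrated wrapper looks like; Foo and wrapFoo are illustrative stand-ins, not types from these hunks:

// Sketch only: Foo / wrapFoo stand in for the real Gf types.
#ifndef PXR_BOOST_PYTHON_MAX_ARITY
#define PXR_BOOST_PYTHON_MAX_ARITY 20   // raised before any python header, as wrapMatrix4 does
#endif

#include "pxr/pxr.h"
#include "pxr/external/boost/python/class.hpp"

PXR_NAMESPACE_USING_DIRECTIVE

// The using-directive now pulls in the vendored pxr_boost::python fork
// instead of boost::python, and sits after the USD namespace directive.
using namespace pxr_boost::python;

struct Foo { double x = 0.0; };   // stand-in value type

void wrapFoo()
{
    class_<Foo>("Foo")
        .def_readwrite("x", &Foo::x);
}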
-#ifndef BOOST_PYTHON_MAX_ARITY -#define BOOST_PYTHON_MAX_ARITY 20 +#ifndef PXR_BOOST_PYTHON_MAX_ARITY +#define PXR_BOOST_PYTHON_MAX_ARITY 20 #endif {% extends "wrapMatrix.template.cpp" %} @@ -24,14 +24,14 @@ static tuple FactorWithEpsilon({{ MAT }} &self, double eps) { {{ MAT }} r, u, p; GfVec3{{ SCL[0] }} s, t; bool result = self.Factor(&r, &s, &u, &t, &p, eps); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static tuple Factor({{ MAT }} &self) { {{ MAT }} r, u, p; GfVec3{{ SCL[0] }} s, t; bool result = self.Factor(&r, &s, &u, &t, &p); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static {{ MAT }} RemoveScaleShearWrapper( const {{ MAT }} &self ) { diff --git a/pxr/base/gf/wrapMatrix4d.cpp b/pxr/base/gf/wrapMatrix4d.cpp index d0bea4b388..565ef9a6b8 100644 --- a/pxr/base/gf/wrapMatrix4d.cpp +++ b/pxr/base/gf/wrapMatrix4d.cpp @@ -7,8 +7,8 @@ // This file is generated by a script. Do not edit directly. Edit the // wrapMatrix4.template.cpp file to make changes. -#ifndef BOOST_PYTHON_MAX_ARITY -#define BOOST_PYTHON_MAX_ARITY 20 +#ifndef PXR_BOOST_PYTHON_MAX_ARITY +#define PXR_BOOST_PYTHON_MAX_ARITY 20 #endif @@ -27,25 +27,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -126,7 +127,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -203,14 +204,14 @@ static tuple FactorWithEpsilon(GfMatrix4d &self, double eps) { GfMatrix4d r, u, p; GfVec3d s, t; bool result = self.Factor(&r, &s, &u, &t, &p, eps); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static tuple Factor(GfMatrix4d &self) { GfMatrix4d r, u, p; GfVec3d s, t; bool result = self.Factor(&r, &s, &u, &t, &p); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static GfMatrix4d RemoveScaleShearWrapper( const GfMatrix4d &self ) { @@ -220,11 +221,11 @@ static GfMatrix4d RemoveScaleShearWrapper( const GfMatrix4d &self ) { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. 
-struct GfMatrix4d_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix4d_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix4d &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix4d &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], m[2][0], m[2][1], m[2][2], m[2][3], @@ -234,7 +235,7 @@ struct GfMatrix4d_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix4d const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in @@ -353,6 +354,10 @@ void wrapMatrix4d() .def( self / self ) .def( self * GfVec4d() ) .def( GfVec4d() * self ) + + // Provide wrapping that makes up for the fact that, in Python, we + // don't allow implicit conversion from GfVec3f to GfVec3d (which we + // do in C++). .def( self * GfVec4f() ) .def( GfVec4f() * self ) diff --git a/pxr/base/gf/wrapMatrix4f.cpp b/pxr/base/gf/wrapMatrix4f.cpp index ac8664f7ad..2fa63a7110 100644 --- a/pxr/base/gf/wrapMatrix4f.cpp +++ b/pxr/base/gf/wrapMatrix4f.cpp @@ -7,8 +7,8 @@ // This file is generated by a script. Do not edit directly. Edit the // wrapMatrix4.template.cpp file to make changes. -#ifndef BOOST_PYTHON_MAX_ARITY -#define BOOST_PYTHON_MAX_ARITY 20 +#ifndef PXR_BOOST_PYTHON_MAX_ARITY +#define PXR_BOOST_PYTHON_MAX_ARITY 20 #endif @@ -27,25 +27,26 @@ #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/detail/api_placeholder.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -126,7 +127,7 @@ static void throwIndexErr( const char *msg ) { PyErr_SetString(PyExc_IndexError, msg); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } static int @@ -203,14 +204,14 @@ static tuple FactorWithEpsilon(GfMatrix4f &self, double eps) { GfMatrix4f r, u, p; GfVec3f s, t; bool result = self.Factor(&r, &s, &u, &t, &p, eps); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static tuple Factor(GfMatrix4f &self) { GfMatrix4f r, u, p; GfVec3f s, t; bool result = self.Factor(&r, &s, &u, &t, &p); - return boost::python::make_tuple(result, r, s, u, t, p); + return pxr_boost::python::make_tuple(result, r, s, u, t, p); } static GfMatrix4f RemoveScaleShearWrapper( const GfMatrix4f &self ) { @@ -220,11 +221,11 @@ static GfMatrix4f RemoveScaleShearWrapper( const GfMatrix4f &self ) { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire 
classes // (including code), which we don't support in pxml. -struct GfMatrix4f_Pickle_Suite : boost::python::pickle_suite +struct GfMatrix4f_Pickle_Suite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfMatrix4f &m) + static pxr_boost::python::tuple getinitargs(const GfMatrix4f &m) { - return boost::python::make_tuple( + return pxr_boost::python::make_tuple( m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], m[2][0], m[2][1], m[2][2], m[2][3], @@ -234,7 +235,7 @@ struct GfMatrix4f_Pickle_Suite : boost::python::pickle_suite static size_t __hash__(GfMatrix4f const &m) { return TfHash{}(m); } -static boost::python::tuple get_dimension() +static pxr_boost::python::tuple get_dimension() { // At one time this was a constant static tuple we returned for // dimension. With boost building for python 3 that results in diff --git a/pxr/base/gf/wrapMultiInterval.cpp b/pxr/base/gf/wrapMultiInterval.cpp index 79a8a96b2c..a90fd9ad53 100644 --- a/pxr/base/gf/wrapMultiInterval.cpp +++ b/pxr/base/gf/wrapMultiInterval.cpp @@ -12,17 +12,17 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include +#include "pxr/external/boost/python/iterator.hpp" +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/operators.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string diff --git a/pxr/base/gf/wrapPlane.cpp b/pxr/base/gf/wrapPlane.cpp index 1fb035fb87..8500ae206d 100644 --- a/pxr/base/gf/wrapPlane.cpp +++ b/pxr/base/gf/wrapPlane.cpp @@ -14,20 +14,20 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string _Repr(GfPlane const &self) { diff --git a/pxr/base/gf/wrapQuat.template.cpp b/pxr/base/gf/wrapQuat.template.cpp index 0f6adc4b07..7e6d8b3b86 100644 --- a/pxr/base/gf/wrapQuat.template.cpp +++ b/pxr/base/gf/wrapQuat.template.cpp @@ -17,23 +17,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__({{ QUAT }} const &self) { @@ -164,7 +164,7 @@ void wrapQuat{{ SUFFIX }}() // __itruediv__ not added by .def( self /= {{ SCL }}() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". 
This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapQuatd.cpp b/pxr/base/gf/wrapQuatd.cpp index cb01e86629..e4ed6ef9a8 100644 --- a/pxr/base/gf/wrapQuatd.cpp +++ b/pxr/base/gf/wrapQuatd.cpp @@ -17,23 +17,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfQuatd const &self) { @@ -160,7 +160,7 @@ void wrapQuatd() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapQuaternion.cpp b/pxr/base/gf/wrapQuaternion.cpp index 62adb52140..7a1618090c 100644 --- a/pxr/base/gf/wrapQuaternion.cpp +++ b/pxr/base/gf/wrapQuaternion.cpp @@ -12,26 +12,26 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( GetNormalized_overloads, GetNormalized, 0, 1 ); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( Normalize_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( Normalize_overloads, Normalize, 0, 1 ); static GfQuaternion __truediv__(const GfQuaternion &self, double value) @@ -123,7 +123,7 @@ void wrapQuaternion() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
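The *_Pickle_Suite hunks above only re-home the base class and the tuple helpers; the shape of a suite is unchanged. A minimal sketch of the pattern, assuming pxr_boost::python keeps boost::python's pickle_suite / def_pickle interface (the hunks confirm pickle_suite); Foo2 and its wrapper are illustrative:

#include "pxr/external/boost/python/class.hpp"
#include "pxr/external/boost/python/init.hpp"
#include "pxr/external/boost/python/tuple.hpp"

using namespace pxr_boost::python;

// Illustrative value type that can be rebuilt from its two constructor args.
struct Foo2 {
    Foo2(double a_ = 0.0, double b_ = 0.0) : a(a_), b(b_) {}
    double a, b;
};

// getinitargs hands pickle the constructor arguments needed to recreate
// the object on the Python side.
struct Foo2_Pickle_Suite : pxr_boost::python::pickle_suite
{
    static pxr_boost::python::tuple getinitargs(const Foo2 &f) {
        return pxr_boost::python::make_tuple(f.a, f.b);
    }
};

void wrapFoo2()
{
    class_<Foo2>("Foo2", init<double, double>())
        .def_pickle(Foo2_Pickle_Suite());
}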
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapQuatf.cpp b/pxr/base/gf/wrapQuatf.cpp index 3b4c77bc6d..6ab14920be 100644 --- a/pxr/base/gf/wrapQuatf.cpp +++ b/pxr/base/gf/wrapQuatf.cpp @@ -17,23 +17,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfQuatf const &self) { @@ -160,7 +160,7 @@ void wrapQuatf() // __itruediv__ not added by .def( self /= float() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapQuath.cpp b/pxr/base/gf/wrapQuath.cpp index 3242a1e24c..cc843e5baa 100644 --- a/pxr/base/gf/wrapQuath.cpp +++ b/pxr/base/gf/wrapQuath.cpp @@ -17,23 +17,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string __repr__(GfQuath const &self) { @@ -160,7 +160,7 @@ void wrapQuath() // __itruediv__ not added by .def( self /= GfHalf() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange.template.cpp b/pxr/base/gf/wrapRange.template.cpp index ee454f2b5e..2522554d61 100644 --- a/pxr/base/gf/wrapRange.template.cpp +++ b/pxr/base/gf/wrapRange.template.cpp @@ -19,19 +19,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = {{ DIM }}; @@ -156,7 +156,7 @@ void wrapRange{{ SUFFIX }}() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange1d.cpp b/pxr/base/gf/wrapRange1d.cpp index 6232f8c24e..f547576d62 100644 --- a/pxr/base/gf/wrapRange1d.cpp +++ b/pxr/base/gf/wrapRange1d.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 1; @@ -141,7 +141,7 @@ void wrapRange1d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange1f.cpp b/pxr/base/gf/wrapRange1f.cpp index 506f8d17b7..cc7b74b010 100644 --- a/pxr/base/gf/wrapRange1f.cpp +++ b/pxr/base/gf/wrapRange1f.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 1; @@ -141,7 +141,7 @@ void wrapRange1f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". 
This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange2d.cpp b/pxr/base/gf/wrapRange2d.cpp index 82d39196ae..5d86a52f03 100644 --- a/pxr/base/gf/wrapRange2d.cpp +++ b/pxr/base/gf/wrapRange2d.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 2; @@ -144,7 +144,7 @@ void wrapRange2d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange2f.cpp b/pxr/base/gf/wrapRange2f.cpp index 3e0434568f..3e5c4f3ac4 100644 --- a/pxr/base/gf/wrapRange2f.cpp +++ b/pxr/base/gf/wrapRange2f.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 2; @@ -144,7 +144,7 @@ void wrapRange2f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
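The Factor and FactorWithEpsilon helpers in the wrapMatrix4 hunks above keep their shape; only the make_tuple namespace changes. The underlying pattern, returning a success flag plus several C++ out-parameters to Python as a single tuple, sketched with a made-up Decompose API:

#include "pxr/external/boost/python/def.hpp"
#include "pxr/external/boost/python/tuple.hpp"

using namespace pxr_boost::python;

// Hypothetical C++ API that reports results through out-parameters.
static bool Decompose(double in, double *whole, double *frac)
{
    *whole = static_cast<double>(static_cast<long long>(in));
    *frac = in - *whole;
    return true;
}

// Python-facing wrapper: bundle the flag and both outputs into one tuple.
static tuple DecomposeWrapper(double in)
{
    double whole = 0.0, frac = 0.0;
    const bool ok = Decompose(in, &whole, &frac);
    return pxr_boost::python::make_tuple(ok, whole, frac);
}

void wrapDecompose()
{
    def("Decompose", DecomposeWrapper);
}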
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange3d.cpp b/pxr/base/gf/wrapRange3d.cpp index 46b631d356..523331db49 100644 --- a/pxr/base/gf/wrapRange3d.cpp +++ b/pxr/base/gf/wrapRange3d.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 3; @@ -144,7 +144,7 @@ void wrapRange3d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRange3f.cpp b/pxr/base/gf/wrapRange3f.cpp index 519f9e6536..1cfd8531a4 100644 --- a/pxr/base/gf/wrapRange3f.cpp +++ b/pxr/base/gf/wrapRange3f.cpp @@ -17,19 +17,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static const int _dimension = 3; @@ -144,7 +144,7 @@ void wrapRange3f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapRay.cpp b/pxr/base/gf/wrapRay.cpp index d4630359b4..5c334eb204 100644 --- a/pxr/base/gf/wrapRay.cpp +++ b/pxr/base/gf/wrapRay.cpp @@ -16,21 +16,21 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static void diff --git a/pxr/base/gf/wrapRect2i.cpp b/pxr/base/gf/wrapRect2i.cpp index 54261f48a2..310f3d2ddf 100644 --- a/pxr/base/gf/wrapRect2i.cpp +++ b/pxr/base/gf/wrapRect2i.cpp @@ -11,19 +11,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static string _Repr(GfRect2i const &self) { diff --git a/pxr/base/gf/wrapRotation.cpp b/pxr/base/gf/wrapRotation.cpp index b493a89026..065ffa33c6 100644 --- a/pxr/base/gf/wrapRotation.cpp +++ b/pxr/base/gf/wrapRotation.cpp @@ -11,19 +11,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { void SetAxisHelper( GfRotation &rotation, const GfVec3d &axis ) @@ -75,16 +75,16 @@ _DecomposeRotation(const GfMatrix4d &rot, { double angle[4] = { thetaTwHint.ptr() != Py_None ? - boost::python::extract(thetaTwHint) : 0.0, + pxr_boost::python::extract(thetaTwHint) : 0.0, thetaFBHint.ptr() != Py_None ? - boost::python::extract(thetaFBHint) : 0.0, + pxr_boost::python::extract(thetaFBHint) : 0.0, thetaLRHint.ptr() != Py_None ? - boost::python::extract(thetaLRHint) : 0.0, + pxr_boost::python::extract(thetaLRHint) : 0.0, thetaSwHint.ptr() != Py_None ? - boost::python::extract(thetaSwHint) : 0.0 + pxr_boost::python::extract(thetaSwHint) : 0.0 }; double swShift = swShiftIn.ptr() != Py_None ? - boost::python::extract(swShiftIn) : 0.0; + pxr_boost::python::extract(swShiftIn) : 0.0; GfRotation::DecomposeRotation( rot, TwAxis, FBAxis, LRAxis, handedness, @@ -111,13 +111,13 @@ _MatchClosestEulerRotation( { double angle[4] = { thetaTw.ptr() != Py_None ? - boost::python::extract(thetaTw) : 0.0, + pxr_boost::python::extract(thetaTw) : 0.0, thetaFB.ptr() != Py_None ? - boost::python::extract(thetaFB) : 0.0, + pxr_boost::python::extract(thetaFB) : 0.0, thetaLR.ptr() != Py_None ? - boost::python::extract(thetaLR) : 0.0, + pxr_boost::python::extract(thetaLR) : 0.0, thetaSw.ptr() != Py_None ? 
- boost::python::extract(thetaSw) : 0.0 + pxr_boost::python::extract(thetaSw) : 0.0 }; GfRotation::MatchClosestEulerRotation( diff --git a/pxr/base/gf/wrapSize2.cpp b/pxr/base/gf/wrapSize2.cpp index 26806d0702..30ff0eea9d 100644 --- a/pxr/base/gf/wrapSize2.cpp +++ b/pxr/base/gf/wrapSize2.cpp @@ -11,19 +11,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static int @@ -122,7 +122,7 @@ void wrapSize2() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapSize3.cpp b/pxr/base/gf/wrapSize3.cpp index b304fba6d4..359d7c4ec2 100644 --- a/pxr/base/gf/wrapSize3.cpp +++ b/pxr/base/gf/wrapSize3.cpp @@ -12,19 +12,19 @@ #include "pxr/base/tf/wrapTypeHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/implicit.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static int @@ -126,7 +126,7 @@ void wrapSize3() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
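In the wrapRotation hunks the extract calls only change namespace (their template arguments, presumably extract<double>, do not survive the rendering above). The pattern they implement, treating a Python None argument as an absent hint and converting anything else to a C++ double, looks roughly like this; _HintOrZero is an illustrative helper, and the object.hpp path is assumed to mirror the boost/python header layout like the other includes shown:

#include "pxr/external/boost/python/extract.hpp"
#include "pxr/external/boost/python/object.hpp"

using namespace pxr_boost::python;

// None means "no hint supplied"; anything else must extract as a double.
static double _HintOrZero(const object &hint)
{
    return hint.ptr() != Py_None
        ? static_cast<double>(pxr_boost::python::extract<double>(hint))
        : 0.0;
}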
cls.def("__itruediv__", __itruediv__, return_self<>()); } diff --git a/pxr/base/gf/wrapTransform.cpp b/pxr/base/gf/wrapTransform.cpp index 978171e44c..a318541170 100644 --- a/pxr/base/gf/wrapTransform.cpp +++ b/pxr/base/gf/wrapTransform.cpp @@ -15,21 +15,21 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/args.hpp" +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/copy_const_reference.hpp" +#include "pxr/external/boost/python/init.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_arg.hpp" #include using std::string; using std::vector; -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static GfVec3d _NoTranslation() { return GfVec3d(0,0,0); } diff --git a/pxr/base/gf/wrapVec.template.cpp b/pxr/base/gf/wrapVec.template.cpp index 94841b1190..9bb8493373 100644 --- a/pxr/base/gf/wrapVec.template.cpp +++ b/pxr/base/gf/wrapVec.template.cpp @@ -27,23 +27,23 @@ {% endfor %} {% endif %} -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -121,10 +121,10 @@ static size_t __hash__({{ VEC }} const &self) { {% if IS_FLOATING_POINT(SCL) %} -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static {{ SCL }} @@ -133,7 +133,7 @@ NormalizeHelper({{ VEC }} *vec, {{ SCL }} eps = {{ EPS }}) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); {% endif %} {# IS_FLOATING_POINT(SCL) #} @@ -147,7 +147,7 @@ OrthogonalizeBasisHelper( {{ VEC }} *v1, {{ VEC }} *v2, {{ VEC }} *v3, { return {{ VEC }}::OrthogonalizeBasis(v1, v2, v3, normalize, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, OrthogonalizeBasisHelper, 4, 5) static tuple @@ -156,10 +156,10 @@ BuildOrthonormalFrameHelper(const {{ VEC }} &self, { {{ VEC }} v1, v2; self.BuildOrthonormalFrame( &v1, &v2, eps ); - return boost::python::make_tuple(v1, v2); + return pxr_boost::python::make_tuple(v1, v2); } -BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, BuildOrthonormalFrameHelper, 1, 2) {% endif %} {# DIM == 3 and 
IS_FLOATING_POINT(SCL) #} @@ -212,13 +212,13 @@ static void __setitem__({{ VEC }} &self, int index, {{ SCL }} value) { // Handles refcounting & extraction for PySequence_GetItem. static {{ SCL }} _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract<{{ SCL }}>(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract<{{ SCL }}>(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract<{{ SCL }}> e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract<{{ SCL }}> e((pxr_boost::python::object(h))); return e.check(); } @@ -314,7 +314,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id<{{ VEC }}>()); + pxr_boost::python::type_id<{{ VEC }}>()); } private: @@ -353,10 +353,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const {{ VEC }} &v) { - return boost::python::make_tuple({{ LIST("v[%(i)s]") }}); + static pxr_boost::python::tuple getinitargs(const {{ VEC }} &v) { + return pxr_boost::python::make_tuple({{ LIST("v[%(i)s]") }}); } }; @@ -504,7 +504,7 @@ void wrapVec{{ SUFFIX }}() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec2d.cpp b/pxr/base/gf/wrapVec2d.cpp index b3295e6a79..2424a4b419 100644 --- a/pxr/base/gf/wrapVec2d.cpp +++ b/pxr/base/gf/wrapVec2d.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec2h.h" #include "pxr/base/gf/vec2i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec2d const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static double @@ -130,7 +130,7 @@ NormalizeHelper(GfVec2d *vec, double eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec2d &self, int index, double value) { // Handles refcounting & extraction for PySequence_GetItem. static double _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -324,10 +324,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec2d &v) { - return boost::python::make_tuple(v[0], v[1]); + static pxr_boost::python::tuple getinitargs(const GfVec2d &v) { + return pxr_boost::python::make_tuple(v[0], v[1]); } }; @@ -453,7 +453,7 @@ void wrapVec2d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". 
This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec2f.cpp b/pxr/base/gf/wrapVec2f.cpp index 98682931d9..2a3bc8f6ac 100644 --- a/pxr/base/gf/wrapVec2f.cpp +++ b/pxr/base/gf/wrapVec2f.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec2h.h" #include "pxr/base/gf/vec2i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec2f const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static float @@ -130,7 +130,7 @@ NormalizeHelper(GfVec2f *vec, float eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec2f &self, int index, float value) { // Handles refcounting & extraction for PySequence_GetItem. static float _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -324,10 +324,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. 
-struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec2f &v) { - return boost::python::make_tuple(v[0], v[1]); + static pxr_boost::python::tuple getinitargs(const GfVec2f &v) { + return pxr_boost::python::make_tuple(v[0], v[1]); } }; @@ -451,7 +451,7 @@ void wrapVec2f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec2h.cpp b/pxr/base/gf/wrapVec2h.cpp index 57907f082d..304fea6784 100644 --- a/pxr/base/gf/wrapVec2h.cpp +++ b/pxr/base/gf/wrapVec2h.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec2f.h" #include "pxr/base/gf/vec2i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec2h const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static GfHalf @@ -130,7 +130,7 @@ NormalizeHelper(GfVec2h *vec, GfHalf eps = 0.001) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec2h &self, int index, GfHalf value) { // Handles refcounting & extraction for PySequence_GetItem. 
static GfHalf _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -324,10 +324,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec2h &v) { - return boost::python::make_tuple(v[0], v[1]); + static pxr_boost::python::tuple getinitargs(const GfVec2h &v) { + return pxr_boost::python::make_tuple(v[0], v[1]); } }; @@ -449,7 +449,7 @@ void wrapVec2h() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec2i.cpp b/pxr/base/gf/wrapVec2i.cpp index 7584f2d8e6..74e25a417d 100644 --- a/pxr/base/gf/wrapVec2i.cpp +++ b/pxr/base/gf/wrapVec2i.cpp @@ -20,23 +20,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -163,13 +163,13 @@ static void __setitem__(GfVec2i &self, int index, int value) { // Handles refcounting & extraction for PySequence_GetItem. 
static int _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -265,7 +265,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -304,10 +304,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec2i &v) { - return boost::python::make_tuple(v[0], v[1]); + static pxr_boost::python::tuple getinitargs(const GfVec2i &v) { + return pxr_boost::python::make_tuple(v[0], v[1]); } }; @@ -403,7 +403,7 @@ void wrapVec2i() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec3d.cpp b/pxr/base/gf/wrapVec3d.cpp index 7386effb3d..84d9f835c5 100644 --- a/pxr/base/gf/wrapVec3d.cpp +++ b/pxr/base/gf/wrapVec3d.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec3h.h" #include "pxr/base/gf/vec3i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec3d const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static double @@ -130,7 +130,7 @@ NormalizeHelper(GfVec3d *vec, double eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); 
+PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -142,7 +142,7 @@ OrthogonalizeBasisHelper( GfVec3d *v1, GfVec3d *v2, GfVec3d *v3, { return GfVec3d::OrthogonalizeBasis(v1, v2, v3, normalize, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, OrthogonalizeBasisHelper, 4, 5) static tuple @@ -151,10 +151,10 @@ BuildOrthonormalFrameHelper(const GfVec3d &self, { GfVec3d v1, v2; self.BuildOrthonormalFrame( &v1, &v2, eps ); - return boost::python::make_tuple(v1, v2); + return pxr_boost::python::make_tuple(v1, v2); } -BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, BuildOrthonormalFrameHelper, 1, 2) @@ -206,13 +206,13 @@ static void __setitem__(GfVec3d &self, int index, double value) { // Handles refcounting & extraction for PySequence_GetItem. static double _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -308,7 +308,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -349,10 +349,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec3d &v) { - return boost::python::make_tuple(v[0], v[1], v[2]); + static pxr_boost::python::tuple getinitargs(const GfVec3d &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2]); } }; @@ -490,7 +490,7 @@ void wrapVec3d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
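The FromPythonTuple hunks repeated above only re-home type_id; the registration idiom is standard boost.python and carries over unchanged. A trimmed sketch of registering a from-Python converter for an illustrative two-double aggregate; Foo2d is made up, and the converter/... and handle.hpp paths are assumed to mirror the boost/python header layout:

#include <new>

#include "pxr/external/boost/python/converter/registry.hpp"
#include "pxr/external/boost/python/converter/rvalue_from_python_data.hpp"
#include "pxr/external/boost/python/extract.hpp"
#include "pxr/external/boost/python/handle.hpp"
#include "pxr/external/boost/python/object.hpp"
#include "pxr/external/boost/python/type_id.hpp"

using namespace pxr_boost::python;

struct Foo2d { double x, y; };   // illustrative value type

struct Foo2dFromPythonTuple {
    Foo2dFromPythonTuple() {
        converter::registry::push_back(
            &_convertible, &_construct, pxr_boost::python::type_id<Foo2d>());
    }

private:
    // Accept any two-element Python sequence.
    static void *_convertible(PyObject *obj) {
        return (PySequence_Check(obj) && PySequence_Size(obj) == 2)
            ? obj : nullptr;
    }

    // Build the C++ value in the storage boost.python hands us.
    static void _construct(PyObject *obj,
                           converter::rvalue_from_python_stage1_data *data) {
        void *storage =
            ((converter::rvalue_from_python_storage<Foo2d> *)data)->storage.bytes;
        handle<> h0(PySequence_GetItem(obj, 0));
        handle<> h1(PySequence_GetItem(obj, 1));
        const double x = extract<double>(object(h0));
        const double y = extract<double>(object(h1));
        new (storage) Foo2d{x, y};
        data->convertible = storage;
    }
};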
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec3f.cpp b/pxr/base/gf/wrapVec3f.cpp index cd28de69ca..e957129b13 100644 --- a/pxr/base/gf/wrapVec3f.cpp +++ b/pxr/base/gf/wrapVec3f.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec3h.h" #include "pxr/base/gf/vec3i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec3f const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static float @@ -130,7 +130,7 @@ NormalizeHelper(GfVec3f *vec, float eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -142,7 +142,7 @@ OrthogonalizeBasisHelper( GfVec3f *v1, GfVec3f *v2, GfVec3f *v3, { return GfVec3f::OrthogonalizeBasis(v1, v2, v3, normalize, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, OrthogonalizeBasisHelper, 4, 5) static tuple @@ -151,10 +151,10 @@ BuildOrthonormalFrameHelper(const GfVec3f &self, { GfVec3f v1, v2; self.BuildOrthonormalFrame( &v1, &v2, eps ); - return boost::python::make_tuple(v1, v2); + return pxr_boost::python::make_tuple(v1, v2); } -BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, BuildOrthonormalFrameHelper, 1, 2) @@ -206,13 +206,13 @@ static void __setitem__(GfVec3f &self, int index, float value) { // Handles refcounting & extraction for PySequence_GetItem. 
static float _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -308,7 +308,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -349,10 +349,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec3f &v) { - return boost::python::make_tuple(v[0], v[1], v[2]); + static pxr_boost::python::tuple getinitargs(const GfVec3f &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2]); } }; @@ -488,7 +488,7 @@ void wrapVec3f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec3h.cpp b/pxr/base/gf/wrapVec3h.cpp index a215f1fae2..04efc43a67 100644 --- a/pxr/base/gf/wrapVec3h.cpp +++ b/pxr/base/gf/wrapVec3h.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec3f.h" #include "pxr/base/gf/vec3i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec3h const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static GfHalf @@ -130,7 +130,7 @@ NormalizeHelper(GfVec3h *vec, GfHalf eps = 0.001) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -142,7 +142,7 @@ OrthogonalizeBasisHelper( GfVec3h *v1, GfVec3h *v2, GfVec3h *v3, { return GfVec3h::OrthogonalizeBasis(v1, v2, v3, normalize, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(OrthogonalizeBasis_overloads, OrthogonalizeBasisHelper, 4, 5) static tuple @@ -151,10 +151,10 @@ BuildOrthonormalFrameHelper(const GfVec3h &self, { GfVec3h v1, v2; self.BuildOrthonormalFrame( &v1, &v2, eps ); - return boost::python::make_tuple(v1, v2); + return pxr_boost::python::make_tuple(v1, v2); } -BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(BuildOrthonormalFrame_overloads, BuildOrthonormalFrameHelper, 1, 2) @@ -206,13 +206,13 @@ static void __setitem__(GfVec3h &self, int index, GfHalf value) { // Handles refcounting & extraction for PySequence_GetItem. 
static GfHalf _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -308,7 +308,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -349,10 +349,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec3h &v) { - return boost::python::make_tuple(v[0], v[1], v[2]); + static pxr_boost::python::tuple getinitargs(const GfVec3h &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2]); } }; @@ -486,7 +486,7 @@ void wrapVec3h() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec3i.cpp b/pxr/base/gf/wrapVec3i.cpp index d7ad178d0b..a608c73876 100644 --- a/pxr/base/gf/wrapVec3i.cpp +++ b/pxr/base/gf/wrapVec3i.cpp @@ -20,23 +20,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -163,13 +163,13 @@ static void __setitem__(GfVec3i &self, int index, int value) { // Handles refcounting & extraction for PySequence_GetItem. 
static int _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -265,7 +265,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -306,10 +306,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec3i &v) { - return boost::python::make_tuple(v[0], v[1], v[2]); + static pxr_boost::python::tuple getinitargs(const GfVec3i &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2]); } }; @@ -406,7 +406,7 @@ void wrapVec3i() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec4d.cpp b/pxr/base/gf/wrapVec4d.cpp index 365a9ff77c..fc73462feb 100644 --- a/pxr/base/gf/wrapVec4d.cpp +++ b/pxr/base/gf/wrapVec4d.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec4h.h" #include "pxr/base/gf/vec4i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec4d const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static double @@ -130,7 +130,7 @@ NormalizeHelper(GfVec4d *vec, double eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec4d &self, int index, double value) { // Handles refcounting & extraction for PySequence_GetItem. static double _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -328,10 +328,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec4d &v) { - return boost::python::make_tuple(v[0], v[1], v[2], v[3]); + static pxr_boost::python::tuple getinitargs(const GfVec4d &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2], v[3]); } }; @@ -459,7 +459,7 @@ void wrapVec4d() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". 
This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec4f.cpp b/pxr/base/gf/wrapVec4f.cpp index 586e9db955..05a131f52b 100644 --- a/pxr/base/gf/wrapVec4f.cpp +++ b/pxr/base/gf/wrapVec4f.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec4h.h" #include "pxr/base/gf/vec4i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec4f const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static float @@ -130,7 +130,7 @@ NormalizeHelper(GfVec4f *vec, float eps = GF_MIN_VECTOR_LENGTH) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec4f &self, int index, float value) { // Handles refcounting & extraction for PySequence_GetItem. static float _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -328,10 +328,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. 
-struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec4f &v) { - return boost::python::make_tuple(v[0], v[1], v[2], v[3]); + static pxr_boost::python::tuple getinitargs(const GfVec4f &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2], v[3]); } }; @@ -457,7 +457,7 @@ void wrapVec4f() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec4h.cpp b/pxr/base/gf/wrapVec4h.cpp index 101e6cee1e..84c9f52415 100644 --- a/pxr/base/gf/wrapVec4h.cpp +++ b/pxr/base/gf/wrapVec4h.cpp @@ -25,23 +25,23 @@ #include "pxr/base/gf/vec4f.h" #include "pxr/base/gf/vec4i.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -118,10 +118,10 @@ static size_t __hash__(GfVec4h const &self) { } -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecGetNormalized_overloads, GetNormalized, 0, 1); -BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); -BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, +PXR_BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(VecNormalize_overloads, Normalize, 0, 1); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(GetNormalized_overloads, GfGetNormalized, 1, 2); static GfHalf @@ -130,7 +130,7 @@ NormalizeHelper(GfVec4h *vec, GfHalf eps = 0.001) return GfNormalize(vec, eps); } -BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); +PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2); @@ -183,13 +183,13 @@ static void __setitem__(GfVec4h &self, int index, GfHalf value) { // Handles refcounting & extraction for PySequence_GetItem. 
static GfHalf _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -285,7 +285,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -328,10 +328,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec4h &v) { - return boost::python::make_tuple(v[0], v[1], v[2], v[3]); + static pxr_boost::python::tuple getinitargs(const GfVec4h &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2], v[3]); } }; @@ -455,7 +455,7 @@ void wrapVec4h() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/gf/wrapVec4i.cpp b/pxr/base/gf/wrapVec4i.cpp index becaf3f5e8..c08ae4e229 100644 --- a/pxr/base/gf/wrapVec4i.cpp +++ b/pxr/base/gf/wrapVec4i.cpp @@ -20,23 +20,23 @@ #include "pxr/base/tf/wrapTypeHelpers.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/make_constructor.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/overloads.hpp" +#include "pxr/external/boost/python/return_arg.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/slice.hpp" #include -using namespace boost::python; - using std::string; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { //////////////////////////////////////////////////////////////////////// @@ -163,13 +163,13 @@ static void __setitem__(GfVec4i &self, int index, int value) { // Handles refcounting & extraction for PySequence_GetItem. 
static int _SequenceGetItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - return extract(boost::python::object(h)); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + return extract(pxr_boost::python::object(h)); } static bool _SequenceCheckItem(PyObject *seq, Py_ssize_t i) { - boost::python::handle<> h(PySequence_GetItem(seq, i)); - extract e((boost::python::object(h))); + pxr_boost::python::handle<> h(PySequence_GetItem(seq, i)); + extract e((pxr_boost::python::object(h))); return e.check(); } @@ -265,7 +265,7 @@ struct FromPythonTuple { FromPythonTuple() { converter::registry:: push_back(&_convertible, &_construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } private: @@ -308,10 +308,10 @@ struct FromPythonTuple { // This adds support for python's builtin pickling library // This is used by our Shake plugins which need to pickle entire classes // (including code), which we don't support in pxml. -struct PickleSuite : boost::python::pickle_suite +struct PickleSuite : pxr_boost::python::pickle_suite { - static boost::python::tuple getinitargs(const GfVec4i &v) { - return boost::python::make_tuple(v[0], v[1], v[2], v[3]); + static pxr_boost::python::tuple getinitargs(const GfVec4i &v) { + return pxr_boost::python::make_tuple(v[0], v[1], v[2], v[3]); } }; @@ -409,7 +409,7 @@ void wrapVec4i() // __itruediv__ not added by .def( self /= double() ) above, which // happens when building with python 2, but we need it to support // "from __future__ import division". This is also a workaround for a - // bug in the current version of boost::python that incorrectly wraps + // bug in the current version of pxr_boost::python that incorrectly wraps // in-place division with __idiv__ when building with python 3. 
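// Illustrative sketch (not part of the patch): every wrapVec*.cpp hunk above
// applies the same mechanical migration, so the top of a hypothetical wrap
// file now takes this shape -- project-local python headers instead of
// <boost/python/...>, the using-directive moved after
// PXR_NAMESPACE_USING_DIRECTIVE, and the overload macros renamed with a
// PXR_ prefix. NormalizeHelper stands in for the per-file helper each wrap
// file already defines.

#include "pxr/external/boost/python/class.hpp"
#include "pxr/external/boost/python/def.hpp"
#include "pxr/external/boost/python/overloads.hpp"

PXR_NAMESPACE_USING_DIRECTIVE

using namespace pxr_boost::python;

PXR_BOOST_PYTHON_FUNCTION_OVERLOADS(Normalize_overloads, NormalizeHelper, 1, 2);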
cls.def("__itruediv__", __itruediv__, return_self<>{}); } diff --git a/pxr/base/js/CMakeLists.txt b/pxr/base/js/CMakeLists.txt index f5ac5fb99e..d59dc1f6da 100644 --- a/pxr/base/js/CMakeLists.txt +++ b/pxr/base/js/CMakeLists.txt @@ -5,9 +5,6 @@ pxr_library(js LIBRARIES tf - INCLUDE_DIRS - ${Boost_INCLUDE_DIRS} - PUBLIC_CLASSES json utils diff --git a/pxr/base/plug/CMakeLists.txt b/pxr/base/plug/CMakeLists.txt index ac6942516c..6d4c74ce0a 100644 --- a/pxr/base/plug/CMakeLists.txt +++ b/pxr/base/plug/CMakeLists.txt @@ -8,11 +8,9 @@ pxr_library(plug js trace work - ${Boost_PYTHON_LIBRARY} ${TBB_tbb_LIBRARY} INCLUDE_DIRS - ${Boost_INCLUDE_DIRS} ${TBB_INCLUDE_DIRS} PUBLIC_CLASSES diff --git a/pxr/base/plug/wrapNotice.cpp b/pxr/base/plug/wrapNotice.cpp index fabf8b3728..dc0593db2c 100644 --- a/pxr/base/plug/wrapNotice.cpp +++ b/pxr/base/plug/wrapNotice.cpp @@ -10,13 +10,13 @@ #include "pxr/base/tf/pyResultConversions.h" #include "pxr/base/tf/pyNoticeWrapper.h" -#include -#include - -using namespace boost::python; +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/scope.hpp" PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { TF_INSTANTIATE_NOTICE_WRAPPER(PlugNotice::Base, TfNotice); diff --git a/pxr/base/plug/wrapPlugin.cpp b/pxr/base/plug/wrapPlugin.cpp index 8d009cd26f..596a11e01b 100644 --- a/pxr/base/plug/wrapPlugin.cpp +++ b/pxr/base/plug/wrapPlugin.cpp @@ -13,16 +13,17 @@ #include "pxr/base/tf/pyResultConversions.h" #include "pxr/base/tf/iterator.h" -#include +#include "pxr/external/boost/python.hpp" #include #include -using namespace boost::python; using std::string; using std::vector; PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { static dict @@ -89,8 +90,8 @@ void wrapPlugin() ; // The call to JsConvertToContainerType in _ConvertDict creates - // vectors of boost::python::objects for array values, so register + // vectors of pxr_boost::python::objects for array values, so register // a converter that turns that vector into a Python list. - boost::python::to_python_converter, + pxr_boost::python::to_python_converter, TfPySequenceToPython > >(); } diff --git a/pxr/base/plug/wrapRegistry.cpp b/pxr/base/plug/wrapRegistry.cpp index be574cbec1..ea406a000d 100644 --- a/pxr/base/plug/wrapRegistry.cpp +++ b/pxr/base/plug/wrapRegistry.cpp @@ -15,7 +15,7 @@ #include "pxr/base/tf/stringUtils.h" #include -#include +#include "pxr/external/boost/python.hpp" #include #include @@ -27,10 +27,10 @@ using std::string; using std::vector; -using namespace boost::python; - PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { typedef TfWeakPtr PlugRegistryPtr; diff --git a/pxr/base/plug/wrapTestPlugBase.cpp b/pxr/base/plug/wrapTestPlugBase.cpp index 8d81012dca..600448266f 100644 --- a/pxr/base/plug/wrapTestPlugBase.cpp +++ b/pxr/base/plug/wrapTestPlugBase.cpp @@ -12,12 +12,12 @@ #include "pxr/base/tf/pyContainerConversions.h" #include -#include - -using namespace boost::python; +#include "pxr/external/boost/python.hpp" PXR_NAMESPACE_USING_DIRECTIVE +using namespace pxr_boost::python; + namespace { template diff --git a/pxr/base/tf/CMakeLists.txt b/pxr/base/tf/CMakeLists.txt index b236184c47..306419493d 100644 --- a/pxr/base/tf/CMakeLists.txt +++ b/pxr/base/tf/CMakeLists.txt @@ -18,7 +18,7 @@ function(add_py_dll_link_test) # early return if python target is not built! 
(No python build enabled or # doing a static build, etc) - if (NOT TARGET python) + if (NOT TARGET python_modules) return() endif() @@ -60,7 +60,7 @@ function(add_py_dll_link_test) add_library(testTfPyDllLinkModule SHARED "testenv/testTfPyDllLinkModule.c" ) - add_dependencies(python testTfPyDllLinkModule) + add_dependencies(python_modules testTfPyDllLinkModule) set(module_name testTfPyDllLinkModule) if (PXR_USE_DEBUG_PYTHON) @@ -114,13 +114,9 @@ pxr_library(tf LIBRARIES arch ${WINLIBS} - ${PYTHON_LIBRARIES} - ${Boost_PYTHON_LIBRARY} ${TBB_tbb_LIBRARY} INCLUDE_DIRS - ${PYTHON_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} ${TBB_INCLUDE_DIRS} PUBLIC_CLASSES @@ -128,7 +124,9 @@ pxr_library(tf anyWeakPtr atomicOfstreamWrapper bigRWMutex + bits bitUtils + compressedBits debug debugNotice denseHashMap @@ -361,7 +359,6 @@ if(PXR_ENABLE_PYTHON_SUPPORT) pxr_build_test(testTfPyFunction LIBRARIES tf - ${Boost_PYTHON_LIBRARY} CPPFILES testenv/testTfPyFunction.cpp ) @@ -383,7 +380,6 @@ if(PXR_ENABLE_PYTHON_SUPPORT) pxr_build_test(testTfPyLock LIBRARIES tf - ${Boost_PYTHON_LIBRARY} CPPFILES testenv/testTfPyLock.cpp ) @@ -391,7 +387,6 @@ if(PXR_ENABLE_PYTHON_SUPPORT) pxr_build_test(testTfPyResultConversions LIBRARIES tf - ${Boost_PYTHON_LIBRARY} CPPFILES testenv/testTfPyResultConversions.cpp ) @@ -446,7 +441,9 @@ pxr_build_test(testTf testenv/main.cpp testenv/anyUniquePtr.cpp testenv/atomicOfstreamWrapper.cpp + testenv/bits.cpp testenv/bitUtils.cpp + testenv/compressedBits.cpp testenv/debug.cpp testenv/denseHashMap.cpp testenv/delegate.cpp @@ -549,9 +546,15 @@ pxr_register_test(TfAnyUniquePtr pxr_register_test(TfAtomicOfstreamWrapper COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testTf TfAtomicOfstreamWrapper" ) +pxr_register_test(TfBits + COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testTf TfBits" +) pxr_register_test(TfBitUtils COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testTf TfBitUtils" ) +pxr_register_test(TfCompressedBits + COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testTf TfCompressedBits" +) pxr_register_test(TfDl COMMAND "${CMAKE_INSTALL_PREFIX}/tests/testTf TfDl" ) diff --git a/pxr/base/tf/anyUniquePtr.h b/pxr/base/tf/anyUniquePtr.h index 6eeaaa405c..156ebd7c6b 100644 --- a/pxr/base/tf/anyUniquePtr.h +++ b/pxr/base/tf/anyUniquePtr.h @@ -22,7 +22,7 @@ PXR_NAMESPACE_OPEN_SCOPE /// rarely accessed values. As such, its design prioritizes compile-time /// overhead over runtime performance and avoids clever metaprogramming. /// Please resist the urge to add functionality to this class (e.g. small -/// object optimization, boost::python interoperability.) +/// object optimization, pxr_boost::python interoperability.) class TfAnyUniquePtr { public: diff --git a/pxr/base/tf/anyWeakPtr.cpp b/pxr/base/tf/anyWeakPtr.cpp index 61221a4f5f..0a424e33ff 100644 --- a/pxr/base/tf/anyWeakPtr.cpp +++ b/pxr/base/tf/anyWeakPtr.cpp @@ -141,7 +141,7 @@ TfAnyWeakPtr::GetType() const } #ifdef PXR_PYTHON_SUPPORT_ENABLED -boost::python::api::object +pxr_boost::python::api::object TfAnyWeakPtr::_GetPythonObject() const { TfPyLock pyLock; diff --git a/pxr/base/tf/anyWeakPtr.h b/pxr/base/tf/anyWeakPtr.h index 7b5db192bb..40243e9b48 100644 --- a/pxr/base/tf/anyWeakPtr.h +++ b/pxr/base/tf/anyWeakPtr.h @@ -138,13 +138,13 @@ class TfAnyWeakPtr #ifdef PXR_PYTHON_SUPPORT_ENABLED // This grants friend access to a function in the wrapper file for this // class. This lets the wrapper reach down into an AnyWeakPtr to get a - // boost::python wrapped object corresponding to the held type. This + // pxr_boost::python wrapped object corresponding to the held type. 
This // facility is necessary to get the python API we want. - friend boost::python::api::object + friend pxr_boost::python::api::object Tf_GetPythonObjectFromAnyWeakPtr(This const &self); TF_API - boost::python::api::object _GetPythonObject() const; + pxr_boost::python::api::object _GetPythonObject() const; #endif // PXR_PYTHON_SUPPORT_ENABLED template diff --git a/pxr/base/tf/bits.cpp b/pxr/base/tf/bits.cpp new file mode 100644 index 0000000000..02001a0550 --- /dev/null +++ b/pxr/base/tf/bits.cpp @@ -0,0 +1,460 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// + +#include "pxr/base/tf/bits.h" +#include "pxr/base/tf/tf.h" + +#include "pxr/base/arch/hash.h" + +#include +#include + +PXR_NAMESPACE_OPEN_SCOPE + +// XXX:optimization Instead of always leaving the trailing bits to zero, we +// could let the first trailing bit (we might need to allocate +// one more word due to this), be 1. This "guard bit" can +// then be used to make FindNextSet() faster. + +// Note that the 64-bit array holding the bits can have unused bits at the end. +// By definition, these bits always needs to be kept at 0. + +size_t +TfBits::_CountNumSet() const +{ + // Accumulate number of set bits in a local variable (this makes the + // compiler generate better code since it does not have to repeatedly + // read/store the contents of a member variable). + size_t numSet = 0; + + // Limit the range of words we're looking at. + size_t firstSet = GetFirstSet(); + size_t lastSet = GetLastSet(); + size_t offset = firstSet >> 6; + size_t numWords = (firstSet < _num) ? (lastSet >> 6) + 1 - offset : 0; + + uint64_t *p = _bits + offset; + + // Loop over all words in the range. + for(size_t n=numWords; n>0; n--) + { + // On both gcc and clang, with the appropriate instruction set + // enabled, bitset::count compiles down to popcntq. + numSet += std::bitset<64>(*p++).count(); + } + + return numSet; +} + +size_t +TfBits::_FindNextSet(size_t index, size_t startBit) const +{ + for(size_t w=index>>6; w<_numWords; w++) + { + if (uint64_t bits = _bits[w]) + for(size_t i=startBit; i<64; i++) + if (bits & (UINT64_C(1) << i)) + { + // Note: Since we round up the # bits to fit an + // integer # of words, we need to check if we + // read too far... + + size_t bit = i + (w << 6); + + return (bit >= _num) ? _num : bit; + } + + startBit = 0; + } + + return _num; +} + +size_t +TfBits::_FindPrevSet(size_t index, size_t startBit) const +{ + for(int w=index>>6; w>=0; w--) + { + if (uint64_t bits = _bits[w]) + for(int i=startBit; i>=0; i--) + if (bits & (UINT64_C(1) << i)) + { + // Note: Since we round up the # bits to fit an + // integer # of words, we need to check if we + // read too far... + + return i + (w << 6); + } + + startBit = 63; + } + + return _num; +} + +size_t +TfBits::_FindNextUnset(size_t index, size_t startBit) const +{ + for(size_t w=index>>6; w<_numWords; w++) + { + // Note: We're operating on the flipped bits here... + if (uint64_t bits = ~_bits[w]) + for(size_t i=startBit; i<64; i++) + if (bits & (UINT64_C(1) << i)) + { + // Note: Since we round up the # bits to fit an + // integer # of words, we need to check if we + // read too far... + + size_t bit = i + (w << 6); + + return (bit >= _num) ? 
_num : bit; + } + + startBit = 0; + } + + return _num; +} + +size_t +TfBits::GetHash() const +{ + size_t firstSet = GetFirstSet(); + size_t lastSet = GetLastSet(); + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + uint64_t *p0 = _bits + offset; + + if (firstSet == _num) { + return firstSet; + } + + // Use firstSet as the seed. + return ArchHash((const char *)p0, numWords * sizeof(uint64_t), firstSet); +} + +bool +TfBits::operator==(const TfBits &rhs) const +{ + // Early bail out. + if (this == &rhs) + return true; + + // If the number of bits is different, they can't be equal. + if (_num != rhs._num) + return false; + + // XXX:optimization: Faster to always cache _numSet? + + // Check if we can do an early out based on the number of set bits. + const size_t numSet = _numSet.Load(); + if (numSet != size_t(-1)) { + const size_t rhsNumSet = rhs._numSet.Load(); + if (rhsNumSet != size_t(-1)) { + + // If the number of set bits is different, they can't be equal. + if (numSet != rhsNumSet) + return false; + + // Order doesn't matter if all bits are set or cleared and the # of + // set bits is the same... + if (numSet == 0 || numSet == _num) + return true; + } + } + + // Limit the comparison to where we have bits set. + size_t firstSet = GetFirstSet(); + size_t lastSet = GetLastSet(); + size_t rhsFirstSet = rhs.GetFirstSet(); + size_t rhsLastSet = rhs.GetLastSet(); + + if (firstSet != rhsFirstSet || lastSet != rhsLastSet) + return false; + + // Are any of the sets empty? + if (firstSet == _num || rhsFirstSet == _num) + return firstSet == rhsFirstSet; + + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + // Have to compare the bits. + + // This only works because we are careful to keep the trailing bits unset. + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + if (*p0 != *p1) + return false; + + p0++; + p1++; + } + + return true; +} + +TfBits & +TfBits::operator&=(const TfBits &rhs) +{ + // Early bail out. + if (this == &rhs) + return *this; + + TF_AXIOM(_num == rhs._num); + + // Limit the bit operations to where we have bits set in the first of + // the two sets (we only remove bits). + size_t firstSet = GetFirstSet(); + size_t lastSet = GetLastSet(); + + // Nothing to do if we already have no bits set. + if (firstSet < _num) + { + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + *p0 &= *p1; + p0++; + p1++; + } + + // Note: All lhs & rhs trailing bits are 0, so no trailing bits get set. + _numSet .Store(-1); + _firstSet.Store(FindNextSet(firstSet)); + _lastSet .Store(FindPrevSet(lastSet)); + } + + return *this; +} + +TfBits & +TfBits::operator|=(const TfBits &rhs) +{ + if (TF_VERIFY(_num == rhs._num)) + _Or(rhs); + + return *this; +} + +void +TfBits::_Or(const TfBits &rhs) +{ + // Early bail out. + if (this == &rhs) + return; + + // Limit the bit operations to where we have bits set in the second of + // the two sets (we only add bits that are set in the second set). + size_t rhsFirstSet = rhs.GetFirstSet(); + + // Nothing to do if rhs has no bits set. 
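// Illustrative sketch (not part of the patch): usage of the two "or" entry
// points that funnel into _Or() here. operator|= requires equal sizes, while
// OrSubset() accepts a smaller rhs and ors it into the low bits, avoiding the
// cost of padding rhs beforehand. The function name, sizes, and indices are
// arbitrary examples.
static void Tf_ExampleOrUsage()
{
    TfBits a(128);          // 128 bits, cleared by the constructor
    TfBits b(128, 10, 20);  // bits 10..20 set
    a |= b;                 // same size: plain operator|=

    TfBits small(64);
    small.Set(3);
    a.OrSubset(small);      // rhs may be smaller than 'a'
}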
+ if (rhsFirstSet < rhs._num) + { + size_t rhsLastSet = rhs.GetLastSet(); + size_t lhsFirstSet = GetFirstSet(); + size_t lhsLastSet = GetLastSet(); + + // Compute the first and last set bits for the result (note that we + // only do the or operation on the range where we have rhs bits set). + size_t firstSet = TfMin(lhsFirstSet, rhsFirstSet); + size_t lastSet = (lhsLastSet < _num) ? + TfMax(lhsLastSet, rhsLastSet) : rhsLastSet; + + // Early out for the case that rhs is contained in lhs (note that + // we do not bother to cache _numSet, if it is not cached we simply + // skip this early check). + if (_numSet.Load() == lastSet - firstSet + 1 && + firstSet == lhsFirstSet && lastSet == lhsLastSet) + return; + + size_t offset = rhsFirstSet >> 6; + size_t numWords = (rhsLastSet >> 6) + 1 - offset; + + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + *p0 |= *p1; + p0++; + p1++; + } + + // Note: All lhs & rhs trailing bits are 0, so no trailing bits get set. + _numSet .Store(-1); + _firstSet.Store(firstSet); + _lastSet .Store(lastSet); + } +} + +void +TfBits::OrSubset(const TfBits &rhs) +{ + if (TF_VERIFY(_num >= rhs._num)) + _Or(rhs); +} + +TfBits & +TfBits::operator^=(const TfBits &rhs) +{ + TF_AXIOM(_num == rhs._num); + + // Limit the bit operations to where we have bits set in either of + // the two sets. + size_t i0 = GetFirstSet(); + size_t i1 = rhs.GetFirstSet(); + + // Nothing to do if rhs has no bits set. + if (i1 < _num) + { + size_t firstSet = TfMin(i0, i1); + size_t lastSet = (i0 < _num) ? + TfMax(GetLastSet(), rhs.GetLastSet()) : rhs.GetLastSet(); + + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + *p0 ^= *p1; + p0++; + p1++; + } + + // Note: All lhs & rhs trailing bits are 0, so no trailing bits get set. + _numSet .Store(-1); + _firstSet.Store(FindNextSet(firstSet)); + _lastSet .Store(FindPrevSet(lastSet)); + } + + return *this; +} + +TfBits & +TfBits::operator-=(const TfBits &rhs) +{ + TF_AXIOM(_num == rhs._num); + + // Limit the bit operations to where we have bits set in both of + // the two sets. + size_t lhsFirstSet = GetFirstSet(); + size_t lhsLastSet = GetLastSet(); + + size_t i0 = lhsFirstSet; + size_t i1 = rhs.GetFirstSet(); + size_t firstSet = TfMax(i0, i1); + size_t lastSet = TfMin(lhsLastSet, rhs.GetLastSet()); + + // Nothing to do if either set has not bits set, or they do not overlap. + if (i0 < _num && i1 < _num && firstSet <= lastSet) + { + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + *p0 &= ~*p1; + p0++; + p1++; + } + + // Note: All lhs & rhs trailing bits are 0, so no trailing bits get set. + _numSet .Store(-1); + _firstSet.Store(FindNextSet(lhsFirstSet)); + _lastSet .Store(FindPrevSet(lhsLastSet)); + } + + return *this; +} + +TfBits & +TfBits::Complement() +{ + uint64_t *p = _bits; + + for(size_t i=_numWords; i>0; i--) + { + *p = ~*p; + p++; + } + + // Note: Need to keep unused bits at 0. + _ClearTrailingBits(); + + const size_t numSet = _numSet.Load(); + if (numSet != size_t(-1)) + _numSet.Store(_num - numSet); + _firstSet.Store(-1); + _lastSet .Store(-1); + + return *this; +} + +void +TfBits::_ClearTrailingBits() +{ + // Find the total number of words. 
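// Worked example (not part of the patch) of the index arithmetic used
// throughout this file and in _ClearTrailingBits() just below: a bit index
// selects its 64-bit word with (index >> 6) and its position inside that
// word with (index & 63), and the unused high bits of the last word are
// masked back to zero. For a hypothetical 70-bit array (_numWords == 2):
//
//   bit 66 -> word 66 >> 6 == 1, mask UINT64_C(1) << (66 & 63) == 1 << 2
//   used bits in last word: 64 - ((2 << 6) - 70) == 6
//   trailing mask:          (UINT64_C(1) << 6) - 1 == 0x3f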
+ if (_numWords && (_num & 63)) + { + // Mask out the bits we're not using so that they don't end up in the + // count. + size_t numUsedBitsInLastWord = 64 - ((_numWords << 6) - _num); + TF_AXIOM(numUsedBitsInLastWord > 0 && numUsedBitsInLastWord <= 63); + + // Zero out the unused bits so that they don't show up in the counts. + _bits[_numWords - 1] &= (UINT64_C(1) << numUsedBitsInLastWord) - 1; + } +} + +std::string +TfBits::GetAsStringLeftToRight() const +{ + std::string res; + + for(size_t i=0; i<_num; ++i) + res.push_back('0' + IsSet(i)); + + return res; +} + +std::string +TfBits::GetAsStringRightToLeft() const +{ + std::string res; + + for(int64_t i=_num-1; i>=0; --i) + res.push_back('0' + IsSet(i)); + + return res; +} + + +std::ostream & +operator<<(std::ostream &out, const TfBits & bits) +{ + out << bits.GetAsStringLeftToRight(); + return out; +} + +PXR_NAMESPACE_CLOSE_SCOPE diff --git a/pxr/base/tf/bits.h b/pxr/base/tf/bits.h new file mode 100644 index 0000000000..7d7cf67876 --- /dev/null +++ b/pxr/base/tf/bits.h @@ -0,0 +1,1079 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// +#ifndef PXR_BASE_TF_BITS_H +#define PXR_BASE_TF_BITS_H + +#include "pxr/base/arch/hints.h" +#include "pxr/base/tf/api.h" +#include "pxr/base/tf/hash.h" +#include "pxr/base/tf/tf.h" +#include "pxr/base/tf/hash.h" +#include "pxr/base/tf/diagnostic.h" +#include "pxr/base/tf/iterator.h" + +#include +#include +#include +#include +#include + +PXR_NAMESPACE_OPEN_SCOPE + +/// \class TfBits +/// +/// \brief Fast bit array that keeps track of the number of bits set and +/// can find the next set in a timely manner. +/// +/// Note about thread safety in this class: +/// +/// TfBits supports only the most basic thread safety guarantee: multiple +/// threads may safely call const methods concurrently. A thread must not +/// invoke any non-const method on a TfBits object while any other thread is +/// accessing it. +/// +/// There are certain members in this class that are mutable and modified in +/// const methods. However, since they are updated without being read and +/// all threads would update them with the same values in the case of a race +/// condition, the worst thing that can happen is redundant computation. +/// +/// Successive access to these members will result in read only access +/// patterns. +/// +/// All occurances are marked. +/// +class TfBits +{ +public: + + // View and iterator modes: All bits, all set bits, all unset bits. + enum Mode { All, AllSet, AllUnset }; + + /// Hash for TfBits. + /// + /// This hash is linear in time as it considers all the bits between + /// first set and last set. If you need a faster hash, see FastHash, + /// it may be suitable for your needs. + /// + struct Hash { + size_t operator()(TfBits const &bits) const { + return bits.GetHash(); + } + }; + + /// A hash functor for TfBits that is faster than Hash. + /// + /// This hash uses the number of bits in total, the number of bits + /// set, the first set and last set to compute the hash. + /// + struct FastHash { + size_t operator()(TfBits const &bits) const { + return TfHash::Combine( + bits.GetSize(), + bits.GetFirstSet(), + bits.GetLastSet(), + bits.GetNumSet()); + } + }; + + + /// Constructs a fixed size bit array, clears all bits. + /// + explicit TfBits(size_t num=0) + { + _bits = NULL; + _numWords = 0; + Resize(num); + ClearAll(); + } + + /// Constructs a fixed size bit array, with a range of bits set. 
+ /// + TfBits(size_t num, size_t first, size_t last) + { + _bits = NULL; + _numWords = 0; + Resize(num); + + if (num == 0) { + ClearAll(); + } else if (first == 0 && last >= (num - 1)) { + SetAll(); + } else { + ClearAll(); + for (size_t i = first; i <= last; ++i) + Set(i); + } + } + + /// Copy-constructs a fixed size bit array. + /// + TfBits(const TfBits &rhs) + { + _num = rhs._num; + _numSet .Store(rhs._numSet.Load()); + _firstSet .Store(rhs._firstSet.Load()); + _lastSet .Store(rhs._lastSet.Load()); + _numWords = rhs._numWords; + _bits = _Alloc(_numWords); + + // This loop turns out to be faster than a memcpy. + for (size_t i = 0; i < _numWords; ++i) + _bits[i] = rhs._bits[i]; + } + + /// Move constructor. + /// + TfBits(TfBits &&rhs) : TfBits(0) + { + Swap(rhs); + } + + /// Destructor + /// + ~TfBits() + { + _Free(_bits, _numWords); + } + + /// Assignment operator + /// + TfBits &operator=(const TfBits &rhs) + { + // Early bail out. + if (this == &rhs) { + return *this; + } + + // Avoid free-ing and reallocing if we have the same size + if (_numWords != rhs._numWords) { + _Free(_bits, _numWords); + _bits = _Alloc(rhs._numWords); + } + + _num = rhs._num; + _numSet .Store(rhs._numSet.Load()); + _firstSet .Store(rhs._firstSet.Load()); + _lastSet .Store(rhs._lastSet.Load()); + _numWords = rhs._numWords; + + // This loop turns out to be faster than a memcpy. + for (size_t i = 0; i < _numWords; ++i) + _bits[i] = rhs._bits[i]; + + return (*this); + } + + /// Move assignment operator. + /// + TfBits &operator=(TfBits &&rhs) + { + if (this == &rhs) + return *this; + + Swap(rhs); + + return *this; + } + + /// Resizes the bit array, however, the bits are left uninitialized. + /// So you most likely want to call ClearAll(); or SetAll();. + /// + void Resize(size_t num) + { + if (_bits && _num == num) + return; + + _Free(_bits, _numWords); + + _num = num; + _numSet .Store(-1); + _firstSet .Store(-1); + _lastSet .Store(-1); + _numWords = (num + 63) >> 6; + _bits = _Alloc(_numWords); + + // By definition, the unused, trailing bits always needs to be + // initialized to 0 and all operations can assume they are 0. + + if (_numWords) + _bits[_numWords - 1] = 0; + } + + /// Resizes the size of the bit array while keeping the content. + /// + void ResizeKeepContent(size_t num) + { + if (num == _num) + return; + + //XXX: We could try to be fancy and not re-allocate in certain cases. + TfBits temp(num); + + // Figure out how much to copy. + size_t numWordsToCopy = TfMin(temp._numWords, _numWords); + + for(size_t i=0; i= rhs.GetSize(). This is more efficient than + /// padding \p rhs to the correct size beforehand. + /// + TF_API + void OrSubset(const TfBits &rhs); + + /// Provides a fast swap. + /// + void Swap(TfBits &rhs) + { + if (this == &rhs) + return; + + std::swap(_num, rhs._num); + + // Because Swap is a mutating operation, we do not require atomic + // updates to the set-bits members. + _numSet.NonAtomicSwap(rhs._numSet); + _firstSet.NonAtomicSwap(rhs._firstSet); + _lastSet.NonAtomicSwap(rhs._lastSet); + + if (_numWords == 1 && rhs._numWords == 1) { + + // Both sides use inline storage. + // + // We can just swap the inline data. Both _bits & rhs._bits will + // already point their respective inline storage. + + std::swap(_inlineData, rhs._inlineData); + + } else if (_numWords == 1) { + + // 'this' uses inline storage; 'rhs' uses heap-allocated storage. + // + // Transfer rhs's heap-allocated data to ourself and copy our inline + // data to rhs. 
We leave our _inlineData unchanged as it is now + // essentially garbage. + + _bits = rhs._bits; + rhs._inlineData = _inlineData; + rhs._bits = &rhs._inlineData; + + } else if (rhs._numWords == 1) { + + // 'rhs' uses inline storage; 'this' uses heap-allocated storage. + // + // Transfer our heap-allocated data to rhs and copy rhs's inline + // data to our inline storage. We leave rhs._inlineData unchanged + // as it is now essentially garbage. + + rhs._bits = _bits; + _inlineData = rhs._inlineData; + _bits = &_inlineData; + + } else { + + // Both sides use heap-allocated storage. + // + // We can just swap the _bits pointers and ignore _inlineData. + + std::swap(_bits, rhs._bits); + + } + + // Swap _numWords only after swapping data. Otherwise, reasoning about + // whose _bits & _inlineData to update gets confusing. + std::swap(_numWords, rhs._numWords); + } + + // Swap overload for unqualified calls in generic code. + // + friend void swap(TfBits &lhs, TfBits &rhs) { + lhs.Swap(rhs); + } + + /// Clears all bits to zero. + /// + void ClearAll() + { + memset(_bits, 0x00, _numWords << 3); + _numSet.Store(0); + _firstSet.Store(_num); + _lastSet.Store(_num); + } + + /// Sets all bits to one. + /// + void SetAll() + { + memset(_bits, 0xff, _numWords << 3); + _numSet.Store(_num); + _firstSet.Store(0); + _lastSet.Store(_num > 0 ? _num-1 : 0); + + // Clear out unused bits... + _ClearTrailingBits(); + } + + /// Clears bit # index to zero. + /// + void Clear(size_t index) + { + TF_AXIOM(index < _num); + + uint64_t mask = UINT64_C(1) << (index & 63); + + if (_bits[index >> 6] & mask) + { + const size_t numSet = _numSet.Load(); + TF_AXIOM(numSet == size_t(-1) || numSet > 0); + + if (numSet != size_t(-1)) + _numSet.Decrement(); + if (index == _firstSet.Load()) + _firstSet.Store(-1); + if (index == _lastSet.Load()) + _lastSet.Store(-1); + + _bits[index >> 6] ^= mask; + } + } + + /// Sets bit # index to one. + /// + void Set(size_t index) + { + TF_AXIOM(index < _num); + + uint64_t mask = UINT64_C(1) << (index & 63); + + if (!(_bits[index >> 6] & mask)) + { + const size_t numSet = _numSet.Load(); + TF_AXIOM(numSet == size_t(-1) || numSet < _num); + + if (numSet != size_t(-1)) + _numSet.Increment(); + if (index < _firstSet.Load()) + _firstSet.Store(index); + const size_t lastSet = _lastSet.Load(); + if (index > lastSet || lastSet == _num) + _lastSet.Store(index); + + _bits[index >> 6] |= mask; + } + } + + /// Assigns val to bit # index. + /// + void Assign(size_t index, bool val) + { + if (val) + Set(index); + else + Clear(index); + } + + /// Returns true, if bit # index is set. + /// + bool IsSet(size_t index) const + { + TF_AXIOM(index < _num); + + return _bits[index >> 6] & (UINT64_C(1) << (index & 63)); + } + + /// Finds the next set bit that has a higher or equal index than index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + size_t FindNextSet(size_t index) const + { + if (ARCH_UNLIKELY(index >= _num)) { + return _num; + } + + size_t startBit = index & 63; + + // Early out for bit set... + if (_bits[index >> 6] & (UINT64_C(1) << startBit)) + return index; + + return _FindNextSet(index, startBit); + } + + /// Finds the prev set bit that has a lower or equal index than index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + size_t FindPrevSet(size_t index) const + { + if (ARCH_UNLIKELY(index >= _num)) { + return _num; + } + + size_t startBit = index & 63; + + // Early out for bit set... 
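// Illustrative sketch (not part of the patch): typical iteration over the
// set bits with the Find* API declared in this header. The function name is
// a hypothetical stand-in; TfBits and TF_AXIOM come from this header.
static void Tf_ExampleIterateSetBits()
{
    TfBits bits(256);   // 256 bits, cleared by the constructor
    bits.Set(5);
    bits.Set(200);

    // FindNextSet() returns GetSize() once no set bit remains.
    for (size_t i = bits.GetFirstSet(); i < bits.GetSize();
         i = bits.FindNextSet(i + 1)) {
        // visits i == 5, then i == 200
    }

    TF_AXIOM(bits.GetNumSet() == 2);
    TF_AXIOM(!bits.AreContiguouslySet());   // 5 and 200 are not adjacent
}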
+ if (_bits[index >> 6] & (UINT64_C(1) << startBit)) + return index; + + return _FindPrevSet(index, startBit); + } + + /// Finds the next unset bit that has a higher or equal index than index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + size_t FindNextUnset(size_t index) const + { + if (ARCH_UNLIKELY(index >= _num)) { + return _num; + } + + size_t startBit = index & 63; + + // Early out for bit set... + if (!(_bits[index >> 6] & (UINT64_C(1) << startBit))) + return index; + + return _FindNextUnset(index, startBit); + } + + /// Returns the size of the bit array, ie. the # of bits it can hold. + /// + size_t GetSize() const + { + return _num; + } + + /// Returns \c true if this bit array is empty, i.e. it is of size zero. + /// + bool IsEmpty() const + { + return _num == 0; + } + + /// Returns the index of the first bit set in the bit array. If no bits + /// are set, the return value is 'GetSize()'. + /// + size_t GetFirstSet() const + { + // See comment at top of this file on why this is thread safe. + size_t firstSet = _firstSet.Load(); + if (firstSet == size_t(-1)) { + firstSet = FindNextSet(0); + _firstSet.Store(firstSet); + } + + return firstSet; + } + + /// Returns the index of the last bit set in the bit array. If no bits + /// are set, the return value is 'GetSize()'. + /// + size_t GetLastSet() const + { + // See comment at top of this file on why this is thread safe. + size_t lastSet = _lastSet.Load(); + if (lastSet == size_t(-1)) { + // Also works if _num is 0. + lastSet = FindPrevSet(_num-1); + _lastSet.Store(lastSet); + } + + return lastSet; + } + + /// Returns the number of bits currently set in this array. + /// + size_t GetNumSet() const + { + // See comment at top of this file on why this is thread safe. + size_t numSet = _numSet.Load(); + if (numSet == size_t(-1)) { + numSet = _CountNumSet(); + _numSet.Store(numSet); + } + + return numSet; + } + + /// Returns true, if all the bits in this bit array are set. + /// + bool AreAllSet() const + { + // Note that "not IsAnyUnset();" is not cached because FindNextUnset(0); + // isn't. Therefore we use GetNumSet() which is cached. + return GetNumSet() == GetSize(); + } + + /// Returns true, if all the bits in this bit array are unset. + /// + bool AreAllUnset() const + { + return !IsAnySet(); + } + + /// Returns true, if there is at least a single set bit. + /// + bool IsAnySet() const + { + return GetFirstSet() < GetSize(); + } + + /// Returns true, if there is at least a single unset bit. + /// + bool IsAnyUnset() const + { + return !AreAllSet(); + } + + /// Returns true if the set bits in this bit array are contiguous. + /// + /// Note: This returns false if there are no set bits. + /// + bool AreContiguouslySet() const + { + return GetNumSet() == GetLastSet() - GetFirstSet() + 1; + } + + /// Returns the amount of memory this object holds on to. + /// + size_t GetAllocatedSize() const + { + size_t memUsed = sizeof(TfBits); + + // Note that up to 64 bits are inlined, cf. _Alloc(); + if (_numWords > 1) + memUsed += _numWords << 3; + + return memUsed; + } + + /// Returns a hash for this instance. + /// + TF_API + size_t GetHash() const; + + /// Returns a string representing the bits for debugging with bits + /// ordered from left to right with increasing indices. + /// + TF_API + std::string GetAsStringLeftToRight() const; + + /// Returns a string representing the bits for debugging with bits + /// ordered from right to left with increasing indices. 
+ /// + TF_API + std::string GetAsStringRightToLeft() const; + + /// \name Operators + /// @{ + + /// Returns true if this == \p rhs. + /// + TF_API + bool operator==(const TfBits &rhs) const; + + /// Returns true if this != \p rhs. + /// + bool operator!=(const TfBits &rhs) const + { + return !(*this == rhs); + } + + /// Ands these bits with the \p rhs bits. + /// + /// The resulting bit set is the intersection of the two bit sets. + /// + TF_API + TfBits &operator&=(const TfBits &rhs); + + /// Returns these bits and'ed with \p rhs. + /// + TfBits operator&(const TfBits &rhs) const + { + TfBits r(*this); + r &= rhs; + return r; + } + + /// Ors these bits with the \p rhs bits. + /// + /// The resulting bit set is the union of the two bit sets. + /// + TF_API + TfBits &operator|=(const TfBits &rhs); + + /// Returns these bits or'ed with \p rhs. + /// + TfBits operator|(const TfBits &rhs) const + { + TfBits r(*this); + r |= rhs; + return r; + } + + /// Xors these bits with the \p rhs bits. + /// + /// The resulting bit set is the union of the two bit sets minus the + /// intersection of the two bit sets. + /// + TF_API + TfBits &operator^=(const TfBits &rhs); + + /// Returns these bits xor'ed with \p rhs. + /// + TfBits operator^(const TfBits &rhs) const + { + TfBits r(*this); + r ^= rhs; + return r; + } + + /// Removes all bits in the \p rhs bits from these bits. + /// + /// The resulting bit set is the asymmetric set difference of + /// the two bit sets. + /// + TF_API + TfBits &operator-=(const TfBits &rhs); + + /// Flips all bits. + /// + /// The resulting bit set is the complement of this bit set. + /// + TF_API + TfBits &Complement(); + + /// Returns bit at \p index. + /// + bool operator[](size_t index) const + { + return IsSet(index); + } + + /// @} + + + /// Returns true if the result of the intersection with \p rhs would be + /// non-zero. + /// + /// This method can be used for efficiency because it doesn't perform + /// the full AND operation on a copy, and it can return early. + /// + bool HasNonEmptyIntersection(const TfBits &rhs) const + { + TF_AXIOM(_num == rhs._num); + + // Limit the bit operations to where we have bits set in both of + // the two sets. + size_t firstSet = GetFirstSet(); + size_t rhsFirstSet = rhs.GetFirstSet(); + + // Nothing to compare if either set is empty. + if (firstSet < _num && rhsFirstSet < _num) + { + firstSet = TfMax(firstSet, rhsFirstSet); + size_t lastSet = TfMin(GetLastSet(), rhs.GetLastSet()); + + if (firstSet <= lastSet) + { + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + // Have to compare the bits. + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + // Note: This assumes trailing bits in last word to be zero. + if (uint64_t word = *p0) + if (word & *p1) + return true; + p0++; + p1++; + } + } + } + + return false; + } + + /// Returns true if the result of an asymmetric set different is non-zero. + /// This is the equivalent to computing: + /// return (this - rhs).GetNumSet() != 0 + /// but avoids creating temporary copies. + /// + bool HasNonEmptyDifference(const TfBits &rhs) const + { + TF_AXIOM(_num == rhs._num); + + // Limit the bit operations to where we have bits set in the first set. + size_t firstSet = GetFirstSet(); + + // The difference is empty if the first set is empty. 
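A rough sketch of the set-algebra operators and HasNonEmptyIntersection() documented above, under the same assumed includes and namespace setup as the earlier sketches; the example function name is hypothetical.

```cpp
#include "pxr/base/tf/bits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleTfBitsSetAlgebra()
{
    TfBits a(8), b(8);
    a.Set(0); a.Set(1); a.Set(2);      // a = {0, 1, 2}
    b.Set(2); b.Set(3);                // b = {2, 3}

    TF_AXIOM((a & b).GetNumSet() == 1);        // intersection {2}
    TF_AXIOM((a | b).GetNumSet() == 4);        // union {0, 1, 2, 3}
    TF_AXIOM((a ^ b).GetNumSet() == 3);        // symmetric difference {0, 1, 3}
    TF_AXIOM(a.HasNonEmptyIntersection(b));    // cheaper than (a & b).IsAnySet()

    TfBits c(a);
    c -= b;                                    // asymmetric difference {0, 1}
    TF_AXIOM(c.GetNumSet() == 2);
}
```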
+ if (firstSet < _num) + { + size_t lastSet = GetLastSet(); + size_t rhsFirstSet = rhs.GetFirstSet(); + size_t rhsLastSet = rhs.GetLastSet(); + + // Check for trivial non-empty difference (we know that the first + // set is not empty). + if (firstSet < rhsFirstSet || lastSet > rhsLastSet || + firstSet > rhsLastSet || lastSet < rhsFirstSet || + GetNumSet() > rhs.GetNumSet()) + return true; + + size_t offset = firstSet >> 6; + size_t numWords = (lastSet >> 6) + 1 - offset; + + // Have to compare the bits. + uint64_t *p0 = _bits + offset; + uint64_t *p1 = rhs._bits + offset; + + for(size_t n=numWords; n>0; n--) + { + // Note: This assumes trailing bits in last word to be the same. + if (uint64_t word = *p0) + if (word & ~*p1) + return true; + p0++; + p1++; + } + } + + return false; + } + + /// Returns true if this bit array contains \p rhs by computing: + /// (rhs - this).GetNumSet() == 0. + /// + /// Ie. it will return true if all bits of \p rhs are also set in this. + /// + bool Contains(const TfBits &rhs) const + { + return !rhs.HasNonEmptyDifference(*this); + } + + /// Iterator support. + /// + template + class View + { + public: + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const size_t; + using reference = const size_t &; + using pointer = const size_t *; + using difference_type = const size_t; + + const_iterator() + : _bits(NULL), _index(0) {} + + reference operator*() const { return dereference(); } + pointer operator->() const { return &(dereference()); } + + const_iterator& operator++() { + increment(); + return *this; + } + + const_iterator operator++(int) { + const_iterator r(*this); + increment(); + return r; + } + + bool operator==(const const_iterator& rhs) const { + return equal(rhs); + } + + bool operator!=(const const_iterator& rhs) const { + return !equal(rhs); + } + + private: + + friend class View; + + // Ctor. + const_iterator( + const TfBits *bits, size_t index) + : _bits(bits), _index(index) {} + + bool equal(const const_iterator &rhs) const { + return _bits == rhs._bits && _index == rhs._index; + } + + void increment() { + ++_index; + + if (mode == AllSet) + _index = _bits->FindNextSet(_index); + else if (mode == AllUnset) + _index = _bits->FindNextUnset(_index); + } + + const size_t &dereference() const { + return _index; + } + + private: + + // The bits being iterated over. + const TfBits *_bits; + + // The index. + size_t _index; + }; + + // Support for TF_FOR_ALL. + typedef const_iterator iterator; + + const_iterator begin() const { + size_t start = 0; + if (mode == AllSet) + start = _bits->GetFirstSet(); + else if (mode == AllUnset) + start = _bits->FindNextUnset(0); + + return const_iterator(_bits, start); + } + + const_iterator end() const { + return const_iterator(_bits, _bits->GetSize()); + } + + /// Return true, if the view is empty. + /// + bool IsEmpty() const { + return begin() == end(); + } + + private: + + // The TfBits can create new views. + friend class TfBits; + + // Ctor. + View(const TfBits *bits) + : _bits(bits) {} + + const TfBits *_bits; + }; + + using AllView = View; + using AllSetView = View; + using AllUnsetView = View; + + /// Returns an iteratable view for the bits that steps over all bits. + /// + AllView GetAllView() const { + return AllView(this); + } + + /// Returns an iteratable view for the bits that steps over all set bits. 
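The containment queries and the set-bit view above can be exercised as in this illustrative sketch (same assumed includes and namespace macro as before; the function name is not part of the patch).

```cpp
#include "pxr/base/tf/bits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleTfBitsViews()
{
    TfBits a(8), b(8);
    a.Set(1); a.Set(3); a.Set(5);
    b.Set(3);

    TF_AXIOM(a.Contains(b));           // every bit set in b is also set in a
    TF_AXIOM(!b.Contains(a));

    // The set-bit view visits only indices 1, 3 and 5.
    size_t sum = 0;
    for (size_t index : a.GetAllSetView()) {
        sum += index;
    }
    TF_AXIOM(sum == 9);
}
```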
+ /// + AllSetView GetAllSetView() const { + return AllSetView(this); + } + + /// Returns an iteratable view for the bits that steps over all unset bits. + /// + AllUnsetView GetAllUnsetView() const { + return AllUnsetView(this); + } + +// ----------------------------------------------------------------------------- + +private: + + // This counts the number of set bits. + TF_API + size_t _CountNumSet() const; + + // This is a helper method for FindNextSet so that we don't have to inline + // the whole method. This gives us the best compromise for speed and code + // size. + TF_API + size_t _FindNextSet(size_t index, size_t startBit) const; + + // This is a helper method for FindPrevSet so that we don't have to inline + // the whole method. This gives us the best compromise for speed and code + // size. + TF_API + size_t _FindPrevSet(size_t index, size_t startBit) const; + + // This is a helper method for FindNextUnset so that we don't have to inline + // the whole method. This gives us the best compromise for speed and code + // size. + TF_API + size_t _FindNextUnset(size_t index, size_t startBit) const; + + // This is a helper method that clear out unused bits in the last word of + // the bit array. + TF_API + void _ClearTrailingBits(); + + // Helper that performs the or operation on these bits where rhs must have + // same or less # of bits. + TF_API + void _Or(const TfBits &rhs); + + // Allocates the bits array with \p numWords words. + // If \p numWords is 0, NULL is returned. If \p numWords is 1, inline + // data will be used (to avoid an extra malloc). + // Returned memory must be freed with _Free(). + + uint64_t *_Alloc(size_t numWords) + { + if (!numWords) + return NULL; + + if (numWords == 1) + return &_inlineData; + + return new uint64_t[numWords]; + } + + // Frees data allocated with _Alloc(). + static void _Free(uint64_t *data, size_t numWords) + { + if (numWords > 1) + delete [] data; + } + +private: + + // # of bits in this array. + size_t _num; + + // Wrapper class for lazily-initialized size_t members. + // + // These members only require relaxed ordering and we want to avoid + // unintentionally scribbling mfence all over the place with the + // sequentially consistent std::atomic operator=(size_t). + class _RelaxedAtomicSize_t + { + public: + _RelaxedAtomicSize_t() + : _n{} + {} + + explicit _RelaxedAtomicSize_t(size_t n) + : _n{n} + {} + + void Increment() { + _n.fetch_add(1, std::memory_order_relaxed); + } + + void Decrement() { + _n.fetch_sub(1, std::memory_order_relaxed); + } + + size_t Load() const { + return _n.load(std::memory_order_relaxed); + } + + void Store(size_t n) { + _n.store(n, std::memory_order_relaxed); + } + + // Note, it's not possible to do an atomic swap of two memory + // locations. Provide a non-atomic swap operation to be used when + // no concurrent operations may be taking place. See TfBits::Swap. + void NonAtomicSwap(_RelaxedAtomicSize_t &other) { + const size_t n = _n.load(std::memory_order_relaxed); + const size_t o = other._n.load(std::memory_order_relaxed); + _n.store(o, std::memory_order_relaxed); + other._n.store(n, std::memory_order_relaxed); + } + + private: + std::atomic _n; + }; + + // See comment at top of this file on why the usage of _numSet, _firstSet + // and _lastSet is thread safe. + + // # of bits set in this array (set to size_t(-1) when invalid). + mutable _RelaxedAtomicSize_t _numSet; + + // Cached first and last set bits (set to size_t(-1) when invalid). 
+ mutable _RelaxedAtomicSize_t _firstSet; + mutable _RelaxedAtomicSize_t _lastSet; + + // Size in uint64_t of the bits array. + size_t _numWords; + + // Pointer to the actual data. + uint64_t *_bits; + + // Data used if _num <= 64. + uint64_t _inlineData; +}; + +// Specialize this template so TfIterator knows to retain a copy when iterating. +template<> +struct Tf_ShouldIterateOverCopy< TfBits::AllView > : + std::true_type +{ +}; + +template<> +struct Tf_ShouldIterateOverCopy< TfBits::AllSetView > : + std::true_type +{ +}; + +template<> +struct Tf_ShouldIterateOverCopy< TfBits::AllUnsetView > : + std::true_type +{ +}; + +//! \brief Output a TfBits, as a stream of 0s and 1s. +// \ingroup group_tf_DebuggingOutput +TF_API std::ostream & operator<<(std::ostream &out, const TfBits & bits); + +PXR_NAMESPACE_CLOSE_SCOPE + +#endif diff --git a/pxr/base/tf/compressedBits.cpp b/pxr/base/tf/compressedBits.cpp new file mode 100644 index 0000000000..a410556559 --- /dev/null +++ b/pxr/base/tf/compressedBits.cpp @@ -0,0 +1,287 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// + +#include "pxr/base/tf/compressedBits.h" +#include "pxr/base/tf/bits.h" + +#include "pxr/base/arch/hash.h" +#include "pxr/base/tf/stringUtils.h" + +#include + +PXR_NAMESPACE_OPEN_SCOPE + +TfCompressedBits::TfCompressedBits(const TfBits &bits) : + _num(bits.GetSize()) +{ + if (bits.GetSize() == 0) { + _runningBit = 0; + _platforms.PushBack(0); + return; + } + + bool set = bits.IsSet(0); + _runningBit = set ? 1 : 0; + + size_t i = 0; + while (i < bits.GetSize()) { + size_t next = set ? + bits.FindNextUnset(i + 1) : + bits.FindNextSet(i + 1); + + _platforms.PushBack(next - i); + + set = !set; + i = next; + } +} + +size_t +TfCompressedBits::GetHash() const +{ + if (_num == 0) { + return 0; + } + + // Hash the running bit and number of platforms. + uint64_t seed = TfHash::Combine( + _runningBit, + _platforms.GetNum()); + + // Hash all the platform data. + return ArchHash64( + (const char*)&(_platforms[0]), + _platforms.GetNum() * sizeof(_WordType), + seed); +} + +std::string +TfCompressedBits::GetAsStringLeftToRight() const +{ + std::string res; + + uint8_t bit = _runningBit; + for (size_t i = 0; i < _platforms.GetNum(); ++i) { + for (size_t j = 0; j < _platforms[i]; ++j) { + res.push_back('0' + bit); + } + bit = 1 - bit; + } + + return res; +} + +std::string +TfCompressedBits::GetAsStringRightToLeft() const +{ + std::string res; + + uint8_t bit = _runningBit; + if ((_platforms.GetNum() & 1) == 0) { + bit = 1 - bit; + } + + for (int i = _platforms.GetNum() - 1; i >= 0; --i) { + for (size_t j = 0; j < _platforms[i]; ++j) { + res.push_back('0' + bit); + } + bit = 1 - bit; + } + + return res; +} + +std::string +TfCompressedBits::GetAsRLEString() const +{ + std::string res; + + // If the length of the mask is <= 4 bits we just print them left to + // right. This makes a lot of the simple unit tests much easier to + // read. 
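A small sketch of moving between the two representations, using the TfCompressedBits(const TfBits &) constructor and Decompress() added in this patch. It assumes TfBits' default constructor, Resize() and operator== from bits.h; the function name is illustrative only.

```cpp
#include "pxr/base/tf/bits.h"
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleCompressConversion()
{
    TfBits bits(12);
    bits.Set(0); bits.Set(1); bits.Set(2);   // 111000000000

    // Compress, then decompress back into a flat TfBits.
    TfCompressedBits compressed(bits);
    TF_AXIOM(compressed.GetNumSet() == 3);
    TF_AXIOM(compressed.GetAsStringLeftToRight() ==
             bits.GetAsStringLeftToRight());

    TfBits roundTrip;
    compressed.Decompress(&roundTrip);       // Decompress() resizes the target
    TF_AXIOM(roundTrip == bits);
}
```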
+ if (_num == 0) { + return res; + } else if (_num <= 4) { + return GetAsStringLeftToRight(); + } + + uint8_t bit = _runningBit; + res = TfIntToString(bit) + "x" + TfIntToString(_platforms[0]); + bit = 1 - bit; + + for (size_t i = 1; i < _platforms.GetNum(); ++i) { + res.push_back('-'); + res += TfIntToString(bit); + res.push_back('x'); + res += TfIntToString(_platforms[i]); + bit = 1 - bit; + } + + return res; +} + +static bool +_IsWhiteSpace(const char c) +{ + // These characters are considered whitespace in the string representation + // of a compressed bitset. + return c == ' ' || c == '\n' || c == '\r' || c == '\t'; +} + +static std::vector +_TokenizeRLEString(const std::string &source) +{ + // There are two types of token delimiters, and we toggle between the two. + // We first expect a 'x' delimiter, followed by a '-' delimiter, followed + // by another 'x', and so forth. + const std::array delimiters = { 'x', '-' }; + uint32_t nextDelimiterIdx = 0; + + // The resulting tokens. A platform is comprised of two tokens, a bit value + // (either zero or one), followed by a platform length. + std::vector tokens(1, 0); + + // Iterate over the source string and build a vector of tokens for the + // platforms representing the bitset. + for (const char c : source) { + + // Digits encode the integer value of the current token. + if (c >= '0' && c <= '9') { + const uint32_t digit = c - '0'; + tokens.back() = tokens.back() * 10 + digit; + } + + // Whitespace is ignored entirely. + else if (_IsWhiteSpace(c)) { + continue; + } + + // Delimiters constitute the beginning of a new token, but the next + // expected delimiter is toggled between 'x' and '-'. + else if (c == delimiters[nextDelimiterIdx]) { + nextDelimiterIdx = (nextDelimiterIdx + 1) % delimiters.size(); + tokens.push_back(0); + } + + // Reset the resulting vector of tokens and break the loop, if an + // unexpected character is encountered. This could be a non-digit, non- + // whitespace character, or an unexpected delimiter. + else { + tokens.clear(); + break; + } + } + + return tokens; +} + +static TfCompressedBits +_FromRLETokens(const std::vector &tokens) +{ + // The number of tokens must be even, because each platform is comprised of + // two tokens: A bit value (zero or one), and a platform length. + if (tokens.size() & 1) { + return TfCompressedBits(); + } + + // Iterate over the tokens in pairs, extract the bit value and platform + // length, and append a new platform to the resulting bitset. + TfCompressedBits result; + for (size_t i = 0; i < tokens.size(); i += 2) { + const uint32_t bit = tokens[i]; + const uint32_t length = tokens[i + 1]; + + // In order to be a valid representation, the bit value must be zero or + // one, and the platform length must be greater than 0. + if (bit > 1 || length == 0) { + return TfCompressedBits(); + } + + result.Append(length, bit); + } + + return result; +} + +static TfCompressedBits +_FromBinaryRepresentation(const std::string &source) +{ + TfCompressedBits result; + + // Iterate over the string and treat it as a binary representation, i.e. a + // string of zeros and ones. + for (const char c : source) { + + // Zeros and ones will be appended to the bitset. + if (c == '0' || c == '1') { + const bool bit = c - '0'; + result.Append(1, bit); + } + + // White space will be ignored entirely. + else if (_IsWhiteSpace(c)) { + continue; + } + + // Any other character is unexpected and constitutes an invalid + // representation. 
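The parsing rules implemented above accept either the RLE form or a plain binary string, and fall back to an empty bitset for anything else. A minimal illustration (function name assumed for the example):

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleFromString()
{
    // RLE form: alternating "<bit>x<count>" platforms separated by '-'.
    TfCompressedBits a = TfCompressedBits::FromString("1x5-0x5-1x100");
    TF_AXIOM(a.GetSize() == 110);
    TF_AXIOM(a.GetNumSet() == 105);

    // Binary form: a plain string of zeros and ones (whitespace ignored).
    TfCompressedBits b = TfCompressedBits::FromString("1111 1000 00");
    TF_AXIOM(b.GetSize() == 10 && b.GetNumSet() == 5);

    // An invalid representation (bit value must be 0 or 1) yields an
    // empty bitset.
    TfCompressedBits c = TfCompressedBits::FromString("1x5-2x3");
    TF_AXIOM(c.GetSize() == 0);
}
```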
+ else { + return TfCompressedBits(); + } + } + + return result; +} + +TfCompressedBits +TfCompressedBits::FromString(const std::string &source) +{ + TfCompressedBits result; + + // Assume the string is a RLE representation of the bits. Let's tokenize it + // (i.e. interleaved pairs of platform value bits, and platform lengths.) + std::vector tokens = _TokenizeRLEString(source); + + // If no tokens have been found, the source string is considered an empty + // representation. + if (tokens.empty()) { + return result; + } + + // Build a compressed bitset from the RLE tokens. + result = _FromRLETokens(tokens); + + // If this returns an empty bitset, maybe the string is encoded as a binary + // representation, i.e. a string of zeros and ones. + if (result.GetSize() == 0) { + result = _FromBinaryRepresentation(source); + } + + // Return the result from the conversion, or an empty representation if + // the conversion failed, due to an invalid string representation. + return result; +} + +void +TfCompressedBits::Decompress(TfBits *bits) const +{ + bits->Resize(_num); + bits->ClearAll(); + + size_t bitIndex = 0; + bool bitValue = _runningBit == 1; + for (size_t i = 0; i < _platforms.GetNum(); ++i) { + _WordType numBits = _platforms[i]; + for (_WordType j = 0; j < numBits; ++j) { + bits->Assign(bitIndex, bitValue); + ++bitIndex; + } + bitValue = !bitValue; + } +} + +PXR_NAMESPACE_CLOSE_SCOPE diff --git a/pxr/base/tf/compressedBits.h b/pxr/base/tf/compressedBits.h new file mode 100644 index 0000000000..c16d52397e --- /dev/null +++ b/pxr/base/tf/compressedBits.h @@ -0,0 +1,1964 @@ +// +// Copyright 2024 Pixar +// +// Licensed under the terms set forth in the LICENSE.txt file available at +// https://openusd.org/license. +// +#ifndef PXR_BASE_TF_COMPRESSED_BITS_H +#define PXR_BASE_TF_COMPRESSED_BITS_H + +#include "pxr/base/arch/align.h" +#include "pxr/base/tf/api.h" +#include "pxr/base/tf/diagnostic.h" +#include "pxr/base/tf/hash.h" +#include "pxr/base/tf/iterator.h" +#include "pxr/base/tf/staticData.h" + +#include +#include +#include +#include +#include + +PXR_NAMESPACE_OPEN_SCOPE + +/// Forward Declarations +/// +class TfBits; + +/// \class TfCompressedBits +/// +/// \brief Fast, compressed bit array which is capable of performing logical +/// operations without first decompressing the internal data representation. +/// +/// The internal data compression is based on a form of RLE, where words are +/// used to indicate the number of bits set to the same value. Each subsequent +/// word denotes that the bit value has changed and a "runningBit" is set +/// internally, in order to denote the bit value for the first word. +/// +/// Internally, a bitset like this: +/// +/// 111000101000 +/// +/// Will be represented as: +/// +/// 1 331113 +/// +/// i.e., the running bit is '1', and there are 3 of those, followed by 3 +/// zeroes, followed by 1 one, followed by 1 zero, followed by 1 one, followed +/// by three zeroes. Each word is called a "platform". +/// +/// Compressed bits are very fast when used for logical operations (conjugate, +/// and, or, xor, etc.), and when iterated over. Contains and Overlaps are also +/// very fast. The representation is lightweight in memory and hence very cache +/// efficient. +/// +/// Whenever indexing, setting and resetting of seemingly random bits is a +/// requirement, however, TfBits will perform better, since finding a specific +/// bit requires a linear search. 
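Picking up the example from the class documentation, the following sketch shows how the 111000101000 pattern decomposes into six platforms with a running bit of 1 (three ones, three zeros, one one, one zero, one one, three zeros); the function name is illustrative only.

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleRLERepresentation()
{
    TfCompressedBits bits = TfCompressedBits::FromString("111000101000");

    TF_AXIOM(bits.GetSize() == 12);
    TF_AXIOM(bits.GetNumPlatforms() == 6);
    TF_AXIOM(bits.GetNumSetPlatforms() == 3);
    TF_AXIOM(bits.GetAsRLEString() == "1x3-0x3-1x1-0x1-1x1-0x3");
}
```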
+/// +class TfCompressedBits +{ +private: + // Type of one word stored in the word array + typedef uint32_t _WordType; + + // Lightweight, re-allocating array type optimized for native, word data. + // + // Note, this is not a std::vector, because we really want a container, + // which is optimized for native types, allowing fast memcpy capabilities, + // and providing local storage optimizations. + class _WordArray + { + public: + static const uint32_t LOCAL_SIZE = 6; + + _WordArray() : + _data(_local), + _numAllocated(LOCAL_SIZE), + _num(0) {} + + _WordArray(const _WordArray &rhs) : + _data(_local), + _numAllocated(LOCAL_SIZE), + _num(rhs._num) { + _Duplicate(rhs); + } + + ~_WordArray() { + _Deallocate(); + } + + _WordArray& operator=(const _WordArray &rhs) { + if (this == &rhs) { + return *this; + } + + _Duplicate(rhs); + return *this; + } + + // Clear all words + void Clear() { + _num = 0; + } + + // Add a word (may cause re-allocation) + void PushBack(_WordType value) { + // Reallocate? + if (_num >= _numAllocated) { + _Reallocate(); + } + + // PushBack + _data[_num] = value; + ++_num; + } + + // Remove a word + void PopBack() { + --_num; + } + + // Remove multiple words + void PopBackNum(uint32_t popNum) { + _num -= popNum; + } + + // Move this representation into rhs. This representation will be + // invalid after this operation. + void MoveInto(_WordArray &rhs) { + rhs._numAllocated = _numAllocated; + rhs._num = _num; + + rhs._Deallocate(); + + // If the data is stored locally, copy it + if (_IsStoredLocally()) { + // The compiler will unroll this loop, making this faster + // than memcpy! + for (size_t i = 0; i < LOCAL_SIZE; ++i) { + rhs._data[i] = _data[i]; + } + } + + // Otherwise, just assign pointers + else { + rhs._data = _data; + + // Set our pointer back to local, so we won't deallocate the + // storage, which is now owned by rhs. + _data = _local; + _numAllocated = LOCAL_SIZE; + } + } + + // Swap two representations + void Swap(_WordArray &rhs) { + if (!_IsStoredLocally() && !rhs._IsStoredLocally()) { + std::swap(_data, rhs._data); + std::swap(_numAllocated, rhs._numAllocated); + std::swap(_num, rhs._num); + } else { + // Fall back to a copy. This could be optimized. + std::swap(*this, rhs); + } + } + + // Index operator + _WordType &operator[](size_t index) { + return _data[index]; + } + + const _WordType &operator[](size_t index) const { + return _data[index]; + } + + // Returns the number of words stored (not allocated) + uint32_t GetNum() const { + return _num; + } + + // Return the number of allocated words + uint32_t GetNumAllocated() const { + return _numAllocated; + } + + // Return a pointer to the first word + const _WordType *Begin() const { + return _data; + } + + // Return a pointer one past the end of the array. 
+ const _WordType *End() const { + return _data + _num; + } + + // Returns the first word + _WordType &Front() { + return _data[0]; + } + + const _WordType &Front() const { + return _data[0]; + } + + // Returns the last word + _WordType &Back() { + return _data[_num - 1]; + } + + const _WordType &Back() const { + return _data[_num - 1]; + } + + private: + bool _IsStoredLocally() const { + return _data == _local; + } + + void _Deallocate() { + if (!_IsStoredLocally()) { + delete[] _data; + _data = _local; + } + } + + void _Duplicate(const _WordArray &rhs) { + if (rhs._num > 0) { + if (_numAllocated < rhs._num) { + _Deallocate(); + _data = new _WordType[rhs._numAllocated]; + _numAllocated = rhs._numAllocated; + } + + if (rhs._IsStoredLocally()) { + // The compiler will unroll this loop, making this faster + // than memcpy! + for (size_t i = 0; i < LOCAL_SIZE; ++i) { + _data[i] = rhs._data[i]; + } + } else { + memcpy(_data, rhs._data, sizeof(_WordType) * rhs._num); + } + } + + _num = rhs._num; + } + + void _Reallocate() { + _numAllocated <<= 1; + _WordType *newData = new _WordType[_numAllocated]; + memcpy(newData, _data, sizeof(_WordType) * _num); + _Deallocate(); + _data = newData; + } + + // Pointer to the data + _WordType *_data; + + // Local storage optimization + _WordType _local[LOCAL_SIZE]; + + // Number of words allocated + uint32_t _numAllocated; + + // Number of words stored + uint32_t _num; + }; + +public: + + // View and iterator modes: All bits, all set bits, all unset bits, + // platforms (iterator provides platform size and value) + enum class Mode { All, AllSet, AllUnset, Platforms }; + + /// Hash for TfCompressedBits. + /// + /// This hash is linear in time as it considers all the words between. + /// If you need a constant-time hash, see FastHash, it may be suitable for + /// your needs. + /// + struct Hash { + size_t operator()(const TfCompressedBits &bits) const { + return bits.GetHash(); + } + }; + + /// A hash functor for TfCompressedBits that is faster than Hash. + /// + /// This hash can be computed in constant time because it only uses a + /// fixed subset of data: the number of bits in total, the running bit, + /// the number of words and the first cache line of words. + /// + struct FastHash { + size_t operator()(const TfCompressedBits &bits) const { + if (bits.GetSize() == 0) { + return 0; + } + + // Hash the running bit and number of platforms. + size_t hash = TfHash::Combine( + bits.GetSize(), + bits._runningBit, + bits._platforms.GetNum()); + + // Hash a single cache line of platform data. + const uint32_t n = std::min( + bits._platforms.GetNum(), + ARCH_CACHE_LINE_SIZE / sizeof(uint32_t)); + for (uint32_t i = 0; i < n; ++i) { + hash = TfHash::Combine(hash, bits._platforms[i]); + } + + return hash; + } + }; + + /// Constructs a fixed size bit array, clears all bits. + /// + explicit TfCompressedBits(size_t num = 0) : + _num(num), + _runningBit(0) { + _platforms.PushBack(num); + } + + /// Constructs a fixed size bit array, with a range of bits set. 
+ /// + explicit TfCompressedBits(size_t num, size_t first, size_t last) : + _num(num), + _runningBit(0) { + + // Empty bitset + if (num == 0) { + _platforms.PushBack(0); + return; + } + + // Range error (clear the whole bitset): + if (!TF_VERIFY(first < num && last < num && first <= last)) { + _platforms.PushBack(num); + return; + } + + size_t trailingZeroes = 0; + const size_t range = last - first + 1; + if (first == 0) { + _runningBit = 1; + _platforms.PushBack(range); + trailingZeroes = num - range; + } else { + _platforms.PushBack(first); + _platforms.PushBack(range); + trailingZeroes = num - last - 1; + } + + // Only push trailing zeroes, if there are any. Otherwise the + // _platforms array will be in an inconsistent state (containing + // platforms of size 0, when _num != 0). + if (trailingZeroes != 0) { + _platforms.PushBack(trailingZeroes); + } + } + + /// Copy-constructs a fixed size bit array. + /// + TfCompressedBits(const TfCompressedBits &rhs) : + _platforms(rhs._platforms), + _num(rhs._num), + _runningBit(rhs._runningBit) {} + + /// Copy-construct a fixed sized bit array, from the complement of the + /// \p rhs bitset. + /// + enum ComplementTagType { ComplementTag }; + TfCompressedBits(const TfCompressedBits &rhs, ComplementTagType) : + _platforms(rhs._platforms), + _num(rhs._num), + _runningBit(1 - rhs._runningBit) { + if (_num == 0) { + _runningBit = 0; + } + } + + /// Construct a TfCompressedBits array from a TfBits array. + /// + TF_API + explicit TfCompressedBits(const TfBits &bits); + + /// Move Constructor + /// + TfCompressedBits(TfCompressedBits &&rhs) : + _num(rhs._num), + _runningBit(rhs._runningBit) { + rhs._platforms.MoveInto(_platforms); + rhs._platforms.Clear(); + rhs._num = 0; + rhs._runningBit = 0; + } + + /// Destructor + /// + ~TfCompressedBits() {} + + /// Assignment operator + /// + TfCompressedBits &operator=(const TfCompressedBits &rhs) { + if (this == &rhs) { + return *this; + } + + _platforms = rhs._platforms; + _num = rhs._num; + _runningBit = rhs._runningBit; + + return *this; + } + + /// Move assignment operator. + /// + TfCompressedBits &operator=(TfCompressedBits &&rhs) { + if (this == &rhs) { + return *this; + } + + _num = rhs._num; + _runningBit = rhs._runningBit; + rhs._platforms.MoveInto(_platforms); + rhs._platforms.Clear(); + rhs._num = 0; + rhs._runningBit = 0; + + return *this; + } + + /// Resize the bitset, while keeping the contents, unless trimmed. + /// + void ResizeKeepContents(size_t num) { + if (_num == num) { + return; + } + + // Reduce size to 0 + if (num == 0) { + _platforms.Clear(); + _platforms.PushBack(0); + _runningBit = 0; + _num = 0; + return; + } + + // Grow + if (_num < num) { + if ((UINT32_C(1) - _runningBit) == + (_platforms.GetNum() & UINT32_C(1))) { + _platforms.Back() += (num - _num); + } else { + _platforms.PushBack(num - _num); + } + } + + // Shrink + else if (_num > num) { + uint32_t diff = _num - num; + while (_platforms.Back() <= diff) { + diff -= _platforms.Back(); + _platforms.PopBack(); + } + _platforms.Back() -= diff; + } + + _num = num; + } + + /// Provides a fast swap. + /// + void Swap(TfCompressedBits &rhs) { + std::swap(_num, rhs._num); + std::swap(_runningBit, rhs._runningBit); + _platforms.Swap(rhs._platforms); + } + + /// Clears all bits to zero. + /// + void ClearAll() { + if (_num <= 0 || (_runningBit == 0 && _platforms.GetNum() == 1)) { + return; + } + + _runningBit = 0; + _platforms.Clear(); + _platforms.PushBack(_num); + } + + /// Sets all bits to one. 
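A brief sketch of the range constructor and the ComplementTag copy constructor declared above, under the same assumed includes and namespace setup; the example name is not part of the patch.

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleRangeConstruction()
{
    // Bits 2..5 set out of 10: 0011110000
    TfCompressedBits mask(10, 2, 5);
    TF_AXIOM(mask.GetFirstSet() == 2);
    TF_AXIOM(mask.GetLastSet() == 5);
    TF_AXIOM(mask.GetNumSet() == 4);
    TF_AXIOM(mask.AreContiguouslySet());

    // The complement tag builds the inverse without a separate
    // Complement() pass.
    TfCompressedBits inverse(mask, TfCompressedBits::ComplementTag);
    TF_AXIOM(inverse.GetNumSet() == 6);
    TF_AXIOM(!inverse.IsSet(3));
}
```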
+ /// + void SetAll() { + if (_num <= 0 || (_runningBit == 1 && _platforms.GetNum() == 1)) { + return; + } + + _runningBit = 1; + _platforms.Clear(); + _platforms.PushBack(_num); + } + + /// Clears bit # index to zero. + /// + /// Note: This is a slow operation on TfCompressedBits! + /// + void Clear(size_t index) { + if (!TF_VERIFY(index < _num)) { + return; + } + + TfCompressedBits tmp(_num, index, index); + tmp.Complement(); + *this &= tmp; + } + + /// Sets bit # index to zero. + /// + /// Note: This is a slow operation on TfCompressedBits! + /// + void Set(size_t index) { + if (!TF_VERIFY(index < _num)) { + return; + } + + TfCompressedBits tmp(_num, index, index); + *this |= tmp; + } + + /// Sets the bit within the range of first and last. + /// + /// Note: This is a slow operation on TfCompressedBits! + /// + void SetRange(size_t first, size_t last) { + // Range constructor does error checking + TfCompressedBits tmp(_num, first, last); + *this |= tmp; + } + + /// Append a number of bits with the given \p value to this bitset. + /// This also increases the size of the bitset by the number of bits added. + /// + void Append(size_t num, bool value) { + if (num == 0) { + return; + } + + if (_num == 0) { + _platforms[0] = num; + _runningBit = value; + _num = num; + return; + } + + const bool lastValue = _runningBit == (_platforms.GetNum() & 1); + if (value == lastValue) { + _platforms.Back() += num; + } else { + _platforms.PushBack(num); + } + + _num += num; + } + + /// Assigns val to bit # index. + /// + void Assign(size_t index, bool value) { + if (value) { + Set(index); + } else { + Clear(index); + } + } + + /// Shift this bitset a given number of \p bits to the right, and extend to + /// the left with zeroes. + /// + void ShiftRight(size_t bits) { + if (_num == 0 || bits == 0) { + return; + } + + // If the running bit is 0, just increment the first word (num zeroes) + if (_runningBit == 0) { + _platforms.Front() += bits; + } + + // If the running bit is 1, shift all the _platforms to the right and + // flip the running bit. Set the first platform (num zeroes) to the + // number of bits shifted. + else { + _runningBit = 0; + _platforms.PushBack(0); + for (size_t i = _platforms.GetNum() - 1; i > 0; --i) { + _platforms[i] = _platforms[i - 1]; + } + _platforms[0] = bits; + } + + // Now trim the _platforms on the right + while (_platforms.Back() <= bits) { + bits -= _platforms.Back(); + _platforms.PopBack(); + } + _platforms.Back() -= bits; + } + + /// Shift this bitset a given number of \p bits to the left, and extend the + /// right with zeroes. + /// + void ShiftLeft(size_t bits) { + if (_num == 0 || bits == 0) { + return; + } + + // How many platforms to trim on the left? + size_t trimBits = bits; + size_t platformIndex = 0; + while (platformIndex < _platforms.GetNum() && + _platforms[platformIndex] <= trimBits) { + trimBits -= _platforms[platformIndex]; + ++platformIndex; + } + + // Reduce the size of the first platform or, if the shift clears the + // whole bitset, remove all platforms. + if (platformIndex < _platforms.GetNum()) { + _platforms[platformIndex] -= trimBits; + } else { + _platforms.Clear(); + _runningBit = 0; + platformIndex = 0; + } + + // Are there any platforms to be trimmed on the left? 
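Since Set() and Clear() rebuild the compressed representation, masks are best built front to back with Append(), as in this illustrative sketch (function name assumed).

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleAppend()
{
    TfCompressedBits mask;             // starts empty (size 0)
    mask.Append(3, true);              // 111
    mask.Append(5, false);             // 11100000
    mask.Append(2, true);              // 1110000011

    TF_AXIOM(mask.GetSize() == 10);
    TF_AXIOM(mask.GetNumSet() == 5);
    TF_AXIOM(mask.GetAsRLEString() == "1x3-0x5-1x2");

    // Shifting right prepends zeroes and trims on the right, keeping the
    // overall size unchanged.
    mask.ShiftRight(2);                // 0011100000
    TF_AXIOM(mask.GetFirstSet() == 2);
    TF_AXIOM(mask.GetSize() == 10);
}
```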
+ if (platformIndex > 0) { + // Shift the platforms to the left, by the number of + // platforms trimmed + const size_t last = _platforms.GetNum() - platformIndex; + for (size_t i = 0; i < last; ++i) { + _platforms[i] = _platforms[i + platformIndex]; + } + _platforms.PopBackNum(platformIndex); + + // Flip the running bit, if necessary + if (platformIndex & 1) { + _runningBit = 1 - _runningBit; + } + } + + // Extend on the right, by adding zeros, if the last platform + // is zeros ... + if ((UINT32_C(1) - _runningBit) == + (_platforms.GetNum() & UINT32_C(1))) { + _platforms.Back() += bits; + return; + } + + // ... or adding a new platform with zeros, if the last platform + // is ones + _platforms.PushBack(std::min<_WordType>(_num, bits)); + } + + /// Returns true, if bit # index is set. + /// + /// Note: This is a slow operation on TfCompressedBits. + /// Please, use an iterator if possible. Iterators are fast! + /// + bool IsSet(size_t index) const { + if (!TF_VERIFY(index < _num)) { + return false; + } + + size_t platformIndex, bitCount; + return _LinearSearch(index, &platformIndex, &bitCount) == 1; + } + + /// Returns the index of the n-th bit set in this bit set. + /// + /// This function counts the set bits up to the \p nth bit, and returns + /// the index of that n-th set bit. If there are less than \p nth bits set, + /// returns GetSize(). + /// + /// Note: This operation is slower than using an iterator. For forward or + /// reverse iteration, use the iterator instead. + /// + size_t FindNthSet(size_t nth) const { + size_t index = 0; + size_t count = 0; + uint8_t bit = _runningBit; + + // Iterate over all platforms to find the nth set bit using a running + // count of set bits, and an up-to-date index into the bitset. + for (size_t i = 0; i < _platforms.GetNum(); ++i) { + const _WordType platform = _platforms[i]; + + // Since bit toggles between 1 and 0 for every iteration of the + // loop, using it in a conditional guarantees a misprediction every + // time. Doing the multiplication instead is cheap and doesn't + // change the result of the conditional until we find the right + // index. + if (((count + platform) * bit) > nth) { + return index + (nth - count); + } + + index += platform; + count += (platform * bit); + bit = 1 - bit; + } + + // Less than nth bits are set, so return the size. + return _num; + } + + /// Find the next bit set that is higher or equal to index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + /// Note: This is a slow operation on TfCompressedBits. + /// Please, use an iterator if possible. Iterators are fast! + /// + size_t FindNextSet(size_t index) const + { + if (index >= _num) { + return _num; + } + + size_t platformIndex, bitCount; + const uint8_t bit = _LinearSearch(index, &platformIndex, &bitCount); + + if (bit == 1) { + return index; + } + + return bitCount; + } + + /// Finds the next unset bit that has a higher or equal index than index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + /// Note: This is a slow operation on TfCompressedBits. + /// Please, use an iterator if possible. Iterators are fast! 
+ /// + size_t FindPrevSet(size_t index) const + { + if (index >= _num) { + return _num; + } + + size_t platformIndex, bitCount; + const uint8_t bit = _LinearSearch(index, &platformIndex, &bitCount); + + if (bit == 1) { + return index; + } + + const size_t first = bitCount - _platforms[platformIndex]; + if (first > 0) { + return first - 1; + } + + return _num; + } + + /// Finds the next unset bit that has a higher or equal index than index. + /// If no more set bits are found, index returns 'GetSize()'. + /// + /// Note: This is a slow operation on TfCompressedBits. + /// Please, use an iterator if possible. Iterators are fast! + /// + size_t FindNextUnset(size_t index) const + { + if (index >= _num) { + return _num; + } + + size_t platformIndex, bitCount; + const uint8_t bit = _LinearSearch(index, &platformIndex, &bitCount); + + if (bit == 0) { + return index; + } + + return bitCount; + } + + /// Count the bits set, and also return the largest gap between bits. + /// + void Count(size_t *numSet, size_t *maxGap) const { + const uint32_t lastIndex = _platforms.GetNum() - 1; + uint32_t num = 0; + uint32_t max = 0; + uint8_t bit = _runningBit; + for (size_t i = 0; i < _platforms.GetNum(); ++i) { + // Accumulate set bits. + if (bit == 1) { + num += _platforms[i]; + } + // Don't account the leading and trailing zeros as gaps. + else if (i > 0 && i < lastIndex) { + max = std::max(max, _platforms[i]); + } + bit = 1 - bit; + } + *numSet = num; + *maxGap = max; + } + + /// Returns the size of the bit array, ie. the # of bits it can hold. + /// + size_t GetSize() const { + return _num; + } + + /// Returns \c true if this bit array is empty, i.e. it is of size zero. + /// + bool IsEmpty() const { + return _num == 0; + } + + /// Returns the index of the first bit set in the bit array. If no bits + /// are set, the return value is 'GetSize()'. + /// + size_t GetFirstSet() const { + if (_num == 0 || _runningBit == 1) { + return 0; + } + + return _platforms.Front(); + } + + /// Returns the index of the last bit set in the bit array. If no bits + /// are set, the return value is 'GetSize()'. + /// + size_t GetLastSet() const { + // Zero size or all zeros case + if (_num == 0 || (_runningBit == 0 && _platforms.GetNum() == 1)) { + return _num; + } + + // If _runningBit == 1 and number of words is odd or + // _runningBit == 0 and number of words is even + if (_runningBit == (_platforms.GetNum() & 1)) { + return _num - 1; + } + + return _num - 1 - _platforms.Back(); + } + + /// Returns the number of bits currently set in this array. + /// + size_t GetNumSet() const { + size_t numSet = 0; + for (size_t i = 1 - _runningBit; i < _platforms.GetNum(); i += 2) { + numSet += _platforms[i]; + } + return numSet; + } + + /// Returns the number of platforms (zeros or ones) in this bitset. + /// + size_t GetNumPlatforms() const { + if (_num == 0) { + return 0; + } + + return _platforms.GetNum(); + } + + /// Returns the number of set (ones) platforms in this bitset. + /// + size_t GetNumSetPlatforms() const { + if (_num == 0) { + return 0; + } + + const uint32_t numP = _platforms.GetNum(); + return (numP / 2) + (numP & _runningBit); + } + + /// Returns the number of unset (zeros) platforms in this bitset. + /// + size_t GetNumUnsetPlatforms() const { + if (_num == 0) { + return 0; + } + + const uint32_t numP = _platforms.GetNum(); + return (numP / 2) + (numP & (1 - _runningBit)); + } + + /// Returns true, if all the bits in this bit array are set. 
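The Count() and FindNthSet() queries shown above can be exercised as follows; this is a sketch under the same assumed setup as the earlier examples.

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleCount()
{
    TfCompressedBits bits =
        TfCompressedBits::FromString("1x2-0x6-1x4-0x1-1x3");
    TF_AXIOM(bits.GetSize() == 16);

    // Count() returns the number of set bits and the largest run of zeroes
    // between set bits (leading and trailing zeroes do not count as gaps).
    size_t numSet = 0, maxGap = 0;
    bits.Count(&numSet, &maxGap);
    TF_AXIOM(numSet == 9);
    TF_AXIOM(maxGap == 6);

    TF_AXIOM(bits.FindNthSet(2) == 8);   // the third set bit (nth is 0-based)
}
```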
+ /// + bool AreAllSet() const { + return _num == 0 || (_runningBit == 1 && _platforms.GetNum() == 1); + } + + /// Returns true, if all the bits in this bit array are unset. + /// + bool AreAllUnset() const { + return !IsAnySet(); + } + + /// Returns true, if there is at least a single set bit. + /// + bool IsAnySet() const { + return _num > 0 && (_runningBit == 1 || _platforms.GetNum() > 1); + } + + /// Returns true, if there is at least a single unset bit. + /// + bool IsAnyUnset() const { + return _num > 0 && (_runningBit == 0 || _platforms.GetNum() > 1); + } + + /// Returns true if the set bits in this bit array are contiguous. + /// + /// Note: This returns false if there are no set bits. + /// + bool AreContiguouslySet() const { + const uint32_t numP = _platforms.GetNum(); + return + _num > 0 && numP <= 3 && + (numP == 2 || + (_runningBit == 1 && numP == 1) || + (_runningBit == 0 && numP == 3)); + } + + /// Returns the amount of memory this object holds on to. + /// + size_t GetAllocatedSize() const + { + size_t size = sizeof(TfCompressedBits); + if (_platforms.GetNumAllocated() > _WordArray::LOCAL_SIZE) { + size += sizeof(_WordType) * _platforms.GetNumAllocated(); + } + return size; + } + + /// Returns a hash for this instance. + /// + TF_API + size_t GetHash() const; + + /// Returns a string representing the bits for debugging with bits + /// ordered from left to right with increasing indices. + /// + TF_API + std::string GetAsStringLeftToRight() const; + + /// Returns a string representing the bits for debugging with bits + /// ordered from right to left with increasing indices. + /// + TF_API + std::string GetAsStringRightToLeft() const; + + /// Returns a string representing the bits for debugging with bits + /// represented in run-length encoding form. + /// + TF_API + std::string GetAsRLEString() const; + + /// Returns a bitset constructed from the supplied string representation. + /// + /// The string representation can be either a RLE encoded bitset, such as + /// '1x5-0x5-1x100', or a string of zeros and ones, such as '1111100000'. + /// Note that whitespace anywhere in the string representation is ignored. + /// + /// Any character other than whitespace, a digit, 'x' or '-' in the string + /// representation is considered invalid. Invalid string representations + /// will return an empty bitset. + /// An empty string representation (or a string purely comprised of + /// whitespace), however, is considered a valid representation describing + /// an empty bitset. + /// + TF_API + static TfCompressedBits FromString(const std::string &source); + + /// \name Operators + /// @{ + + /// Returns true if this == \p rhs. + /// + bool operator==(const TfCompressedBits &rhs) const { + if (this == &rhs || (_num == 0 && rhs._num == 0)) { + return true; + } + + // Fast comparisons, first + if (_num == rhs._num && + _runningBit == rhs._runningBit && + _platforms.GetNum() == rhs._platforms.GetNum()) { + + // Worst case, scenario: Must compare every word + for (size_t i = 0; i < _platforms.GetNum(); ++i) { + // Early bailout, if two words don't match + if (_platforms[i] != rhs._platforms[i]) { + return false; + } + } + + // All words match + return true; + } + + // Early comparison failed + return false; + } + + /// Returns true if this != \p rhs. + /// + bool operator!=(const TfCompressedBits &rhs) const { + return !(*this == rhs); + } + + /// Ands these bits with the \p rhs bits. + /// + /// The resulting bit set is the intersection of the two bit sets. 
+ /// + TfCompressedBits &operator&=(const TfCompressedBits &rhs) { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return *this; + } + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhs._platforms.GetNum(); + const uint8_t bitA = _runningBit; + const uint8_t bitB = rhs._runningBit; + + // Early bailout: This is all zeroes or all ones + if (numA == 1) { + if (bitA == 0) { + return *this; + } + + _runningBit = bitB; + _platforms = rhs._platforms; + return *this; + } + + // Early bailout: Rhs is all zeroes or all ones + if (numB == 1) { + if (bitB == 1) { + return *this; + } + + ClearAll(); + return *this; + } + + // Early bailout: No bits will overlap, if sets are disjoint + if (_AreBoundsDisjoint(rhs)) { + ClearAll(); + return *this; + } + + return _Logical<_And>(bitB, rhs._platforms); + } + + /// Returns these bits and'ed with \p rhs. + /// + TfCompressedBits operator&(const TfCompressedBits &rhs) const { + TfCompressedBits r(*this); + r &= rhs; + return r; + } + + /// Ors these bits with the \p rhs bits. + /// + /// The resulting bit set is the union of the two bit sets. + /// + TfCompressedBits &operator|=(const TfCompressedBits &rhs) { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return *this; + } + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhs._platforms.GetNum(); + const uint8_t bitA = _runningBit; + const uint8_t bitB = rhs._runningBit; + + // Early bailout: This is all zeroes or all ones + if (numA == 1) { + if (bitA == 1) { + return *this; + } + + _runningBit = bitB; + _platforms = rhs._platforms; + return *this; + } + + // Early bailout: Rhs is all zeroes or all ones + if (numB == 1) { + if (bitB == 0) { + return *this; + } + + SetAll(); + return *this; + } + + // If this set already contains all the bits in rhs, there is no + // point in proceeding with the full logical OR. Note, that although + // this operation needs to look at all the platforms, it only performs + // reads from memory, which makes it faster than the logical OR. If + // this check fails, the data is already prefetched and ready to be + // consumed by the logical OR. + if (Contains(rhs)) { + return *this; + } + + return _Logical<_Or>(bitB, rhs._platforms); + } + + /// Returns these bits or'ed with the \p rhs. + /// + TfCompressedBits operator|(const TfCompressedBits &rhs) const { + TfCompressedBits r(*this); + r |= rhs; + return r; + } + + /// Xors these bits with the \p rhs bits. + /// + /// The resulting bit set is the union of the two bit sets minus the + /// intersection of the two bit sets. + /// + TfCompressedBits &operator^=(const TfCompressedBits &rhs) { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return *this; + } + + // Early bailout: This is all zeroes + if (AreAllUnset()) { + *this = rhs; + return *this; + } + + // Early bailout: Rhs is all zeroes + if (rhs.AreAllUnset()) { + return *this; + } + + return _Logical<_Xor>(rhs._runningBit, rhs._platforms); + } + + /// Returns these bits xor'ed with \p rhs. + /// + TfCompressedBits operator^(const TfCompressedBits &rhs) const { + TfCompressedBits r(*this); + r ^= rhs; + return r; + } + + /// Removes all bits in the \p rhs bits from these bits. + /// + /// The resulting bit set is the asymmetric set difference of + /// the two bit sets. 
+ /// + TfCompressedBits &operator-=(const TfCompressedBits &rhs) { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return *this; + } + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhs._platforms.GetNum(); + const uint8_t bitA = _runningBit; + const uint8_t bitB = rhs._runningBit; + + // Early bailout: This is all zeroes or all ones + if (numA == 1) { + if (bitA == 0) { + return *this; + } + + _runningBit = 1 - bitB; + _platforms = rhs._platforms; + return *this; + } + + // Early bailout: Rhs is all zeroes or all ones + if (numB == 1) { + if (bitB == 0) { + return *this; + } + + ClearAll(); + return *this; + } + + // Early bailout: No bits will be subtracted, if sets are disjoint. + // Note, that although this operation needs to look at all the + // platforms, it only performs reads from memory, which makes it faster + // than the logical AND. If this check fails, the data is already + // prefetched and ready to be consumed by the logical AND. + if (_AreBoundsDisjoint(rhs) || !HasNonEmptyIntersection(rhs)) { + return *this; + } + + return _Logical<_And>(1 - bitB, rhs._platforms); + } + + /// Returns bits with all the bits in \p rhs removed from these bits. + /// + TfCompressedBits operator-(const TfCompressedBits &rhs) const { + TfCompressedBits r(*this); + r -= rhs; + return r; + } + + /// Flips all bits. + /// + /// The resulting bit set is the complement of this bit set. + /// + TfCompressedBits &Complement() { + if (_num != 0) { + _runningBit = 1 - _runningBit; + } + return *this; + } + + /// Returns bit at \p index. + /// + /// Note: This is a slow operation on TfCompressedBits! + /// + bool operator[](size_t index) const { + return IsSet(index); + } + + /// Shifts to the right (see ShiftRight) + /// + TfCompressedBits &operator>>=(size_t bits) { + ShiftRight(bits); + return *this; + } + + /// Returns bits shifted to the right. + /// + TfCompressedBits operator>>(size_t bits) const { + TfCompressedBits r(*this); + r >>= bits; + return r; + } + + /// Shifts to the left (see ShiftLeft) + /// + TfCompressedBits &operator<<=(size_t bits) { + ShiftLeft(bits); + return *this; + } + + /// Returns bits shifted to the left. + /// + TfCompressedBits operator<<(size_t bits) const { + TfCompressedBits r(*this); + r <<= bits; + return r; + } + + /// @} + + + /// Returns true if the result of the intersection with \p rhs would be + /// non-zero. + /// + /// This method can be used for efficiency because it doesn't perform + /// the full AND operation on a copy, and it can return early. + /// + bool HasNonEmptyIntersection(const TfCompressedBits &rhs) const { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return false; + } + + uint8_t bitA = _runningBit; + uint8_t bitB = rhs._runningBit; + if (bitA & bitB) { + return true; + } + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhs._platforms.GetNum(); + if (numA == 1) { + if (bitA == 0) { + return false; + } + + return rhs.IsAnySet(); + } + + if (numB == 1) { + if (bitB == 0) { + return false; + } + + return IsAnySet(); + } + + // We can bail out early if the ranges of set bits do not overlap + if (_AreBoundsDisjoint(rhs)) { + return false; + } + + return _HasLogical<_And>(bitB, rhs._platforms); + } + + /// Returns true if the result of an asymmetric set different is non-zero. + /// This is the equivalent to computing: + /// return (this - rhs).GetNumSet() != 0 + /// but avoids creating temporary copies. 
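A short sketch contrasting the difference queries above with the copying operator- (the example name and setup lines are assumptions).

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleSetRelations()
{
    TfCompressedBits a = TfCompressedBits::FromString("1x8-0x8");
    TfCompressedBits b = TfCompressedBits::FromString("0x4-1x4-0x8");

    TF_AXIOM(a.Contains(b));                 // all of b's set bits are in a
    TF_AXIOM(!b.Contains(a));
    TF_AXIOM(a.HasNonEmptyDifference(b));    // a - b is non-empty...
    TF_AXIOM(!b.HasNonEmptyDifference(a));   // ...but b - a is empty
    TF_AXIOM(a.HasNonEmptyIntersection(b));

    TfCompressedBits diff = a - b;           // bits 0..3
    TF_AXIOM(diff.GetNumSet() == 4);
    TF_AXIOM(diff.GetLastSet() == 3);
}
```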
+ /// + bool HasNonEmptyDifference(const TfCompressedBits &rhs) const { + if (!TF_VERIFY(_num == rhs._num) || + _num == 0 || rhs._num == 0) { + return false; + } + + uint8_t bitA = _runningBit; + uint8_t bitB = rhs._runningBit; + if (bitA && !bitB) { + return true; + } + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhs._platforms.GetNum(); + if (numA == 1) { + if (bitA == 0) { + return false; + } + + return rhs.IsAnyUnset(); + } + + if (numB == 1) { + if (bitB == 0) { + return IsAnySet(); + } + + return false; + } + + // We can bail out early, if the ranges of set bits do not overlap. + // Check the first set bits first, because checking for the last set + // bit is more expensive. + const size_t firstSet = GetFirstSet(); + const size_t rhsFirstSet = rhs.GetFirstSet(); + if (firstSet < rhsFirstSet) { + return true; + } + + // If we still haven't bailed out yet, check the last set bit. + const size_t lastSet = GetLastSet(); + const size_t rhsLastSet = rhs.GetLastSet(); + if (lastSet > rhsLastSet || + firstSet > rhsLastSet || + lastSet < rhsFirstSet) { + return true; + } + + return _HasLogical<_And>(1 - bitB, rhs._platforms); + } + + /// Returns true if this bit array contains \p rhs by computing: + /// (rhs - this).GetNumSet() == 0. + /// + /// Ie. it will return true if all bits of \p rhs are also set in this. + /// + bool Contains(const TfCompressedBits &rhs) const { + return !rhs.HasNonEmptyDifference(*this); + } + + /// Returns an empty TfBits. + /// + static const TfCompressedBits &GetEmpty() { + static TfStaticData emptyBits; + return *emptyBits; + } + + /// Decompress the bits into a TfBits array. + /// + TF_API + void Decompress(TfBits *bits) const; + + /// Iterator support. + /// + template + class View; + + /// Returns an iteratable view for the bits that steps over all bits. + /// + typedef View AllView; + inline AllView GetAllView() const; + + /// Returns an iteratable view for the bits that steps over all set bits. + /// + typedef View AllSetView; + inline AllSetView GetAllSetView() const; + + /// Returns an iteratable view for the bits that steps over all unset bits. + /// + typedef View AllUnsetView; + inline AllUnsetView GetAllUnsetView() const; + + /// Returns an iteratable view for the bits that steps over all platforms. + /// + typedef View PlatformsView; + inline PlatformsView GetPlatformsView() const; + +private: + // Functor for logical operation: AND + struct _And { + inline uint8_t operator() (uint8_t a, uint8_t b) { + return a & b; + } + }; + + // Functor for logical operation: OR + struct _Or { + inline uint8_t operator() (uint8_t a, uint8_t b) { + return a | b; + } + }; + + // Functor for logical operation: XOR + struct _Xor { + inline uint8_t operator() (uint8_t a, uint8_t b) { + return a ^ b; + } + }; + + // This method performs a logical operation on the passed in running bit + // and word array. OP denotes a functor implementing the logical operation. + // The idea is that the compiler will be smart enough to inline the + // operation, without actually having to call the function. 
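For instance (illustrative only), merging two RLE streams under a logical operation walks both platform arrays once and emits a new platform whenever the combined bit value changes, which the public operators make easy to observe:

```cpp
#include "pxr/base/tf/compressedBits.h"
#include "pxr/base/tf/diagnostic.h"
PXR_NAMESPACE_USING_DIRECTIVE

void ExampleLogicalMerge()
{
    TfCompressedBits a = TfCompressedBits::FromString("1x4-0x4");          // 11110000
    TfCompressedBits b = TfCompressedBits::FromString("1x2-0x2-1x2-0x2");  // 11001100

    TF_AXIOM((a & b).GetAsRLEString() == "1x2-0x6");        // 11000000
    TF_AXIOM((a | b).GetAsRLEString() == "1x6-0x2");        // 11111100
    TF_AXIOM((a ^ b).GetAsRLEString() == "0x2-1x4-0x2");    // 00111100
}
```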
+ template < class OP > TfCompressedBits & + _Logical(uint8_t rhsRunningBit, const _WordArray &rhsPlatforms) { + OP op; + + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhsPlatforms.GetNum(); + uint8_t bitA = _runningBit; + uint8_t bitB = rhsRunningBit; + + uint8_t b = op(bitA, bitB); + _WordArray result; + _runningBit = b; + + size_t indexA = 0; + size_t indexB = 0; + _WordType platformA = _platforms[indexA]; + _WordType platformB = rhsPlatforms[indexB]; + + uint32_t newTotal = 0; + _WordType newPlatform = 0; + + while (true) { + if (platformA < platformB) { + newTotal += platformA; + newPlatform += platformA; + bitA = 1 - bitA; + + // Commit the new platform + const uint8_t newBit = op(bitA, bitB); + if (newBit != b) { + result.PushBack(newPlatform); + newPlatform = 0; + b = newBit; + } + + // Move on to the next platform + ++indexA; + platformB = platformB - platformA; + if (indexA == numA) { + platformA = _num - newTotal; + } else if (indexA < numA) { + platformA = _platforms[indexA]; + } + + } else if (platformA > platformB) { + newTotal += platformB; + newPlatform += platformB; + bitB = 1 - bitB; + + // Commit the new platform + const uint8_t newBit = op(bitA, bitB); + if (newBit != b) { + result.PushBack(newPlatform); + newPlatform = 0; + b = newBit; + } + + // Move on to the next platform + ++indexB; + platformA = platformA - platformB; + if (indexB == numB) { + platformB = _num - newTotal; + } else if(indexB < numB) { + platformB = rhsPlatforms[indexB]; + } + + } else { + newTotal += platformA; + newPlatform += platformA; + bitA = 1 - bitA; + bitB = 1 - bitB; + + // Commit the new platform + const uint8_t newBit = op(bitA, bitB); + if (newBit != b || newTotal >= _num) { + result.PushBack(newPlatform); + newPlatform = 0; + b = newBit; + } + + if (newTotal >= _num) + break; + + // Move on to the next platforms + ++indexA; + if (indexA == numA) { + platformA = _num - newTotal; + } else if (indexA < numA) { + platformA = _platforms[indexA]; + } + + ++indexB; + if (indexB == numB) { + platformB = _num - newTotal; + } else if (indexB < numB) { + platformB = rhsPlatforms[indexB]; + } + } + } + + result.MoveInto(_platforms); + return *this; + } + + // Performs a logical operation, but breaks out and returns true, as soon + // as the logical operation returns true. If the logical operation never + // returns true, false is returned. + template < class OP > bool + _HasLogical(uint8_t rhsRunningBit, const _WordArray &rhsPlatforms) const { + OP op; + + uint8_t bitA = _runningBit; + uint8_t bitB = rhsRunningBit; + const uint32_t numA = _platforms.GetNum(); + const uint32_t numB = rhsPlatforms.GetNum(); + + size_t indexA = 0; + size_t indexB = 0; + _WordType sumPlatformA = _platforms[indexA]; + _WordType sumPlatformB = rhsPlatforms[indexB]; + while (indexA < numA && indexB < numB) { + if (op(bitA, bitB)) { + return true; + } + + if (sumPlatformA < sumPlatformB) { + bitA = 1 - bitA; + ++indexA; + sumPlatformA += _platforms[indexA]; + + } else if (sumPlatformA > sumPlatformB) { + bitB = 1 - bitB; + ++indexB; + sumPlatformB += rhsPlatforms[indexB]; + + } else { + bitA = 1 - bitA; + bitB = 1 - bitB; + ++indexA; + ++indexB; + + if (indexA >= numA || indexB >= numB) { + return false; + } + + sumPlatformA += _platforms[indexA]; + sumPlatformB += rhsPlatforms[indexB]; + } + } + + return false; + } + + // Do a liner search for the bit index, returning its bit value. 
+ // Also returns the index of that bit in the word array, as well as the + // bitCount denoting the number of bits counted up until the range that + // terminates the current word, the index is found in. + uint8_t _LinearSearch( + size_t index, size_t *platformIndex, size_t *bitCount) const { + uint8_t bit = _runningBit; + size_t count = 0; + size_t i; + + for (i = 0; i < _platforms.GetNum(); ++i) { + count += _platforms[i]; + if (count > index) { + break; + } + bit = 1 - bit; + } + + *platformIndex = i; + *bitCount = count; + return bit; + } + + // Returns true if this bit array's bounds are disjoint from the bounds + // of the rhs bit array. The two are considered disjoint if the last bit + // set of array A is at a lower index than the first bit set on array B + // (or vice versa). + // Note, that the bit arrays may still be disjoint, even if this method + // returns false, but if this method returns true, the bit arrays are + // guaranteed to be disjoint. This is basically a very cheap early out for + // the Overlaps() method. + bool _AreBoundsDisjoint(const TfCompressedBits &rhs) const { + return + GetLastSet() < rhs.GetFirstSet() || + GetFirstSet() > rhs.GetLastSet(); + } + + // The word array, storing the bit platforms. + _WordArray _platforms; + + // The size of this bit array in number of bits. + uint32_t _num; + + // The value of the running bit, indicating what the bit value of the first + // word is. + uint8_t _runningBit; + +}; + +template +class TfCompressedBits::View +{ +public: + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const uint32_t; + using reference = const uint32_t &; + using pointer = const uint32_t *; + using difference_type = const uint32_t; + + const_iterator() : + _bits(nullptr), + _platformIndex(0), + _bitIndex(0), + _bitCounter(0), + _value(0) + {} + + reference operator*() const { return dereference(); } + pointer operator->() const { return &(dereference()); } + + const_iterator& operator++() { + increment(); + return *this; + } + + const_iterator operator++(int) { + const_iterator r(*this); + increment(); + return r; + } + + bool operator==(const const_iterator& rhs) const { + return equal(rhs); + } + + bool operator!=(const const_iterator& rhs) const { + return !equal(rhs); + } + + bool IsSet() const { + return _value == 1; + } + + bool IsAtEnd() const { + if (!_bits) { + return true; + } + return _bitIndex >= _bits->GetSize(); + } + + private: + friend class View; + + const_iterator( + const TfCompressedBits *bits, + uint32_t platformIndex, + uint32_t bitIndex, + uint8_t value) : + _bits(bits), + _platformIndex(platformIndex), + _bitIndex(bitIndex), + _bitCounter(0), + _value(value) + {} + + bool equal(const const_iterator &rhs) const { + return _bits == rhs._bits && _bitIndex == rhs._bitIndex; + } + + void increment() { + // Note, this looks like quite a bit of logic, but mode is a + // compile time constant. The compiler to the rescue! 
+ if (!_bits) { + return; + } + + // Increment bit index + ++_bitIndex; + + // Increment bit counter (counting the current word) + ++_bitCounter; + + // If the bit counter surpasses the current word, + // skip ahead to the next word + if (_bitCounter >= _bits->_platforms[_platformIndex]) { + + // If the iterator mode is not All, look at + // every other word + const uint32_t numP = + _bits->_platforms.GetNum(); + if ((mode == Mode::AllSet || mode == Mode::AllUnset) && + (_platformIndex + 1) < numP) { + _bitIndex += _bits->_platforms[_platformIndex + 1]; + _platformIndex += 2; + } + + // Otherwise, look at every word and toggle + // the value bit + else { + ++_platformIndex; + _value = 1 - _value; + } + + // Reset the bit counter + _bitCounter = 0; + } + } + + const uint32_t &dereference() const { + return _bitIndex; + } + + const TfCompressedBits *_bits; + uint32_t _platformIndex; + uint32_t _bitIndex; + uint32_t _bitCounter; + uint8_t _value; + }; + + // Support for TF_FOR_ALL. + typedef const_iterator iterator; + + const_iterator begin() const { + const uint8_t bit = _bits->_runningBit; + + // Skip ahead one word, if looking at AllSet/AllUnset and the + // first word describes an unset/set platform of bits + if ((mode == Mode::AllSet && bit == 0) || + (mode == Mode::AllUnset && bit == 1)) { + return const_iterator(_bits, 1, _bits->_platforms[0], 1 - bit); + } + + return const_iterator(_bits, 0, 0, bit); + } + + const_iterator end() const { + return const_iterator(_bits, 0, _bits->GetSize(), 0); + } + + /// Return true, if the view is empty. + /// + bool IsEmpty() const { + return begin() == end(); + } + +private: + + // The TfCompressedBits can create new views. + friend class TfCompressedBits; + + // Ctor. + View(const TfCompressedBits *bits) : + _bits(bits) + {} + + const TfCompressedBits *_bits; +}; + +// Specialize the platform view because its iterators are much simpler than +// the per-bit views. 
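Before that specialization, it helps to spell out the representation these iterators walk: TfCompressedBits stores only the length of each run of identical bits (the "platforms" in _platforms) plus the value of the first run (_runningBit), so testing a bit is a linear walk over run lengths and the logical operators merge runs instead of touching words. A minimal standalone sketch of the same idea, using hypothetical names rather than the TfCompressedBits API, and assuming both operands cover the same number of bits:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the compressed form: the value of the first run
// plus the length of every run of identical bits (runs alternate in value).
struct RleBits {
    uint8_t firstBit = 0;
    std::vector<uint32_t> runs;
};

// Test one bit by walking runs until the cumulative length passes the index
// (the same linear search performed over _platforms above).
inline uint8_t TestBit(const RleBits &b, uint32_t index) {
    uint8_t bit = b.firstBit;
    uint32_t count = 0;
    for (uint32_t run : b.runs) {
        count += run;
        if (count > index) {
            return bit;
        }
        bit = 1 - bit;
    }
    return 0;  // index past the end
}

// AND two encodings of the same total length by merging runs: always consume
// the shorter remaining run, and commit an output run whenever the combined
// bit value changes (the same shape as the templated merge above).
inline RleBits And(const RleBits &a, const RleBits &b) {
    RleBits out;
    out.firstBit = a.firstBit & b.firstBit;

    uint8_t bitA = a.firstBit, bitB = b.firstBit, cur = out.firstBit;
    std::size_t ia = 0, ib = 0;
    uint32_t remA = a.runs.empty() ? 0 : a.runs[0];
    uint32_t remB = b.runs.empty() ? 0 : b.runs[0];
    uint32_t runLen = 0;

    while (ia < a.runs.size() && ib < b.runs.size()) {
        const uint32_t step = remA < remB ? remA : remB;
        runLen += step;
        remA -= step;
        remB -= step;
        if (remA == 0) { bitA = 1 - bitA; if (++ia < a.runs.size()) remA = a.runs[ia]; }
        if (remB == 0) { bitB = 1 - bitB; if (++ib < b.runs.size()) remB = b.runs[ib]; }

        const uint8_t next = bitA & bitB;
        if (next != cur) {            // value flips: commit the run
            out.runs.push_back(runLen);
            runLen = 0;
            cur = next;
        }
    }
    if (runLen > 0) {
        out.runs.push_back(runLen);   // commit the final run
    }
    return out;
}
```

Merging runs this way makes the cost proportional to the number of value changes rather than the number of bits, which is why large, densely clustered masks stay cheap; the platforms view specialization below then hands those runs out directly.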
+template <> +class TfCompressedBits::View +{ +public: + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const uint32_t; + using reference = const uint32_t &; + using pointer = const uint32_t *; + using difference_type = const uint32_t; + + const_iterator() : + _platform(nullptr), + _bitIndex(0), + _value(0) + {} + + reference operator*() const { return dereference(); } + pointer operator->() const { return &(dereference()); } + + const_iterator& operator++() { + increment(); + return *this; + } + + const_iterator operator++(int) { + const_iterator r(*this); + increment(); + return r; + } + + bool operator==(const const_iterator& rhs) const { + return equal(rhs); + } + + bool operator!=(const const_iterator& rhs) const { + return !equal(rhs); + } + + bool IsSet() const { + return _value == 1; + } + + uint32_t GetPlatformSize() const { + return *_platform; + } + + private: + friend class View; + + const_iterator( + const uint32_t *platform, + uint8_t value) + : _platform(platform) + , _bitIndex(0) + , _value(value) + {} + + bool equal(const const_iterator &rhs) const { + return _platform == rhs._platform; + } + + void increment() { + _bitIndex += *_platform; + ++_platform; + _value = 1 - _value; + } + + const uint32_t &dereference() const { + return _bitIndex; + } + + const uint32_t *_platform; + uint32_t _bitIndex; + uint8_t _value; + }; + + const_iterator begin() const { + return const_iterator(_bits->_platforms.Begin(), _bits->_runningBit); + } + + const_iterator end() const { + return const_iterator(_bits->_platforms.End(), 0); + } + + /// Return true, if the view is empty. + /// + bool IsEmpty() const { + return begin() == end(); + } + +private: + + // The TfCompressedBits can create new views. + friend class TfCompressedBits; + + // Ctor. + View(const TfCompressedBits *bits) : + _bits(bits) + {} + + const TfCompressedBits *_bits; +}; + +TfCompressedBits::AllView +TfCompressedBits::GetAllView() const +{ + return View(this); +} + +TfCompressedBits::AllSetView +TfCompressedBits::GetAllSetView() const +{ + return View(this); +} + +TfCompressedBits::AllUnsetView +TfCompressedBits::GetAllUnsetView() const +{ + return View(this); +} + +TfCompressedBits::PlatformsView +TfCompressedBits::GetPlatformsView() const +{ + return View(this); +} + +// Specializing, so TfIterator knows to retain a copy when iterating. +template<> +struct Tf_ShouldIterateOverCopy : + std::true_type +{}; + +template<> +struct Tf_ShouldIterateOverCopy : + std::true_type +{}; + +template<> +struct Tf_ShouldIterateOverCopy : + std::true_type +{}; + +//! \brief Output a TfBits, as a stream of 0s and 1s. +// \ingroup group_tf_DebuggingOutput +inline std::ostream& +operator<<(std::ostream &out, const TfCompressedBits &bits) { + out << bits.GetAsStringLeftToRight(); + return out; +} + +PXR_NAMESPACE_CLOSE_SCOPE + +#endif diff --git a/pxr/base/tf/errorOverview.dox b/pxr/base/tf/errorOverview.dox index b4ef713e5e..89f2bc1ccc 100644 --- a/pxr/base/tf/errorOverview.dox +++ b/pxr/base/tf/errorOverview.dox @@ -192,10 +192,10 @@ under a scope named Error. 
// wrapOofError.cpp #include "pxr/base/tf/pyEnum.h" -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/scope.hpp" -using namespace boost::python; +using namespace pxr_boost::python; void wrapOofError() { typedef OofError This; diff --git a/pxr/base/tf/fileUtils.cpp b/pxr/base/tf/fileUtils.cpp index d4b10e4c2a..9f8282bed9 100644 --- a/pxr/base/tf/fileUtils.cpp +++ b/pxr/base/tf/fileUtils.cpp @@ -66,8 +66,8 @@ Tf_HasAttribute( if (path.back() == '/' || path.back() == '\\') resolveSymlinks = true; - const DWORD attribs = - GetFileAttributesW(ArchWindowsUtf8ToUtf16(path).c_str()); + std::wstring pathW = ArchWindowsUtf8ToUtf16(path); + DWORD attribs = GetFileAttributesW(pathW.c_str()); if (attribs == INVALID_FILE_ATTRIBUTES) { if (attribute == 0 && (GetLastError() == ERROR_FILE_NOT_FOUND || @@ -77,6 +77,30 @@ Tf_HasAttribute( } return false; } + + // Ignore reparse points on network volumes. They can't be resolved + // properly, so simply remove the reparse point attribute and treat + // it like a regular file/directory. + if ((attribs & FILE_ATTRIBUTE_REPARSE_POINT) != 0) { + // Calling PathIsNetworkPath sometimes sets the "last error" to an + // error code indicating an incomplete overlapped I/O function. We + // want to ignore this error. + DWORD olderr = GetLastError(); + if (PathIsNetworkPathW(pathW.c_str())) { + attribs &= ~FILE_ATTRIBUTE_REPARSE_POINT; + } + SetLastError(olderr); + } + + // Because we remove the REPARSE_POINT attribute above for reparse points + // on network shares, the behavior of this bit of code will be slightly + // different than for reparse points on non-network volumes. We will not + // try to follow the link and get the attributes of the destination. This + // will result in a link to an invalid destination directory claiming that + // the directory exists. It might be possible to use some other function + // to test for the existence of the destination directory in this case + // (such as FindFirstFile), but doing this doesn't seem to be relevent + // to how USD uses this method. if (!resolveSymlinks || (attribs & FILE_ATTRIBUTE_REPARSE_POINT) == 0) { return attribute == 0 || (attribs & attribute) == expected; } diff --git a/pxr/base/tf/fileUtils.h b/pxr/base/tf/fileUtils.h index ca6baded45..243a56ffca 100644 --- a/pxr/base/tf/fileUtils.h +++ b/pxr/base/tf/fileUtils.h @@ -25,6 +25,11 @@ PXR_NAMESPACE_OPEN_SCOPE /// If \p resolveSymlinks is false (default), the path is checked using /// lstat(). if \p resolveSymlinks is true, the path is checked using stat(), /// which resolves all symbolic links in the path. +/// +/// On Windows, if the path points to a reparse point symlink on a network +/// share, even if resolveSymlinks is true we are unable to follow the +/// symlink, and so will return true even if the destination of the symlink +/// does not exist. TF_API bool TfPathExists(std::string const& path, bool resolveSymlinks = false); @@ -33,6 +38,11 @@ bool TfPathExists(std::string const& path, bool resolveSymlinks = false); /// If \p resolveSymlinks is false (default), the path is checked using /// lstat(). if \p resolveSymlinks is true, the path is checked using stat(), /// which resolves all symbolic links in the path. +/// +/// On Windows, if the path points to a reparse point symlink on a network +/// share, even if resolveSymlinks is true we are unable to follow the +/// symlink, and so will return true even if the destination of the symlink +/// does not exist. 
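The Tf_HasAttribute change these notes describe hinges on two details that are easy to miss: PathIsNetworkPathW can overwrite the thread's last-error value, so the call is bracketed with GetLastError/SetLastError, and a reparse point on a network volume simply has its reparse attribute dropped so it is treated as an ordinary file or directory. A small sketch of that pattern in isolation, with a hypothetical helper name rather than the Tf_HasAttribute signature:

```cpp
#include <string>

#include <Windows.h>
#include <Shlwapi.h>  // PathIsNetworkPathW; link against Shlwapi.lib

// Hypothetical helper: fetch the attributes for a path, dropping the
// reparse-point flag when the path lives on a network volume.
static DWORD
GetAttributesIgnoringNetworkReparsePoints(const std::wstring &pathW)
{
    DWORD attribs = GetFileAttributesW(pathW.c_str());
    if (attribs == INVALID_FILE_ATTRIBUTES) {
        return attribs;
    }

    if ((attribs & FILE_ATTRIBUTE_REPARSE_POINT) != 0) {
        // PathIsNetworkPathW may clobber the "last error" (e.g. with an
        // incomplete overlapped I/O code), so preserve it around the call.
        const DWORD olderr = GetLastError();
        if (PathIsNetworkPathW(pathW.c_str())) {
            attribs &= ~FILE_ATTRIBUTE_REPARSE_POINT;
        }
        SetLastError(olderr);
    }
    return attribs;
}
```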
TF_API bool TfIsDir(std::string const& path, bool resolveSymlinks = false); @@ -41,10 +51,21 @@ bool TfIsDir(std::string const& path, bool resolveSymlinks = false); /// If \p resolveSymlinks is false (default), the path is checked using /// lstat(). if \p resolveSymlinks is true, the path is checked using stat(), /// which resolves all symbolic links in the path. +/// +/// On Windows, if the path points to a reparse point symlink on a network +/// share, even if resolveSymlinks is true we are unable to follow the +/// symlink, and so will return true even if the destination of the symlink +/// does not exist. TF_API bool TfIsFile(std::string const& path, bool resolveSymlinks = false); /// Returns true if the path exists and is a symbolic link. +/// +/// On Windows, if the path points to a reparse point symlink on a network +/// share, we are unable to follow the symlink, so we will return false for +/// this directory even though it is a link. This is because any attempt to +/// actually follow this link will fail, so it is safer to pretend it is not +/// actually a link. TF_API bool TfIsLink(std::string const& path); diff --git a/pxr/base/tf/makePyConstructor.h b/pxr/base/tf/makePyConstructor.h index fe3a12ee48..5707f9efc6 100644 --- a/pxr/base/tf/makePyConstructor.h +++ b/pxr/base/tf/makePyConstructor.h @@ -30,14 +30,14 @@ #include "pxr/base/arch/demangle.h" -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/def_visitor.hpp" +#include "pxr/external/boost/python/dict.hpp" +#include "pxr/external/boost/python/errors.hpp" +#include "pxr/external/boost/python/list.hpp" +#include "pxr/external/boost/python/object/iterator.hpp" +#include "pxr/external/boost/python/raw_function.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/type_id.hpp" #include #include @@ -65,26 +65,26 @@ PXR_NAMESPACE_OPEN_SCOPE // TfMakePyConstructorWithVarArgs may be used to wrap an object so that it // may be constructed with a variable number of positional and keyword // arguments. The last two arguments of the function being wrapped must -// be a boost::python::tuple and dict. These will contain the remaining +// be a pxr_boost::python::tuple and dict. These will contain the remaining // positional and keyword args after required arguments are parsed. // // Example usage: // // static MyObjectRefPtr MyObjectFactory( // int formalArg1, const std::string& formalArg2, -// const boost::python::tuple& args, const boost::python::dict& kwargs); +// const pxr_boost::python::tuple& args, const pxr_boost::python::dict& kwargs); // // class_("MyClass", no_init) // .def(TfPyRefAndWeakPtr()) // .def(TfMakePyConstructorWithVarArgs(MyObjectFactory)) // .def(...) // -// NOTE: The current implementation does not handle boost::python::arg for +// NOTE: The current implementation does not handle pxr_boost::python::arg for // specifying keywords for required arguments. namespace Tf_MakePyConstructor { -namespace bp = boost::python; +namespace bp = pxr_boost::python; template struct InitVisitor : bp::def_visitor > { @@ -258,8 +258,8 @@ struct _RefPtrFactoryConverter { // Required for boost.python signature generator, in play when // BOOST_PYTHON_NO_PY_SIGNATURES is undefined. 
PyTypeObject const *get_pytype() const { - return boost::python::objects::registered_class_object( - boost::python::type_id()).get(); + return pxr_boost::python::objects::registered_class_object( + pxr_boost::python::type_id()).get(); } }; @@ -368,7 +368,7 @@ struct TfPyRefPtrFactory : public Tf_MakePyConstructor::RefPtrFactory {}; template struct Tf_PySequenceToListConverterRefPtrFactory; -/// A \c boost::python result converter generator which converts standard +/// A \c pxr_boost::python result converter generator which converts standard /// library sequences to lists of python owned objects. struct TfPySequenceToListRefPtrFactory { template @@ -385,17 +385,17 @@ struct Tf_PySequenceToListConverterRefPtrFactory { return true; } PyObject *operator()(T seq) const { - using namespace boost::python; + using namespace pxr_boost::python; typedef typename Tf_MakePyConstructor::RefPtrFactory<>:: apply::type RefPtrFactory; - boost::python::list l; + pxr_boost::python::list l; for (typename SeqType::const_iterator i = seq.begin(); i != seq.end(); ++i) { l.append(object(handle<>(RefPtrFactory()(*i)))); } - return boost::python::incref(l.ptr()); + return pxr_boost::python::incref(l.ptr()); } // Required for boost.python signature generator, in play when // BOOST_PYTHON_NO_PY_SIGNATURES is undefined. diff --git a/pxr/base/tf/moduleDeps.cpp b/pxr/base/tf/moduleDeps.cpp index 8ce3addbd2..d750239c0a 100644 --- a/pxr/base/tf/moduleDeps.cpp +++ b/pxr/base/tf/moduleDeps.cpp @@ -18,7 +18,8 @@ PXR_NAMESPACE_OPEN_SCOPE TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { // List of direct dependencies for this library. const std::vector reqs = { - TfToken("arch") + TfToken("arch"), + TfToken("python") }; TfScriptModuleLoader::GetInstance(). RegisterLibrary(TfToken("tf"), TfToken("pxr.Tf"), reqs); diff --git a/pxr/base/tf/pxrTslRobinMap/robin_growth_policy.h b/pxr/base/tf/pxrTslRobinMap/robin_growth_policy.h index bb3310050b..9745f74086 100644 --- a/pxr/base/tf/pxrTslRobinMap/robin_growth_policy.h +++ b/pxr/base/tf/pxrTslRobinMap/robin_growth_policy.h @@ -38,6 +38,16 @@ // Pixar modification, modify namespace for isolation. 
#include "pxr/pxr.h" +// A change of the major version indicates an API and/or ABI break (change of +// in-memory layout of the data structure) +#define PXR_TSL_RH_VERSION_MAJOR 1 +// A change of the minor version indicates the addition of a feature without +// impact on the API/ABI +#define PXR_TSL_RH_VERSION_MINOR 3 +// A change of the patch version indicates a bugfix without additional +// functionality +#define PXR_TSL_RH_VERSION_PATCH 0 + #ifdef PXR_TSL_DEBUG #define pxr_tsl_rh_assert(expr) assert(expr) #else @@ -54,15 +64,15 @@ #define PXR_TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg) #else #define PXR_TSL_RH_NO_EXCEPTIONS -#ifdef NDEBUG -#define PXR_TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate() -#else +#ifdef PXR_TSL_DEBUG #include #define PXR_TSL_RH_THROW_OR_TERMINATE(ex, msg) \ do { \ std::cerr << msg << std::endl; \ std::terminate(); \ } while (0) +#else +#define PXR_TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate() #endif #endif diff --git a/pxr/base/tf/pxrTslRobinMap/robin_hash.h b/pxr/base/tf/pxrTslRobinMap/robin_hash.h index 4f1b657091..6ff3e4fbae 100644 --- a/pxr/base/tf/pxrTslRobinMap/robin_hash.h +++ b/pxr/base/tf/pxrTslRobinMap/robin_hash.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -91,6 +92,8 @@ static T numeric_cast(U value, PXR_TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message); } + PXR_TSL_RH_UNUSED(error_message); + return ret; } @@ -200,6 +203,7 @@ class bucket_entry : public bucket_entry_hash { value_type(other.value()); m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; } + pxr_tsl_rh_assert(empty() == other.empty()); } /** @@ -217,6 +221,7 @@ class bucket_entry : public bucket_entry_hash { value_type(std::move(other.value())); m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket; } + pxr_tsl_rh_assert(empty() == other.empty()); } bucket_entry& operator=(const bucket_entry& other) noexcept( @@ -254,12 +259,22 @@ class bucket_entry : public bucket_entry_hash { value_type& value() noexcept { pxr_tsl_rh_assert(!empty()); +#if defined(__cplusplus) && __cplusplus >= 201703L + return *std::launder( + reinterpret_cast(std::addressof(m_value))); +#else return *reinterpret_cast(std::addressof(m_value)); +#endif } const value_type& value() const noexcept { pxr_tsl_rh_assert(!empty()); +#if defined(__cplusplus) && __cplusplus >= 201703L + return *std::launder( + reinterpret_cast(std::addressof(m_value))); +#else return *reinterpret_cast(std::addressof(m_value)); +#endif } distance_type dist_from_ideal_bucket() const noexcept { @@ -288,6 +303,7 @@ class bucket_entry : public bucket_entry_hash { void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket, truncated_hash_type& hash, value_type& value) { pxr_tsl_rh_assert(!empty()); + pxr_tsl_rh_assert(dist_from_ideal_bucket > m_dist_from_ideal_bucket); using std::swap; swap(value, this->value()); @@ -315,19 +331,16 @@ class bucket_entry : public bucket_entry_hash { public: static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1; - static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 4096; + static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 8192; static_assert(DIST_FROM_IDEAL_BUCKET_LIMIT <= std::numeric_limits::max() - 1, "DIST_FROM_IDEAL_BUCKET_LIMIT must be <= " "std::numeric_limits::max() - 1."); private: - using storage = typename std::aligned_storage::type; - distance_type m_dist_from_ideal_bucket; bool m_last_bucket; - storage m_value; + alignas(value_type) unsigned char m_value[sizeof(value_type)]; }; /** @@ 
-415,9 +428,9 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { PXR_TSL_RH_UNUSED(bucket_count); return true; } else if (STORE_HASH && is_power_of_two_policy::value) { - pxr_tsl_rh_assert(bucket_count > 0); - return (bucket_count - 1) <= - std::numeric_limits::max(); + return bucket_count == 0 || + (bucket_count - 1) <= + std::numeric_limits::max(); } else { PXR_TSL_RH_UNUSED(bucket_count); return false; @@ -541,23 +554,18 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { : Hash(hash), KeyEqual(equal), GrowthPolicy(bucket_count), - m_buckets_data( - [&]() { - if (bucket_count > max_bucket_count()) { - PXR_TSL_RH_THROW_OR_TERMINATE( - std::length_error, - "The map exceeds its maximum bucket count."); - } - - return bucket_count; - }(), - alloc), + m_buckets_data(bucket_count, alloc), m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr() : m_buckets_data.data()), m_bucket_count(bucket_count), m_nb_elements(0), m_grow_on_next_insert(false), m_try_shrink_on_next_insert(false) { + if (bucket_count > max_bucket_count()) { + PXR_TSL_RH_THROW_OR_TERMINATE(std::length_error, + "The map exceeds its maximum bucket count."); + } + if (m_bucket_count > 0) { pxr_tsl_rh_assert(!m_buckets_data.empty()); m_buckets_data.back().set_as_last_bucket(); @@ -669,7 +677,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { robin_hash& operator=(robin_hash&& other) { other.swap(*this); - other.clear(); + other.clear_and_shrink(); return *this; } @@ -817,6 +825,10 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { return try_emplace(std::forward(key), std::forward(args)...).first; } + void erase_fast(iterator pos) { + erase_from_bucket(pos); + } + /** * Here to avoid `template size_type erase(const K& key)` being used * when we use an `iterator` instead of a `const_iterator`. 
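A few hunks above, bucket_entry's storage switches from std::aligned_storage (deprecated in newer standards) to a plain alignas byte array, and value() reads the object back through std::launder under C++17 so the compiler cannot assume the reinterpret_cast result is unrelated to what was placement-new'ed into the buffer. A minimal sketch of that storage idiom on its own, not the actual bucket_entry implementation:

```cpp
#include <new>      // placement new, std::launder
#include <utility>  // std::forward

// Minimal optional-like cell showing the idiom: raw aligned bytes plus
// explicit construction, access, and destruction of the stored object.
template <class T>
class StorageCell {
public:
    StorageCell() = default;
    StorageCell(const StorageCell &) = delete;
    StorageCell &operator=(const StorageCell &) = delete;
    ~StorageCell() { clear(); }

    template <class... Args>
    T &emplace(Args&&... args) {
        clear();
        ::new (static_cast<void *>(&_storage)) T(std::forward<Args>(args)...);
        _engaged = true;
        return value();
    }

    void clear() {
        if (_engaged) {
            value().~T();
            _engaged = false;
        }
    }

    T &value() {
#if defined(__cplusplus) && __cplusplus >= 201703L
        // launder tells the compiler a T really lives at this address.
        return *std::launder(reinterpret_cast<T *>(&_storage));
#else
        return *reinterpret_cast<T *>(&_storage);
#endif
    }

private:
    alignas(T) unsigned char _storage[sizeof(T)];
    bool _engaged = false;
};
```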
@@ -833,8 +845,6 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { ++pos; } - m_try_shrink_on_next_insert = true; - return pos; } @@ -913,8 +923,6 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { auto it = find(key, hash); if (it != end()) { erase_from_bucket(it); - m_try_shrink_on_next_insert = true; - return 1; } else { return 0; @@ -1078,11 +1086,12 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR), float(MAXIMUM_MAX_LOAD_FACTOR)); m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor); + pxr_tsl_rh_assert(bucket_count() == 0 || m_load_threshold < bucket_count()); } void rehash(size_type count_) { count_ = std::max(count_, - size_type(std::ceil(float(size()) / max_load_factor()))); + size_type(std::ceil(float(size()) / max_load_factor()))); rehash_impl(count_); } @@ -1207,6 +1216,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { previous_ibucket = ibucket; ibucket = next_bucket(ibucket); } + m_try_shrink_on_next_insert = true; } template @@ -1229,7 +1239,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { dist_from_ideal_bucket++; } - if (rehash_on_extreme_load()) { + while (rehash_on_extreme_load(dist_from_ideal_bucket)) { ibucket = bucket_for_hash(hash); dist_from_ideal_bucket = 0; @@ -1281,6 +1291,8 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { void insert_value_impl(std::size_t ibucket, distance_type dist_from_ideal_bucket, truncated_hash_type hash, value_type& value) { + pxr_tsl_rh_assert(dist_from_ideal_bucket > + m_buckets[ibucket].dist_from_ideal_bucket()); m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); ibucket = next_bucket(ibucket); @@ -1289,7 +1301,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { while (!m_buckets[ibucket].empty()) { if (dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { - if (dist_from_ideal_bucket >= + if (dist_from_ideal_bucket > bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) { /** * The number of probes is really high, rehash the map on the next @@ -1314,6 +1326,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { robin_hash new_table(count_, static_cast(*this), static_cast(*this), get_allocator(), m_min_load_factor, m_max_load_factor); + pxr_tsl_rh_assert(size() <= new_table.m_load_threshold); const bool use_stored_hash = USE_STORED_HASH_ON_REHASH(new_table.bucket_count()); @@ -1374,8 +1387,11 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { * * Return true if the table has been rehashed. 
*/ - bool rehash_on_extreme_load() { - if (m_grow_on_next_insert || size() >= m_load_threshold) { + bool rehash_on_extreme_load(distance_type curr_dist_from_ideal_bucket) { + if (m_grow_on_next_insert || + curr_dist_from_ideal_bucket > + bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT || + size() >= m_load_threshold) { rehash_impl(GrowthPolicy::next_bucket_count()); m_grow_on_next_insert = false; @@ -1581,6 +1597,7 @@ class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { */ bucket_entry* static_empty_bucket_ptr() noexcept { static bucket_entry empty_bucket(true); + pxr_tsl_rh_assert(empty_bucket.empty()); return &empty_bucket; } diff --git a/pxr/base/tf/pxrTslRobinMap/robin_map.h b/pxr/base/tf/pxrTslRobinMap/robin_map.h index c0e380ce08..39c3a108b4 100644 --- a/pxr/base/tf/pxrTslRobinMap/robin_map.h +++ b/pxr/base/tf/pxrTslRobinMap/robin_map.h @@ -102,8 +102,8 @@ class robin_map { public: using key_type = Key; - const key_type& operator()(const std::pair& key_value) const - noexcept { + const key_type& operator()( + const std::pair& key_value) const noexcept { return key_value.first; } @@ -116,8 +116,8 @@ class robin_map { public: using value_type = T; - const value_type& operator()(const std::pair& key_value) const - noexcept { + const value_type& operator()( + const std::pair& key_value) const noexcept { return key_value.second; } @@ -344,6 +344,14 @@ class robin_map { } size_type erase(const key_type& key) { return m_ht.erase(key); } + /** + * Erase the element at position 'pos'. In contrast to the regular erase() + * function, erase_fast() does not return an iterator. This allows it to be + * faster especially in hash tables with a low load factor, where finding the + * next nonempty bucket would be costly. + */ + void erase_fast(iterator pos) { return m_ht.erase_fast(pos); } + /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Useful to speed-up diff --git a/pxr/base/tf/pxrTslRobinMap/robin_set.h b/pxr/base/tf/pxrTslRobinMap/robin_set.h index 92fd376042..5246508846 100644 --- a/pxr/base/tf/pxrTslRobinMap/robin_set.h +++ b/pxr/base/tf/pxrTslRobinMap/robin_set.h @@ -268,6 +268,14 @@ class robin_set { } size_type erase(const key_type& key) { return m_ht.erase(key); } + /** + * Erase the element at position 'pos'. In contrast to the regular erase() + * function, erase_fast() does not return an iterator. This allows it to be + * faster especially in hash sets with a low load factor, where finding the + * next nonempty bucket would be costly. + */ + void erase_fast(iterator pos) { return m_ht.erase_fast(pos); } + /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). 
Useful to speed-up diff --git a/pxr/base/tf/pyAnnotatedBoolResult.h b/pxr/base/tf/pyAnnotatedBoolResult.h index 6d87485338..4a3277fcef 100644 --- a/pxr/base/tf/pyAnnotatedBoolResult.h +++ b/pxr/base/tf/pyAnnotatedBoolResult.h @@ -12,9 +12,9 @@ #include "pxr/base/tf/pyLock.h" #include "pxr/base/tf/pyUtils.h" -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/return_by_value.hpp" #include @@ -59,10 +59,10 @@ struct TfPyAnnotatedBoolResult } template - static boost::python::class_ + static pxr_boost::python::class_ Wrap(char const *name, char const *annotationName) { typedef TfPyAnnotatedBoolResult This; - using namespace boost::python; + using namespace pxr_boost::python; TfPyLock lock; return class_(name, init()) .def("__bool__", &Derived::GetValue) @@ -102,19 +102,19 @@ struct TfPyAnnotatedBoolResult } template - static boost::python::object _GetItem(const Derived& x, int i) + static pxr_boost::python::object _GetItem(const Derived& x, int i) { if (i == 0) { - return boost::python::object(x._val); + return pxr_boost::python::object(x._val); } if (i == 1) { - return boost::python::object(x._annotation); + return pxr_boost::python::object(x._annotation); } PyErr_SetString(PyExc_IndexError, "Index must be 0 or 1."); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); - return boost::python::object(); + return pxr_boost::python::object(); } private: diff --git a/pxr/base/tf/pyArg.cpp b/pxr/base/tf/pyArg.cpp index 49505939ec..d12c260645 100644 --- a/pxr/base/tf/pyArg.cpp +++ b/pxr/base/tf/pyArg.cpp @@ -11,18 +11,18 @@ #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/stringUtils.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/list.hpp" +#include "pxr/external/boost/python/slice.hpp" +#include "pxr/external/boost/python/stl_iterator.hpp" using std::string; using std::vector; -using namespace boost::python; - PXR_NAMESPACE_OPEN_SCOPE +using namespace pxr_boost::python; + static bool _ArgumentIsNamed(const std::string& name, const TfPyArg& arg) { diff --git a/pxr/base/tf/pyArg.h b/pxr/base/tf/pyArg.h index 65c12c8562..a4673d3084 100644 --- a/pxr/base/tf/pyArg.h +++ b/pxr/base/tf/pyArg.h @@ -10,8 +10,8 @@ #include "pxr/pxr.h" #include "pxr/base/tf/api.h" -#include -#include +#include "pxr/external/boost/python/dict.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include #include @@ -21,7 +21,7 @@ PXR_NAMESPACE_OPEN_SCOPE /// /// Class representing a function argument. /// -/// This is similar to \c boost::python::arg, except it's not opaque and +/// This is similar to \c pxr_boost::python::arg, except it's not opaque and /// provides more fields for documentation purposes. class TfPyArg { @@ -67,10 +67,10 @@ typedef std::vector TfPyArgs; /// arguments will cause a Python TypeError to be emitted. Otherwise, /// unmatched arguments will be added to the returned tuple or dict. 
TF_API -std::pair +std::pair TfPyProcessOptionalArgs( - const boost::python::tuple& args, - const boost::python::dict& kwargs, + const pxr_boost::python::tuple& args, + const pxr_boost::python::dict& kwargs, const TfPyArgs& expectedArgs, bool allowExtraArgs = false); diff --git a/pxr/base/tf/pyCall.h b/pxr/base/tf/pyCall.h index 335c52f398..afced52226 100644 --- a/pxr/base/tf/pyCall.h +++ b/pxr/base/tf/pyCall.h @@ -19,7 +19,7 @@ #include "pxr/base/tf/pyLock.h" #include "pxr/base/tf/pyObjWrapper.h" -#include +#include "pxr/external/boost/python/call.hpp" PXR_NAMESPACE_OPEN_SCOPE @@ -33,13 +33,13 @@ PXR_NAMESPACE_OPEN_SCOPE /// \endcode /// Generally speaking, TfPyCall instances may be copied, assigned, destroyed, /// and invoked without the client holding the GIL. However, if the \a Return -/// template parameter is a \a boost::python::object (or a derived class, such +/// template parameter is a \a pxr_boost::python::object (or a derived class, such /// as list or tuple) then the client must hold the GIL in order to invoke the /// call operator. template struct TfPyCall { /// Construct with callable \a c. Constructing with a \c - /// boost::python::object works, since those implicitly convert to \c + /// pxr_boost::python::object works, since those implicitly convert to \c /// TfPyObjWrapper, however in that case the GIL must be held by the caller. explicit TfPyCall(TfPyObjWrapper const &c) : _callable(c) {} @@ -59,9 +59,9 @@ TfPyCall::operator()(Args... args) // Do *not* call through if there's an active python exception. if (!PyErr_Occurred()) { try { - return boost::python::call + return pxr_boost::python::call (_callable.ptr(), args...); - } catch (boost::python::error_already_set const &) { + } catch (pxr_boost::python::error_already_set const &) { // Convert any exception to TF_ERRORs. TfPyConvertPythonExceptionToTfErrors(); PyErr_Clear(); diff --git a/pxr/base/tf/pyClassMethod.h b/pxr/base/tf/pyClassMethod.h index c2abe48a19..395ec6fa06 100644 --- a/pxr/base/tf/pyClassMethod.h +++ b/pxr/base/tf/pyClassMethod.h @@ -9,20 +9,20 @@ #include "pxr/pxr.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/dict.hpp" +#include "pxr/external/boost/python/object.hpp" +#include "pxr/external/boost/python/def_visitor.hpp" PXR_NAMESPACE_OPEN_SCOPE namespace Tf_PyClassMethod { -using namespace boost::python; +using namespace pxr_boost::python; // Visitor for wrapping functions as Python class methods. // See typedef below for docs. -// This is very similar to the staticmethod() method on boost::python::class, +// This is very similar to the staticmethod() method on pxr_boost::python::class, // except it uses PyClassMethod_New() instead of PyStaticMethod_New(). struct _TfPyClassMethod : def_visitor<_TfPyClassMethod> { @@ -70,7 +70,7 @@ struct _TfPyClassMethod : def_visitor<_TfPyClassMethod> /// classmethod()-wrapped one. /// /// \code -/// void Foo( boost::python::object & pyClassObject ) { /* ... */ } +/// void Foo( pxr_boost::python::object & pyClassObject ) { /* ... */ } /// /// class_<...>(...) 
/// .def("Foo", &Foo) diff --git a/pxr/base/tf/pyContainerConversions.h b/pxr/base/tf/pyContainerConversions.h index 363a8b1a56..10f74437d0 100644 --- a/pxr/base/tf/pyContainerConversions.h +++ b/pxr/base/tf/pyContainerConversions.h @@ -27,10 +27,10 @@ #include "pxr/base/tf/iterator.h" #include "pxr/base/tf/pyUtils.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/list.hpp" +#include "pxr/external/boost/python/tuple.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/to_python_converter.hpp" #include #include @@ -45,11 +45,11 @@ struct TfPySequenceToPython { static PyObject* convert(ContainerType const &c) { - boost::python::list result; + pxr_boost::python::list result; TF_FOR_ALL(i, c) { result.append(*i); } - return boost::python::incref(result.ptr()); + return pxr_boost::python::incref(result.ptr()); } }; @@ -61,7 +61,7 @@ struct TfPySequenceToPythonSet { PyObject* result = PySet_New(nullptr); for (const auto &elem : c) { - PySet_Add(result, boost::python::object(elem).ptr()); + PySet_Add(result, pxr_boost::python::object(elem).ptr()); } return result; } @@ -72,7 +72,7 @@ struct TfPyMapToPythonDict { static PyObject* convert(ContainerType const &c) { - return boost::python::incref(TfPyCopyMapToDictionary(c).ptr()); + return pxr_boost::python::incref(TfPyCopyMapToDictionary(c).ptr()); } }; @@ -83,12 +83,12 @@ namespace TfPyContainerConversions { { static PyObject* convert(ContainerType const& a) { - boost::python::list result; + pxr_boost::python::list result; typedef typename ContainerType::const_iterator const_iter; for(const_iter p=a.begin();p!=a.end();p++) { - result.append(boost::python::object(*p)); + result.append(pxr_boost::python::object(*p)); } - return boost::python::incref(boost::python::tuple(result).ptr()); + return pxr_boost::python::incref(pxr_boost::python::tuple(result).ptr()); } }; @@ -96,9 +96,9 @@ namespace TfPyContainerConversions { struct to_tuple > { static PyObject* convert(std::pair const& a) { - boost::python::tuple result = - boost::python::make_tuple(a.first, a.second); - return boost::python::incref(result.ptr()); + pxr_boost::python::tuple result = + pxr_boost::python::make_tuple(a.first, a.second); + return pxr_boost::python::incref(result.ptr()); } }; @@ -107,13 +107,13 @@ namespace TfPyContainerConversions { static bool check_convertibility_per_element() { return false; } template - static bool check_size(boost::type, std::size_t sz) + static bool check_size(ContainerType*, std::size_t sz) { return true; } template - static void assert_size(boost::type, std::size_t sz) {} + static void assert_size(ContainerType*, std::size_t sz) {} template static void reserve(ContainerType& a, std::size_t sz) {} @@ -124,18 +124,18 @@ namespace TfPyContainerConversions { static bool check_convertibility_per_element() { return true; } template - static bool check_size(boost::type, std::size_t sz) + static bool check_size(ContainerType*, std::size_t sz) { return ContainerType::size() == sz; } template - static void assert_size(boost::type, std::size_t sz) + static void assert_size(ContainerType* c, std::size_t sz) { - if (!check_size(boost::type(), sz)) { + if (!check_size(c, sz)) { PyErr_SetString(PyExc_RuntimeError, "Insufficient elements for fixed-size array."); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } } @@ -145,7 +145,7 @@ namespace TfPyContainerConversions { if (sz > ContainerType::size()) { PyErr_SetString(PyExc_RuntimeError, "Too many elements 
for fixed-size array."); - boost::python::throw_error_already_set(); + pxr_boost::python::throw_error_already_set(); } } @@ -181,7 +181,7 @@ namespace TfPyContainerConversions { struct fixed_capacity_policy : variable_capacity_policy { template - static bool check_size(boost::type, std::size_t sz) + static bool check_size(ContainerType*, std::size_t sz) { return ContainerType::max_size() >= sz; } @@ -212,10 +212,10 @@ namespace TfPyContainerConversions { from_python_sequence() { - boost::python::converter::registry::push_back( + pxr_boost::python::converter::registry::push_back( &convertible, &construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } static void* convertible(PyObject* obj_ptr) @@ -236,8 +236,8 @@ namespace TfPyContainerConversions { "Boost.Python.class") != 0) && PyObject_HasAttrString(obj_ptr, "__len__") && PyObject_HasAttrString(obj_ptr, "__getitem__")))) return 0; - boost::python::handle<> obj_iter( - boost::python::allow_null(PyObject_GetIter(obj_ptr))); + pxr_boost::python::handle<> obj_iter( + pxr_boost::python::allow_null(PyObject_GetIter(obj_ptr))); if (!obj_iter.get()) { // must be convertible to an iterator PyErr_Clear(); return 0; @@ -249,7 +249,7 @@ namespace TfPyContainerConversions { return 0; } if (!ConversionPolicy::check_size( - boost::type(), obj_size)) return 0; + (ContainerType*)nullptr, obj_size)) return 0; bool is_range = PyRange_Check(obj_ptr); std::size_t i=0; if (!all_elements_convertible(obj_iter, is_range, i)) return 0; @@ -262,20 +262,20 @@ namespace TfPyContainerConversions { // Internal Compiler Error. static bool all_elements_convertible( - boost::python::handle<>& obj_iter, + pxr_boost::python::handle<>& obj_iter, bool is_range, std::size_t& i) { for(;;i++) { - boost::python::handle<> py_elem_hdl( - boost::python::allow_null(PyIter_Next(obj_iter.get()))); + pxr_boost::python::handle<> py_elem_hdl( + pxr_boost::python::allow_null(PyIter_Next(obj_iter.get()))); if (PyErr_Occurred()) { PyErr_Clear(); return false; } if (!py_elem_hdl.get()) break; // end of iteration - boost::python::object py_elem_obj(py_elem_hdl); - boost::python::extract + pxr_boost::python::object py_elem_obj(py_elem_hdl); + pxr_boost::python::extract elem_proxy(py_elem_obj); if (!elem_proxy.check()) return false; if (is_range) break; // in a range all elements are of the same type @@ -285,26 +285,26 @@ namespace TfPyContainerConversions { static void construct( PyObject* obj_ptr, - boost::python::converter::rvalue_from_python_stage1_data* data) + pxr_boost::python::converter::rvalue_from_python_stage1_data* data) { - boost::python::handle<> obj_iter(PyObject_GetIter(obj_ptr)); + pxr_boost::python::handle<> obj_iter(PyObject_GetIter(obj_ptr)); void* storage = ( - (boost::python::converter::rvalue_from_python_storage*) + (pxr_boost::python::converter::rvalue_from_python_storage*) data)->storage.bytes; new (storage) ContainerType(); data->convertible = storage; ContainerType& result = *((ContainerType*)storage); std::size_t i=0; for(;;i++) { - boost::python::handle<> py_elem_hdl( - boost::python::allow_null(PyIter_Next(obj_iter.get()))); - if (PyErr_Occurred()) boost::python::throw_error_already_set(); + pxr_boost::python::handle<> py_elem_hdl( + pxr_boost::python::allow_null(PyIter_Next(obj_iter.get()))); + if (PyErr_Occurred()) pxr_boost::python::throw_error_already_set(); if (!py_elem_hdl.get()) break; // end of iteration - boost::python::object py_elem_obj(py_elem_hdl); - boost::python::extract elem_proxy(py_elem_obj); + pxr_boost::python::object 
py_elem_obj(py_elem_hdl); + pxr_boost::python::extract elem_proxy(py_elem_obj); ConversionPolicy::set_value(result, i, elem_proxy()); } - ConversionPolicy::assert_size(boost::type(), i); + ConversionPolicy::assert_size((ContainerType*)nullptr, i); } }; @@ -315,10 +315,10 @@ namespace TfPyContainerConversions { from_python_tuple_pair() { - boost::python::converter::registry::push_back( + pxr_boost::python::converter::registry::push_back( &convertible, &construct, - boost::python::type_id()); + pxr_boost::python::type_id()); } static void* convertible(PyObject* obj_ptr) @@ -326,8 +326,8 @@ namespace TfPyContainerConversions { if (!PyTuple_Check(obj_ptr) || PyTuple_Size(obj_ptr) != 2) { return 0; } - boost::python::extract e1(PyTuple_GetItem(obj_ptr, 0)); - boost::python::extract e2(PyTuple_GetItem(obj_ptr, 1)); + pxr_boost::python::extract e1(PyTuple_GetItem(obj_ptr, 0)); + pxr_boost::python::extract e2(PyTuple_GetItem(obj_ptr, 1)); if (!e1.check() || !e2.check()) { return 0; } @@ -336,13 +336,13 @@ namespace TfPyContainerConversions { static void construct( PyObject* obj_ptr, - boost::python::converter::rvalue_from_python_stage1_data* data) + pxr_boost::python::converter::rvalue_from_python_stage1_data* data) { void* storage = ( - (boost::python::converter::rvalue_from_python_storage*) + (pxr_boost::python::converter::rvalue_from_python_storage*) data)->storage.bytes; - boost::python::extract e1(PyTuple_GetItem(obj_ptr, 0)); - boost::python::extract e2(PyTuple_GetItem(obj_ptr, 1)); + pxr_boost::python::extract e1(PyTuple_GetItem(obj_ptr, 0)); + pxr_boost::python::extract e2(PyTuple_GetItem(obj_ptr, 1)); new (storage) PairType(e1(), e2()); data->convertible = storage; } @@ -352,7 +352,7 @@ namespace TfPyContainerConversions { struct to_tuple_mapping { to_tuple_mapping() { - boost::python::to_python_converter< + pxr_boost::python::to_python_converter< ContainerType, to_tuple >(); } @@ -412,7 +412,7 @@ namespace TfPyContainerConversions { struct tuple_mapping_pair { tuple_mapping_pair() { - boost::python::to_python_converter< + pxr_boost::python::to_python_converter< ContainerType, to_tuple >(); from_python_tuple_pair(); diff --git a/pxr/base/tf/pyEnum.cpp b/pxr/base/tf/pyEnum.cpp index a9d7fa5439..1b12ccad77 100644 --- a/pxr/base/tf/pyEnum.cpp +++ b/pxr/base/tf/pyEnum.cpp @@ -19,7 +19,7 @@ TF_INSTANTIATE_SINGLETON(Tf_PyEnumRegistry); using std::string; -using namespace boost::python; +using namespace pxr_boost::python; Tf_PyEnumRegistry::Tf_PyEnumRegistry() { @@ -55,15 +55,15 @@ Tf_PyEnumRegistry::_ConvertEnumToPython(TfEnum const &e) name = TfStringReplace(name, ">", "_"); name = "AutoGenerated_" + name + "_" + TfStringify(e.GetValueAsInt()); - boost::python::object wrappedVal = - boost::python::object(Tf_PyEnumWrapper(name, e)); + pxr_boost::python::object wrappedVal = + pxr_boost::python::object(Tf_PyEnumWrapper(name, e)); wrappedVal.attr("_baseName") = std::string(); RegisterValue(e, wrappedVal); } - return boost::python::incref(_enumsToObjects[e]); + return pxr_boost::python::incref(_enumsToObjects[e]); } string @@ -119,9 +119,9 @@ string Tf_PyCleanEnumName(string name, bool stripPackageName) return TfStringReplace(name, " ", "_"); } -void Tf_PyEnumAddAttribute(boost::python::scope &s, +void Tf_PyEnumAddAttribute(pxr_boost::python::scope &s, const std::string &name, - const boost::python::object &value) { + const pxr_boost::python::object &value) { // Skip exporting attr if the scope already has an attribute // with that name, but do make sure to place it in .allValues // for the class. 
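The converters updated in pyContainerConversions.h above are registered once per C++ container type from a module's wrap code; after that, wrapped functions taking or returning that container exchange Python sequences and lists transparently. A sketch of a typical registration, with a hypothetical wrap function and with the converters' template arguments assumed (they are not spelled out in this excerpt):

```cpp
#include <string>
#include <vector>

#include "pxr/pxr.h"
#include "pxr/base/tf/pyContainerConversions.h"

#include "pxr/external/boost/python/to_python_converter.hpp"

PXR_NAMESPACE_USING_DIRECTIVE

// Hypothetical wrap function; the container type is just an example.
void wrapMyContainers()
{
    using StringVector = std::vector<std::string>;

    // C++ -> Python: StringVector return values become Python lists.
    pxr_boost::python::to_python_converter<
        StringVector, TfPySequenceToPython<StringVector>>();

    // Python -> C++: any Python sequence of str converts to a StringVector
    // argument of a wrapped function.
    TfPyContainerConversions::from_python_sequence<
        StringVector, TfPyContainerConversions::variable_capacity_policy>();
}
```

Fixed-size containers would use fixed_size_policy (or fixed_capacity_policy) in place of variable_capacity_policy, matching the policies defined above.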
diff --git a/pxr/base/tf/pyEnum.h b/pxr/base/tf/pyEnum.h index dbd23a2cba..9d9b835760 100644 --- a/pxr/base/tf/pyEnum.h +++ b/pxr/base/tf/pyEnum.h @@ -25,17 +25,17 @@ #include "pxr/base/tf/singleton.h" #include "pxr/base/tf/stringUtils.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/class.hpp" +#include "pxr/external/boost/python/converter/from_python.hpp" +#include "pxr/external/boost/python/converter/registered.hpp" +#include "pxr/external/boost/python/converter/rvalue_from_python_data.hpp" +#include "pxr/external/boost/python/list.hpp" +#include "pxr/external/boost/python/object.hpp" +#include "pxr/external/boost/python/operators.hpp" +#include "pxr/external/boost/python/refcount.hpp" +#include "pxr/external/boost/python/scope.hpp" +#include "pxr/external/boost/python/to_python_converter.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include @@ -67,12 +67,12 @@ class Tf_PyEnumRegistry { } TF_API - void RegisterValue(TfEnum const &e, boost::python::object const &obj); + void RegisterValue(TfEnum const &e, pxr_boost::python::object const &obj); template void RegisterEnumConversions() { // Register conversions to and from python. - boost::python::to_python_converter >(); + pxr_boost::python::to_python_converter >(); _EnumFromPython(); } @@ -84,8 +84,8 @@ class Tf_PyEnumRegistry { template struct _EnumFromPython { _EnumFromPython() { - boost::python::converter::registry::insert - (&convertible, &construct, boost::python::type_id()); + pxr_boost::python::converter::registry::insert + (&convertible, &construct, pxr_boost::python::type_id()); } static void *convertible(PyObject *obj) { TfHashMap const &o2e = @@ -101,10 +101,10 @@ class Tf_PyEnumRegistry { else return (i != o2e.end() && i->second.IsA()) ? obj : 0; } - static void construct(PyObject *src, boost::python::converter:: + static void construct(PyObject *src, pxr_boost::python::converter:: rvalue_from_python_stage1_data *data) { void *storage = - ((boost::python::converter:: + ((pxr_boost::python::converter:: rvalue_from_python_storage *)data)->storage.bytes; new (storage) T(_GetEnumValue(src, (T *)0)); data->convertible = storage; @@ -147,7 +147,7 @@ TF_API_TEMPLATE_CLASS(TfSingleton); // Private function used for __repr__ of wrapped enum types. TF_API -std::string Tf_PyEnumRepr(boost::python::object const &self); +std::string Tf_PyEnumRepr(pxr_boost::python::object const &self); // Private base class for types which are instantiated and exposed to python // for each registered enum type. @@ -289,12 +289,12 @@ struct Tf_TypedPyEnumWrapper : Tf_PyEnumWrapper Tf_TypedPyEnumWrapper(std::string const &n, TfEnum const &val) : Tf_PyEnumWrapper(n, val) {} - static boost::python::object GetValueFromName(const std::string& name) { + static pxr_boost::python::object GetValueFromName(const std::string& name) { bool found = false; const TfEnum value = TfEnum::GetValueFromName(name, &found); return found - ? boost::python::object(value) - : boost::python::object(); + ? pxr_boost::python::object(value) + : pxr_boost::python::object(); } }; @@ -311,9 +311,9 @@ std::string Tf_PyCleanEnumName(std::string name, // Adds attribute of given name with given value to given scope. // Issues a coding error if attribute by that name already existed. 
TF_API -void Tf_PyEnumAddAttribute(boost::python::scope &s, +void Tf_PyEnumAddAttribute(pxr_boost::python::scope &s, const std::string &name, - const boost::python::object &value); + const pxr_boost::python::object &value); /// \class TfPyWrapEnum /// @@ -323,7 +323,7 @@ void Tf_PyEnumAddAttribute(boost::python::scope &s, /// TfEnum system, and potentially providing automatic wrapping by using names /// registered with the \a TfEnum system and by making some assumptions about /// the way we structure our code. Enums that are not registered with TfEnum -/// may be manually wrapped using boost::python::enum_ instead. +/// may be manually wrapped using pxr_boost::python::enum_ instead. /// /// Example usage. For an enum that looks like this: /// \code @@ -363,8 +363,8 @@ template ::value> struct TfPyWrapEnum { private: - typedef boost::python::class_< - Tf_TypedPyEnumWrapper, boost::python::bases > + typedef pxr_boost::python::class_< + Tf_TypedPyEnumWrapper, pxr_boost::python::bases > _EnumPyClassType; public: @@ -375,7 +375,7 @@ struct TfPyWrapEnum { /// stripped. explicit TfPyWrapEnum( std::string const &name = std::string()) { - using namespace boost::python; + using namespace pxr_boost::python; const bool explicitName = !name.empty(); @@ -447,7 +447,7 @@ struct TfPyWrapEnum { /// If no explicit names have been registered, this will export the TfEnum /// registered names and values (if any). void _ExportValues(bool stripPackageName, _EnumPyClassType &enumClass) { - boost::python::list valueList; + pxr_boost::python::list valueList; for (const std::string& name : TfEnum::GetAllNames()) { bool success = false; @@ -461,7 +461,7 @@ struct TfPyWrapEnum { // convert value to python. Tf_TypedPyEnumWrapper wrappedValue(cleanedName, enumValue); - boost::python::object pyValue(wrappedValue); + pxr_boost::python::object pyValue(wrappedValue); // register it as the python object for this value. Tf_PyEnumRegistry::GetInstance().RegisterValue(enumValue, pyValue); @@ -470,11 +470,11 @@ struct TfPyWrapEnum { std::string valueName = wrappedValue.GetName(); if (IsScopedEnum) { // If scoped enum, enum values appear on the enumClass ... - boost::python::scope s(enumClass); + pxr_boost::python::scope s(enumClass); Tf_PyEnumAddAttribute(s, valueName, pyValue); } else { // ... otherwise, enum values appear on the enclosing scope. - boost::python::scope s; + pxr_boost::python::scope s; Tf_PyEnumAddAttribute(s, valueName, pyValue); } @@ -482,7 +482,7 @@ struct TfPyWrapEnum { } // Add a tuple of all the values to the enum class. - enumClass.setattr("allValues", boost::python::tuple(valueList)); + enumClass.setattr("allValues", pxr_boost::python::tuple(valueList)); } }; diff --git a/pxr/base/tf/pyError.cpp b/pxr/base/tf/pyError.cpp index 65e0c63eef..e30bc3b8ed 100644 --- a/pxr/base/tf/pyError.cpp +++ b/pxr/base/tf/pyError.cpp @@ -14,19 +14,20 @@ #include "pxr/base/tf/pyError.h" #include "pxr/base/tf/pyErrorInternal.h" -#include -#include -#include -#include +#include "pxr/external/boost/python/handle.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/list.hpp" +#include "pxr/external/boost/python/tuple.hpp" #include -using namespace boost::python; using std::vector; using std::string; PXR_NAMESPACE_OPEN_SCOPE +using namespace pxr_boost::python; + bool TfPyConvertTfErrorsToPythonException(TfErrorMark const &m) { // If there is a python exception somewhere in here, restore that, otherwise // raise a normal error exception. 
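TfPyWrapEnum above generates the Python wrapping for enums that are registered with TfEnum, exporting each registered name (onto the enclosing scope, or onto the enum class itself for scoped enums) and attaching an allValues tuple; enums with no TfEnum registration are wrapped manually with pxr_boost::python::enum_. A sketch of both paths, using hypothetical enums and an assumed include path for enum.hpp that follows this patch's header layout:

```cpp
#include "pxr/pxr.h"
#include "pxr/base/tf/pyEnum.h"

// Assumed path, mirroring the pxr/external/boost/python layout used here.
#include "pxr/external/boost/python/enum.hpp"

PXR_NAMESPACE_USING_DIRECTIVE

// Hypothetical enums, for illustration only.
enum MyRegisteredChoices { MyChoiceA, MyChoiceB };  // names assumed to be added
                                                    // via TF_ADD_ENUM_NAME in a
                                                    // TF_REGISTRY_FUNCTION(TfEnum)
enum MyPlainChoices { MyPlainOn, MyPlainOff };      // not known to TfEnum

void wrapMyChoices()
{
    // TfEnum-registered enum: names, values, and .allValues come from the
    // TfEnum registry.
    TfPyWrapEnum<MyRegisteredChoices>();

    // Unregistered enum: wrap it by hand.
    pxr_boost::python::enum_<MyPlainChoices>("MyPlainChoices")
        .value("On", MyPlainOn)
        .value("Off", MyPlainOff);
}
```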
diff --git a/pxr/base/tf/pyError.h b/pxr/base/tf/pyError.h index 06487fa495..d597bf2279 100644 --- a/pxr/base/tf/pyError.h +++ b/pxr/base/tf/pyError.h @@ -15,7 +15,7 @@ #include "pxr/base/tf/api.h" #include "pxr/base/tf/errorMark.h" -#include +#include "pxr/external/boost/python/default_call_policies.hpp" PXR_NAMESPACE_OPEN_SCOPE @@ -42,7 +42,7 @@ void TfPyConvertPythonExceptionToTfErrors(); /// required for wrapped functions and methods that do not appear directly in an /// extension module. For instance, the map and sequence proxy objects use /// this, since they are created on the fly. -template +template struct TfPyRaiseOnError : Base { public: diff --git a/pxr/base/tf/pyErrorInternal.cpp b/pxr/base/tf/pyErrorInternal.cpp index 2c19aa3f31..a17b447956 100644 --- a/pxr/base/tf/pyErrorInternal.cpp +++ b/pxr/base/tf/pyErrorInternal.cpp @@ -12,13 +12,13 @@ #include "pxr/base/tf/enum.h" #include "pxr/base/tf/registryManager.h" -#include -#include - -using namespace boost::python; +#include "pxr/external/boost/python/handle.hpp" +#include "pxr/external/boost/python/object.hpp" PXR_NAMESPACE_OPEN_SCOPE +using namespace pxr_boost::python; + TF_REGISTRY_FUNCTION(TfEnum) { TF_ADD_ENUM_NAME(TF_PYTHON_EXCEPTION); } diff --git a/pxr/base/tf/pyErrorInternal.h b/pxr/base/tf/pyErrorInternal.h index 63a4780262..5b199e842f 100644 --- a/pxr/base/tf/pyErrorInternal.h +++ b/pxr/base/tf/pyErrorInternal.h @@ -11,8 +11,8 @@ #include "pxr/base/tf/api.h" #include "pxr/base/tf/pyExceptionState.h" -#include -#include +#include "pxr/external/boost/python/handle.hpp" +#include "pxr/external/boost/python/object_fwd.hpp" PXR_NAMESPACE_OPEN_SCOPE @@ -20,8 +20,8 @@ enum Tf_PyExceptionErrorCode { TF_PYTHON_EXCEPTION }; -TF_API boost::python::handle<> Tf_PyGetErrorExceptionClass(); -TF_API void Tf_PySetErrorExceptionClass(boost::python::object const &cls); +TF_API pxr_boost::python::handle<> Tf_PyGetErrorExceptionClass(); +TF_API void Tf_PySetErrorExceptionClass(pxr_boost::python::object const &cls); /// RAII class to save and restore the Python exception state. The client /// must hold the GIL during all methods, including the c'tor and d'tor. diff --git a/pxr/base/tf/pyExceptionState.cpp b/pxr/base/tf/pyExceptionState.cpp index 8489821def..67fa29ab97 100644 --- a/pxr/base/tf/pyExceptionState.cpp +++ b/pxr/base/tf/pyExceptionState.cpp @@ -10,14 +10,15 @@ #include "pxr/base/tf/pyErrorInternal.h" #include "pxr/base/tf/pyLock.h" -#include -#include +#include "pxr/external/boost/python/object.hpp" +#include "pxr/external/boost/python/extract.hpp" -using namespace boost::python; using std::string; PXR_NAMESPACE_OPEN_SCOPE +using namespace pxr_boost::python; + TfPyExceptionState::TfPyExceptionState(TfPyExceptionState const &other) { TfPyLock lock; @@ -77,11 +78,11 @@ TfPyExceptionState::GetExceptionString() const object tbModule(handle<>(PyImport_ImportModule("traceback"))); object exception = tbModule.attr("format_exception")(_type, _value, _trace); - boost::python::ssize_t size = len(exception); - for (boost::python::ssize_t i = 0; i != size; ++i) { + pxr_boost::python::ssize_t size = len(exception); + for (pxr_boost::python::ssize_t i = 0; i != size; ++i) { s += extract(exception[i]); } - } catch (boost::python::error_already_set const &) { + } catch (pxr_boost::python::error_already_set const &) { // Just ignore the exception. 
} return s; diff --git a/pxr/base/tf/pyExceptionState.h b/pxr/base/tf/pyExceptionState.h index 053fb72225..bcbbce71eb 100644 --- a/pxr/base/tf/pyExceptionState.h +++ b/pxr/base/tf/pyExceptionState.h @@ -7,14 +7,14 @@ #include "pxr/pxr.h" #include "pxr/base/tf/api.h" -#include +#include "pxr/external/boost/python/handle.hpp" PXR_NAMESPACE_OPEN_SCOPE struct TfPyExceptionState { - TfPyExceptionState(boost::python::handle<> const &type, - boost::python::handle<> const &value, - boost::python::handle<> const &trace) : + TfPyExceptionState(pxr_boost::python::handle<> const &type, + pxr_boost::python::handle<> const &value, + pxr_boost::python::handle<> const &trace) : _type(type), _value(value), _trace(trace) {} TF_API @@ -32,9 +32,9 @@ struct TfPyExceptionState { TF_API static TfPyExceptionState Fetch(); - boost::python::handle<> const &GetType() const { return _type; } - boost::python::handle<> const &GetValue() const { return _value; } - boost::python::handle<> const &GetTrace() const { return _trace; } + pxr_boost::python::handle<> const &GetType() const { return _type; } + pxr_boost::python::handle<> const &GetValue() const { return _value; } + pxr_boost::python::handle<> const &GetTrace() const { return _trace; } // Move this object's exception state into Python's current exception state, // as by PyErr_Restore(). This leaves this object's exception state clear. @@ -47,7 +47,7 @@ struct TfPyExceptionState { std::string GetExceptionString() const; private: - boost::python::handle<> _type, _value, _trace; + pxr_boost::python::handle<> _type, _value, _trace; }; PXR_NAMESPACE_CLOSE_SCOPE diff --git a/pxr/base/tf/pyFunction.h b/pxr/base/tf/pyFunction.h index 616e623b7f..e274510929 100644 --- a/pxr/base/tf/pyFunction.h +++ b/pxr/base/tf/pyFunction.h @@ -14,12 +14,12 @@ #include "pxr/base/tf/pyObjWrapper.h" #include "pxr/base/tf/pyUtils.h" -#include -#include -#include -#include -#include -#include +#include "pxr/external/boost/python/converter/from_python.hpp" +#include "pxr/external/boost/python/converter/registered.hpp" +#include "pxr/external/boost/python/converter/rvalue_from_python_data.hpp" +#include "pxr/external/boost/python/extract.hpp" +#include "pxr/external/boost/python/handle.hpp" +#include "pxr/external/boost/python/object.hpp" #include @@ -48,7 +48,7 @@ struct TfPyFunctionFromPython TfPyObjWrapper weak; Ret operator()(Args... args) { - using namespace boost::python; + using namespace pxr_boost::python; // Attempt to get the referenced callable object. TfPyLock lock; object callable(handle<>(borrowed(PyWeakref_GetObject(weak.ptr())))); @@ -66,7 +66,7 @@ struct TfPyFunctionFromPython TfPyObjWrapper weakSelf; Ret operator()(Args... args) { - using namespace boost::python; + using namespace pxr_boost::python; // Attempt to get the referenced self parameter, then build a new // instance method and call it. 
TfPyLock lock; @@ -88,7 +88,7 @@ struct TfPyFunctionFromPython template static void RegisterFunctionType() { - using namespace boost::python; + using namespace pxr_boost::python; converter::registry:: insert(&convertible, &construct, type_id()); } @@ -98,10 +98,10 @@ struct TfPyFunctionFromPython } template - static void construct(PyObject *src, boost::python::converter:: + static void construct(PyObject *src, pxr_boost::python::converter:: rvalue_from_python_stage1_data *data) { using std::string; - using namespace boost::python; + using namespace pxr_boost::python; void *storage = ((converter::rvalue_from_python_storage *) data)->storage.bytes; diff --git a/pxr/base/tf/pyIdentity.cpp b/pxr/base/tf/pyIdentity.cpp index e87ecb5aca..5389d8f25e 100644 --- a/pxr/base/tf/pyIdentity.cpp +++ b/pxr/base/tf/pyIdentity.cpp @@ -161,7 +161,7 @@ static void _WeakBaseDied(void const *key) { }; static std::string _GetTypeName(PyObject *obj) { - using namespace boost::python; + using namespace pxr_boost::python; TfPyLock lock; handle<> typeHandle( borrowed<>( PyObject_Type(obj) ) ); if (typeHandle) { @@ -274,8 +274,8 @@ PyObject *Tf_PyIdentityHelper::Get(void const *key) { return 0; } - // use boost::python::xincref here, because it returns the increfed ptr. - return boost::python::xincref(i->second.Ptr()); + // use pxr_boost::python::xincref here, because it returns the increfed ptr. + return pxr_boost::python::xincref(i->second.Ptr()); } diff --git a/pxr/base/tf/pyIdentity.h b/pxr/base/tf/pyIdentity.h index 79eecf6962..fb49b9a484 100644 --- a/pxr/base/tf/pyIdentity.h +++ b/pxr/base/tf/pyIdentity.h @@ -20,13 +20,13 @@ #include "pxr/base/tf/stringUtils.h" #include "pxr/base/tf/weakPtr.h" -#include +#include "pxr/external/boost/python/handle.hpp" #include "pxr/base/tf/hashmap.h" -// Specializations for boost::python::pointee and get_pointer for TfRefPtr and +// Specializations for pxr_boost::python::pointee and get_pointer for TfRefPtr and // TfWeakPtr. -namespace boost { namespace python { +namespace PXR_BOOST_NAMESPACE { namespace python { // TfWeakPtrFacade template
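The pyIdentity.h hunk above opens specializations of pxr_boost::python::pointee and get_pointer so that TfRefPtr and TfWeakPtr can serve as Python object holders. As a generic illustration of that customization pattern only, with a hypothetical smart pointer and an assumed header path, not the actual Tf specializations:

```cpp
// Assumed path for the pointee primary template, following this patch's layout.
#include "pxr/external/boost/python/pointee.hpp"

// A hypothetical smart pointer standing in for TfRefPtr/TfWeakPtr.
template <class T>
class MyPtr {
public:
    explicit MyPtr(T *p = nullptr) : _p(p) {}
    T *get() const { return _p; }
private:
    T *_p;
};

// Tell pxr_boost.python what a MyPtr<T> points to ...
namespace PXR_BOOST_NAMESPACE { namespace python {

template <class T>
struct pointee< MyPtr<T> > {
    typedef T type;
};

}} // namespace PXR_BOOST_NAMESPACE::python

// ... and how to extract a raw pointer; this is found through
// argument-dependent lookup, so it lives next to the smart pointer type.
template <class T>
T *get_pointer(MyPtr<T> const &p)
{
    return p.get();
}
```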