diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml index 1c662a8fcf..5e70ddd1b4 100644 --- a/.github/workflows/pypi.yml +++ b/.github/workflows/pypi.yml @@ -37,6 +37,9 @@ jobs: - TAG: cp311 INTERPRETER: /opt/python/cp311-cp311/bin/python VERSION_SPEC: '3.11' + - TAG: cp312 + INTERPRETER: /opt/python/cp312-cp312/bin/python + VERSION_SPEC: '3.12' runs-on: ubuntu-20.04 timeout-minutes: 60 steps: @@ -109,6 +112,9 @@ jobs: - VERSION_SPEC: '3.11' INTERPRETER: python3.11 TAG: cp311 + - VERSION_SPEC: '3.12' + INTERPRETER: python3.12 + TAG: cp312 runs-on: macos-13 timeout-minutes: 120 steps: @@ -122,6 +128,7 @@ jobs: - name: Install dependencies run: | ${{ matrix.PYTHON.INTERPRETER }} -m pip install --upgrade pip + ${{ matrix.PYTHON.INTERPRETER }} -m pip install setuptools ${{ matrix.PYTHON.INTERPRETER }} -m pip install delocate~=0.10.2 wheel - name: Build USD run: | @@ -176,6 +183,8 @@ jobs: TAG: cp310 - VERSION_SPEC: '3.11' TAG: cp311 + - VERSION_SPEC: '3.12' + TAG: cp312 runs-on: windows-2019 timeout-minutes: 60 steps: @@ -188,6 +197,7 @@ jobs: check-latest: false - name: Install dependencies run: | + python -m pip install setuptools python -m pip install wheel shell: cmd - name: Build USD @@ -261,6 +271,10 @@ jobs: PYTHON_VERSION_SPEC: '3.11' IMAGE: ubuntu-20.04 PYTHON_INTERPRETER: python3 + - NAME: Linux_Python312 + PYTHON_VERSION_SPEC: '3.12' + IMAGE: ubuntu-20.04 + PYTHON_INTERPRETER: python3 - NAME: Mac_Python38 PYTHON_VERSION_SPEC: '3.8' IMAGE: macos-13 @@ -277,6 +291,10 @@ jobs: PYTHON_VERSION_SPEC: '3.11' IMAGE: macos-13 PYTHON_INTERPRETER: python3 + - NAME: Mac_Python312 + PYTHON_VERSION_SPEC: '3.12' + IMAGE: macos-13 + PYTHON_INTERPRETER: python3 - NAME: Windows_Python38 PYTHON_VERSION_SPEC: '3.8' IMAGE: windows-2019 @@ -293,6 +311,10 @@ jobs: PYTHON_VERSION_SPEC: '3.11' IMAGE: windows-2019 PYTHON_INTERPRETER: python3 + - NAME: Windows_Python312 + PYTHON_VERSION_SPEC: '3.12' + IMAGE: windows-2019 + PYTHON_INTERPRETER: python3 runs-on: ${{ matrix.BUILD_CONFIG.IMAGE }} steps: - name: Install Python @@ -319,4 +341,4 @@ jobs: name: TEST-usdinstall-${{ matrix.BUILD_CONFIG.NAME }} path: TEST-usdinstall-${{ matrix.BUILD_CONFIG.NAME }}.xml # Use always() to always run this step to publish test results when there are test failures - if: ${{ always() }} \ No newline at end of file + if: ${{ always() }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 084e873cbf..960d29abf5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,484 @@ # Change Log +## [25.02] - 2025-01-21 + +### Build + +- Various fixes and changes to build_usd.py: + - Added `--no-zlib` option to disable building zlib, for developers that need + to supply their own version of the library. + (PR: [#2988](https://github.com/PixarAnimationStudios/OpenUSD/pull/2988), + [#3130](https://github.com/PixarAnimationStudios/OpenUSD/pull/3130)) + - Updated Boost dependency to 1.86.0 for Visual Studio 2022. + (Issue: [#1062](https://github.com/PixarAnimationStudios/OpenUSD/issues/1062), + [#2044](https://github.com/PixarAnimationStudios/OpenUSD/issues/2044), + [#2158](https://github.com/PixarAnimationStudios/OpenUSD/issues/2158), + [#3102](https://github.com/PixarAnimationStudios/OpenUSD/issues/3102)) + - Updated OpenEXR dependency to v3.1.13. + (PR: [#3077](https://github.com/PixarAnimationStudios/OpenUSD/pull/3077)) + - Updated OpenImageIO dependency to v2.5.6.0. + (PR: [#2940](https://github.com/PixarAnimationStudios/OpenUSD/pull/2940)) + - Updated to download boost from SourceForge or the official boost host. 
+  - Added `--(no-)usdValidation` option to enable/disable building the USD
+    validation framework. USD validation is enabled by default.
+
+- Fixed various issues in pxr_boost::python when building against Python 3.11.
+  (Issue: [#3384](https://github.com/PixarAnimationStudios/OpenUSD/issues/3384))
+
+- Fixed issue with missing headers causing build failures with Visual Studio 2022.
+  (Issue: [#3433](https://github.com/PixarAnimationStudios/OpenUSD/issues/3433))
+
+- Fixed issues with precompiled headers when using Visual Studio and Ninja.
+  (Issue: [#3408](https://github.com/PixarAnimationStudios/OpenUSD/issues/3408),
+  PR: [#3204](https://github.com/PixarAnimationStudios/OpenUSD/pull/3204))
+
+- Various fixes for compilation issues and warnings.
+  - Fixed compilation issues when building USD using the C++20 standard.
+    (PR: [#2605](https://github.com/PixarAnimationStudios/OpenUSD/pull/2605))
+  - Made various fixes for compiler warnings emitted by GCC 11.
+  - Made various fixes for stricter compiler parsing of templates by an upcoming
+    clang/llvm release.
+    (PR: [#3434](https://github.com/PixarAnimationStudios/OpenUSD/pull/3434))
+  - Corrected re-declaration of structs as classes.
+    (PR: [#3316](https://github.com/PixarAnimationStudios/OpenUSD/pull/3316))
+  - Fixed static functions that should be static inline.
+    (Issue: [#3324](https://github.com/PixarAnimationStudios/OpenUSD/issues/3324),
+    PR: [#3356](https://github.com/PixarAnimationStudios/OpenUSD/pull/3356))
+  - Fixed overriding member functions that were not marked override.
+    (Issue: [#3335](https://github.com/PixarAnimationStudios/OpenUSD/issues/3335),
+    PR: [#3355](https://github.com/PixarAnimationStudios/OpenUSD/pull/3355))
+
+- Fixed various link visibility issues.
+  - Fixed methods from HdSingleInputFilteringSceneIndexBase.
+    (PR: [#3428](https://github.com/PixarAnimationStudios/OpenUSD/pull/3428))
+  - Fixed HdDependenciesSchema::GetEntries.
+    (PR: [#3369](https://github.com/PixarAnimationStudios/OpenUSD/pull/3369))
+  - Fixed HdxSelectionTracker.
+
+### USD
+
+- Increased the default version for newly-created binary files to 0.10.0. USD
+  release 23.11 and newer support this file version. This can be overridden with
+  the `USD_WRITE_NEW_USDC_FILES_AS_VERSION` env var.
+
+- Added UsdColorSpaceAPI for specifying the color space for a prim subtree.
+
+- Added UsdColorSpaceDefinitionAPI for defining custom color spaces.
+
+- Fixed issue where ArchDebuggerIsAttached would return false positives on some
+  Linux distributions.
+  (PR: [#3014](https://github.com/PixarAnimationStudios/OpenUSD/pull/3014))
+
+- Fixed `ArchGetFileName` to return a full path on Windows.
+  (PR: [#3361](https://github.com/PixarAnimationStudios/OpenUSD/pull/3361))
+
+- Simplified TfScriptModuleLoader and made it thread-safe.
+
+- Added TfNotice::RevokeAndWait() to allow thread-safe revocation of
+  notification.
+
+- Fixed issue where saving numeric data to text .usda layers could be formatted
+  differently depending on the system locale.
+  (Issue: [#3214](https://github.com/PixarAnimationStudios/OpenUSD/issues/3214),
+  PR: [#3222](https://github.com/PixarAnimationStudios/OpenUSD/pull/3222))
+
+- Fixed an incorrect memory deallocation in TsRegressionPreventer.
+
+- Added WorkDispatcher::IsCancelled, which returns true if
+  WorkDispatcher::Cancel has been called.
+
+- Removed flag `PCP_DISABLE_TIME_SCALING_BY_LAYER_TCPS`. Time sample scaling is
+  always enabled.
+
+- Enabled change processing optimization for muting/unmuting and adding/removing
+  sublayers.
+  This can be disabled by setting the environment variable
+  `PCP_ENABLE_MINIMAL_CHANGES_FOR_LAYER_OPERATIONS` to 0. See the 24.11 release
+  notes for more details.
+
+- Removed SdfPropertySpec::GetTimeSampleMap in favor of
+  SdfAttributeSpec::GetTimeSampleMap.
+
+- Reverted deprecation of SdfLayer time sample methods.
+
+- Identifiers passed to SdfLayer::Find or FindOrOpen now have file format
+  arguments removed before being resolved by ArResolver, to keep resolver
+  implementations from seeing unexpected values.
+
+- Made all explicit SdfListOp setter methods enforce uniqueness by removing
+  duplicates. Deprecated ModifyOperations(callback, removeDuplicates) as
+  removeDuplicates is now assumed true.
+
+- Added SdfCreateAttributeInLayer and SdfCreateRelationshipInLayer.
+
+- Updated UsdNamespaceEditor to properly update the defaultPrim on affected
+  layers when necessary.
+
+- Updated UsdUtilsModifyAssetPaths to preserve SdfAssetPath metadata. This fixes
+  a regression introduced in 23.11. (See the usage sketch after these notes.)
+
+- Disabled UDIM resolution for ExtractExternalReferences, which addresses a
+  behavior introduced in 23.11 where UDIM paths were not being resolved.
+  (Issue: [#3173](https://github.com/PixarAnimationStudios/OpenUSD/issues/3173))
+
+- The schema registry now throws a coding error if a schema is registered
+  without a schemaIdentifier.
+
+- Updated usdGenSchema to only write out aliases to plugInfo if the prim type
+  name does not match the class name.
+
+- Fixed updateSchemaWithSdrNote to add the shaderId property appropriately for
+  all shader nodes.
+
+- Fixed a bug in SdrShaderProperty where int arrays would always default to
+  zero.
+
+- Added NdrRegistry.AddDiscoveryResult Python binding.
+
+- Various UsdValidation updates:
+  - Moved the UsdValidation framework to its own library, pxr/usdValidation, and
+    moved the USD schema domain validators to their respective schema libraries.
+    This lets clients that do not use the validation framework avoid any dynamic
+    library dependencies on the validation libraries.
+  - Updated UsdValidationContext::Validate(stage) to validate all prims,
+    including instance proxies, by default. Clients can also use the overload
+    that takes a traversal predicate to control stage traversal when validating
+    the stage.
+  - Fixed a bug where schemaType validators were not processed by the
+    UsdValidationContext.
+  - Updated UsdValidationError::GetErrorAsString to include the error
+    identifier information.
+  - Added Python wrappings for UsdValidationContext.
+  - Added UsdValidationTimeRange to allow prim- and stage-level validators to
+    provide a GfInterval or timeCodes, including the default time, to be
+    evaluated for validation. Various UsdValidation core APIs were updated to
+    use UsdValidationTimeRange. Added isTimeDependent metadata to
+    UsdValidationValidatorMetadata to indicate whether a validator is time
+    dependent.
+  - Added validators corresponding to UsdUtilsComplianceChecker tests:
+    - NormalMapTextureValidator
+      (PR: [#3443](https://github.com/PixarAnimationStudios/OpenUSD/pull/3443))
+    - FileExtensionValidator
+      (PR: [#3444](https://github.com/PixarAnimationStudios/OpenUSD/pull/3444))
+    - MissingReferenceValidator
+      (PR: [#3450](https://github.com/PixarAnimationStudios/OpenUSD/pull/3450))
+    - RootPackageValidator and UsdzPackageValidator
+      (PR: [#3449](https://github.com/PixarAnimationStudios/OpenUSD/pull/3449))
+
+- Added support for scalar translate and scale xformOps.
+  Added translateX, translateY, translateZ, scaleX, scaleY, and scaleZ
+  xformOps, corresponding to translation and scale along the X, Y, and Z axes
+  respectively. Also added APIs to UsdGeomXformable for adding the
+  above-mentioned xformOps to UsdGeomXform (see the sketch after these notes).
+
+### Hydra
+
+- Removed deprecated OpenImageIO APIs from hioOiio to support OpenImageIO
+  3.0.0.x.
+  (PR: [#3418](https://github.com/PixarAnimationStudios/OpenUSD/pull/3418))
+
+- Improved diagnostics when rejecting primvars due to invalid interpolation
+  values.
+
+- HdxAovInputTask no longer reads from an invalid memory address when converting
+  a Float32Vec3 texture to a Float32Vec4 texture.
+
+- Fixed a regression that could result in assert failures in the aovInputTask
+  when using some renderer plugins on macOS systems with discrete GPUs.
+  (Issue: [#3470](https://github.com/PixarAnimationStudios/OpenUSD/issues/3470))
+
+- Added the ability to only load Hydra Scene Index Plugins for a given set of
+  apps, using the new "loadWithApps" HdSceneIndexPlugin type registration field.
+  Plugin types can limit which apps will auto-load the plugin. When creating a
+  scene index, an app name can be provided to load only specific plugins. Not
+  specifying (or providing an empty) loadWithApps means all existing plugins
+  will continue to be loaded normally.
+
+- Fixed a bug in HdMergingSceneIndex where it would not send out the proper
+  notices when removing a prim.
+  (Issue: [#3261](https://github.com/PixarAnimationStudios/OpenUSD/issues/3261),
+  [#3471](https://github.com/PixarAnimationStudios/OpenUSD/issues/3471),
+  PR: [#3263](https://github.com/PixarAnimationStudios/OpenUSD/pull/3263))
+
+- Added scene index emulation support for tasks, along with
+  HdxFreeCameraPrimDataSource. Together, these are intended to make
+  HdxTaskController a scene index.
+
+- Added an overload of HdEngine::Execute taking task paths, rather than task
+  pointers.
+
+- Added UsdviewqHydraObserver::GetNestedInputDisplayNames and
+  TargetToNestedInputSceneIndex APIs for inspecting nested input scene indices.
+
+- Added HdsiSceneMaterialPruningSceneIndex, which can implement
+  material-specific behaviors that HdsiPrimTypePruningSceneIndex, previously
+  used for the same purpose, cannot.
+
+- Fixed a case of over-invalidation in dirty bits emulation where repr and
+  display style were invalidating each other.
+
+- Fixed dependency bugs in HdsiMaterialPrimvarTransferSceneIndex.
+
+- Fixed HdMapContainerDataSource producing output entries for keys with no input
+  entry.
+
+- Fixed a bug in the dependency forwarding scene index to clear and recompute
+  the dependency table entries for a prim when its dependencies are dirty.
+
+- Fixed propagation of light linking dirty bits for instancers in backend
+  emulation.
+
+- Fixed unitTestHelper byteSize field.
+  (PR: [#3374](https://github.com/PixarAnimationStudios/OpenUSD/pull/3374))
+
+- Made correctness fix for HdRenderIndex::RemoveInputScene.
+  (PR: [#3304](https://github.com/PixarAnimationStudios/OpenUSD/pull/3304))
+
+- Implemented a vectorized version of HdxPrimOriginInfo::FromPickHit.
+  (PR: [#3413](https://github.com/PixarAnimationStudios/OpenUSD/pull/3413))
+
+- HdLegacyGeomSubsetSceneIndex will now properly clear its cache of descendants
+  when an ancestor prim is removed.
+
+- HdSceneIndexAdapterSceneDelegate will now use the render delegate's material
+  binding purpose when retrieving material bindings.
+  (Issue: [#3320](https://github.com/PixarAnimationStudios/OpenUSD/issues/3320),
+  PR: [#3352](https://github.com/PixarAnimationStudios/OpenUSD/pull/3352))
+
+### UsdImaging
+
+- **Important**: Deprecated UsdImagingGLRenderParams::enableIdRender setting in
+  favor of using the primId AOV. This setting will be removed in the following
+  release.
+
+- UsdPreviewSurface's shader now computes a BRDF based on clearcoat roughness
+  for the indirect lighting clearcoat component.
+
+- UsdPreviewSurface's shader now applies opacity to all components of the color,
+  instead of only to the diffuse component.
+
+- Added a new opacityMode input to the Preview Surface spec to control whether
+  or not fully transparent materials receive a specular response. HdPrman's
+  implementation of PreviewSurface was also updated to match the updated spec.
+
+- Improved type coverage of UsdImagingDataSourceAttribute.
+  (Issue: [#3298](https://github.com/PixarAnimationStudios/OpenUSD/issues/3298))
+
+- Made change to ensure HdSceneIndexAdapterSceneDelegate::GetMaterialResource
+  considers all contexts.
+  (Issue: [#3286](https://github.com/PixarAnimationStudios/OpenUSD/issues/3286),
+  PR: [#3288](https://github.com/PixarAnimationStudios/OpenUSD/pull/3288))
+
+- Added a 2 * epsilon gap between opposing axis faces of drawMode cards cross
+  geometry to avoid issues due to co-planarity. This significantly improves the
+  stability of rendering, particularly with RenderMan.
+
+### Storm
+
+- Made various improvements to prim id rendering in Storm.
+  - Deprecated HdxRenderTaskParams::enableIdRender setting in favor of using
+    the primId AOV.
+  - Added support for int32 AOV format.
+  - Introduced HdSt_RenderPassShaderKey to flexibly generate render pass
+    shaders based on desired AOVs.
+
+- Fixed Vulkan crashes due to missing extensions and layers, and added reporting
+  of missing layers and extensions.
+  (PR: [#3420](https://github.com/PixarAnimationStudios/OpenUSD/pull/3420))
+
+- Added a null GL context to use when X11 isn't available, so tests can still
+  run when not using HgiGL.
+  (PR: [#3391](https://github.com/PixarAnimationStudios/OpenUSD/pull/3391))
+
+- Added OpenGL 3.1/GLSL 1.4 support to HgiInterop, using a VertexArray as
+  required by Apple OpenGL.
+  (PR: [#3391](https://github.com/PixarAnimationStudios/OpenUSD/pull/3391))
+
+- Added HGIVULKAN_VERIFY_VK_RESULT macro that verifies the result of a Vulkan
+  call and prints the result string if it fails.
+  (PR: [#3333](https://github.com/PixarAnimationStudios/OpenUSD/pull/3333))
+
+- Fixed a bug in HgiVulkanBlitCmds::CopyBufferCpuToGpu.
+  (PR: [#3382](https://github.com/PixarAnimationStudios/OpenUSD/pull/3382))
+
+- Fixed various HdSt and HgiVulkan build errors on Windows.
+  (PR: [#3411](https://github.com/PixarAnimationStudios/OpenUSD/pull/3411))
+
+- Updated HdSt test outputs and baselines so test outputs can be compared using
+  `FC.exe` on Windows without any additional flags.
+  (PR: [#3409](https://github.com/PixarAnimationStudios/OpenUSD/pull/3409))
+
+- Various fixes to HdSt tests to enable their passing when using HgiVulkan with
+  Lavapipe.
+  (PR: [#3170](https://github.com/PixarAnimationStudios/OpenUSD/pull/3170))
+
+- Dome light environment texture sampling now converts infinite or NaN values in
+  any channel of a pixel to zero.
+
+- Added a dependency between rprim material binding and bound material.
+
+- Set GL_UNPACK_ALIGNMENT=1 to avoid texture distortion for single channel
+  textures.
+  (Issue: [#3260](https://github.com/PixarAnimationStudios/OpenUSD/issues/3260),
+  PR: [#3262](https://github.com/PixarAnimationStudios/OpenUSD/pull/3262))
+
+### RenderMan Hydra Plugin
+
+- Improved diagnostics when issuing warnings about specific material networks.
+
+- HdPrman now avoids deleting and re-creating light instances in more cases when
+  editing lights.
+
+- HdPrman now enables parallel sync for additional Hydra prim types, including
+  materials and coordinate systems. This can be turned off with the environment
+  setting `HD_PRMAN_ENABLE_PARALLEL_PRIM_SYNC`, which defaults to enabled.
+
+- Added scene index HdPrman_PreviewSurfacePrimvarsSceneIndex that adds the
+  primvar displacementbound:sphere to material primvars of materials using
+  UsdPreviewSurface (to be transferred to the gprim later by the
+  HdsiMaterialPrimvarTransferSceneIndex).
+
+- Updated args parser to support "sdrIgnore" and "sdrUsdDefault". "sdrIgnore"
+  can be set on shader properties in the args file to suppress the property
+  from being added to its sdr representation. "sdrUsdDefault" can be used to
+  set a specific default value for USD schema purposes.
+
+- Added support for more kinds of terminal nodes for material networks that use
+  MaterialX patterns. Also, more than one node can now make up the bxdf part of
+  the network, e.g. if layering is involved.
+
+- Added code to force hider:incremental to be enabled for interactive renders.
+
+- Fixed a bug where the mapping from common USD AOV names, like 'normal', to
+  RenderMan-style names, like 'Nn', would inadvertently apply even when the AOV
+  already had an associated 'lpe'. The mapping now happens only if the
+  sourceType wasn't already specified to be 'lpe' or 'primvar'.
+
+- Improved robustness if the scene provides a primvar schema without a value
+  datasource.
+
+- Made a change to register dependencies from a light on its targeted light
+  filters, to forward invalidations when the prims are backed by a non-emulated
+  scene index.
+
+- Made optimizations to HdPrman_UpdateObjectSettingsSceneIndex to avoid
+  unnecessary computation.
+
+- Removed Python bindings from rmanArgsParser.
+
+- Added the environment setting `HD_PRMAN_ALL_LIGHTS_FIXED_SAMPLE_COUNT`
+  (int, default: -1). When set to a value > -1, hdPrman will set
+  fixedSampleCount to the given value on all visible supported lights it
+  encounters, overriding any value that might be authored on the light. This
+  setting is intended to aid with testing, and should not be used in production.
+
+- Reduced pre-allocation of time sample arrays within instanced hierarchies to
+  improve memory performance on scenes with heavy instancing.
+
+- Added experimental legacy mesh light support: PxrMesh and PxrVolume schemas
+  now derive from their respective geometry classes rather than Light.
+
+- Lights will now properly depend on and update with their targeted light
+  filters.
+
+- Role information for certain integrator, projection, and display and sample
+  filter parameters is now retrieved by inspecting the relevant shader via
+  SdrRegistry, rather than relying on lookup tables previously in
+  projectionParams.h/cpp.
+
+- Added support for role lookup of integrator parameters set via
+  RenderSettings & Integrator prims.
+
+- Fixed an issue where geometry prototype creation fails for an instanced gprim.
+  Previously, this situation led to a fatal error about a missing light shader.
+  Now we issue a non-fatal warning with an accurate message.
+
+- Fixed an issue that caused ri:hider:jitter to be ignored when authored in
+  RenderSettings.
+
+### MaterialX
+
+- Allowed USD to be run with either MaterialX v1.38 or v1.39.
+  (PR: [#3159](https://github.com/PixarAnimationStudios/OpenUSD/pull/3159))
+
+- Added MaterialXConfigAPI and dataSourceAttributeTypeName to help support USD
+  files that were written with v1.38 MaterialX nodes but run with MaterialX
+  v1.39.
+  (PR: [#3157](https://github.com/PixarAnimationStudios/OpenUSD/pull/3157))
+
+- Fixed crash referencing image NodeDef.
+  (PR: [#3344](https://github.com/PixarAnimationStudios/OpenUSD/pull/3344))
+
+### usdview
+
+- Resolved labels are now displayed in a new "Resolved Labels" row in
+  the attribute editor. Labels are resolved for prims and ancestors using the
+  usdSemantics schema and APIs.
+  (PR: [#3300](https://github.com/PixarAnimationStudios/OpenUSD/pull/3300))
+
+- Removed unexposed "enableIdRender" setting from usdview.
+
+### usdchecker
+
+- Ported usdchecker to C++ and enabled it to use the new validation framework.
+  Clients can opt in with the `--useNewValidationFramework` flag. All the core
+  USD validators available in the old UsdUtilsComplianceChecker have been
+  ported to the new validation framework. This option will be made the default
+  in a subsequent OpenUSD release.
+
+- Source for usdchecker has been migrated to pxr/usdValidation/bin/usdchecker.
+
+- Added new flags (`--variantSets`, `--variants`, `--disableVariantValidationLimit`)
+  to provide more control over variant validation. These flags are only valid
+  when `--useNewValidationFramework` is used.
+
+### usdrecord
+
+- Added command-line arguments to enable performance tracing and allocation
+  tracking. These match the options provided by usdview.
+
+### usdmeasureperformance
+
+- Added optional trace file output via the `--tracedir` argument.
+
+### Documentation
+
+- Added a basic overview with examples for the relocates composition arc to the
+  [Terms and Concepts page](https://openusd.org/release/glossary.html#relocates).
+  Updated the [Namespace Editor user guide](https://openusd.org/release/user_guides/namespace_editing.html)
+  to demonstrate how the namespace editor uses relocates when necessary.
+
+- Added example asset that demonstrates using the UsdSemantics schema, available
+  in `extras/usd/examples/usdSemanticsExamples/bookshelf.usda`.
+  (PR: [#3390](https://github.com/PixarAnimationStudios/OpenUSD/pull/3390))
+
+- Updated documentation of HdSceneDelegate::SamplePrimvar(), SampleTransform(),
+  and related methods to reflect new behavior with respect to interval
+  bracketing samples introduced in 24.08.
+
+- Updated usdchecker documentation to include the new validation framework
+  options.
+
+- Various documentation updates:
+  - Added OpenExec and Animated Characters presentation slides to the
+    [Downloads and Videos page](https://openusd.org/release/dl_downloads.html).
+  - Updated `VERSIONS.md` to note that OpenUSD requires v3.0 or greater of the
+    Jinja2 package.
+    (PR: [#3314](https://github.com/PixarAnimationStudios/OpenUSD/pull/3314))
+  - Added documentation for the TF_DESCRIBE_SCOPE() macro.
+  - Updated "Generating New Schema Classes" tutorial with note regarding DLL
+    warning on Windows.
+    (Issue: [#3376](https://github.com/PixarAnimationStudios/OpenUSD/issues/3376),
+    PR: [#3394](https://github.com/PixarAnimationStudios/OpenUSD/pull/3394))
+  - Updated UsdSemantics overview docs to disambiguate the UsdSemantics schema
+    from model hierarchy concepts.
+    (PR: [#3385](https://github.com/PixarAnimationStudios/OpenUSD/pull/3385))
+  - Fixed "unsupported type" typo.
+    (PR: [#3266](https://github.com/PixarAnimationStudios/OpenUSD/pull/3266))
+
+
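The Build notes above describe pinning SHA256 checksums for the boost archives downloaded by build_usd.py. A minimal standalone sketch of that verification pattern follows, equivalent to the `ComputeSHA256Hash` helper added in the diff (the `iter`-with-sentinel loop is this sketch's choice; the script itself uses a `while` loop):

```python
import hashlib

def compute_sha256(filename, chunk_size=4096):
    """Hash a file in fixed-size chunks so large archives never need to be
    read into memory all at once."""
    hasher = hashlib.sha256()
    with open(filename, "rb") as f:
        # iter() with a b"" sentinel stops once read() returns empty bytes.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()

# A downloader would compare this digest against a pinned constant (e.g. the
# BOOST_SHA256 values in the diff) and raise an error on mismatch.
```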
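For the scalar xformOps added in the USD section (the sketch referenced there), here is a speculative Python example. The `UsdGeom.XformOp.TypeTranslateX` token is an assumption inferred from the op names listed in the notes, not a confirmed binding name:

```python
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
xform = UsdGeom.Xform.Define(stage, "/Rig")

# A single-axis translation authored as one double-valued op rather than a
# full GfVec3d translate op. TypeTranslateX is assumed here to mirror the
# "translateX" op described in the release notes.
op = xform.AddXformOp(UsdGeom.XformOp.TypeTranslateX)
op.Set(5.0)
```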
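For the UsdUtilsModifyAssetPaths fix noted in the USD section (the usage sketch referenced there), below is a minimal example of the long-standing Python API whose SdfAssetPath-metadata-preserving behavior was restored; the layer path and rewrite rule are hypothetical:

```python
from pxr import Sdf, UsdUtils

layer = Sdf.Layer.FindOrOpen("shot.usda")  # hypothetical input layer

def retarget(asset_path):
    # Returning a modified string rewrites the authored asset path; per the
    # 25.02 fix, metadata carried on SdfAssetPath values is preserved.
    return asset_path.replace("/old/textures/", "/new/textures/")

if layer:
    UsdUtils.ModifyAssetPaths(layer, retarget)
```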
+ Previous Releases + ## [24.11] - 2024-10-25 ### Build @@ -524,9 +1003,6 @@ - Fixed crash when reading .usdc file with corrupt SdfSpecType values, see [security advisory on github](https://github.com/PixarAnimationStudios/OpenUSD/security/advisories/GHSA-4j7j-gm3f-m63w). -
- Previous Releases - ## [24.08] - 2024-07-25 OpenUSD is now licensed under the Tomorrow Open Source Technology license. This diff --git a/USD_CLA_Corporate.pdf b/USD_CLA_Corporate.pdf index 5ce0c123a4..2eb15a14bd 100644 Binary files a/USD_CLA_Corporate.pdf and b/USD_CLA_Corporate.pdf differ diff --git a/USD_CLA_Individual.pdf b/USD_CLA_Individual.pdf index 8763c6ca00..40fabb6779 100644 Binary files a/USD_CLA_Individual.pdf and b/USD_CLA_Individual.pdf differ diff --git a/VERSIONS.md b/VERSIONS.md index 5783e3f99d..7d68ca84ec 100644 --- a/VERSIONS.md +++ b/VERSIONS.md @@ -30,7 +30,7 @@ Our test machines have the following software versions installed. | Alembic | 1.8.5 | 1.8.5 | 1.8.5 | | OpenEXR | 3.1.11 | 3.1.11 | 3.1.11 | | MaterialX | 1.38.10 | 1.38.10 | 1.38.10 | -| Jinja2 | 2.0 | | | +| Jinja2 | 3.1.2 | | | | Flex | 2.5.39 | | | | Bison | 2.4.1 | | | | Doxygen | 1.9.6 | | | diff --git a/build_scripts/apple_utils.py b/build_scripts/apple_utils.py index d2039a3d03..52cac96e51 100644 --- a/build_scripts/apple_utils.py +++ b/build_scripts/apple_utils.py @@ -204,7 +204,11 @@ def CreateUniversalBinaries(context, libNames, x86Dir, armDir): "{XCODE_ROOT}/Toolchains/XcodeDefault.xctoolchain/usr/bin/lipo".format( XCODE_ROOT=xcodeRoot) for libName in libNames: - outputName = os.path.join(context.instDir, "lib", libName) + outputDir = os.path.join(context.instDir, "lib") + if not os.path.isdir(outputDir): + os.mkdir(outputDir) + + outputName = os.path.join(outputDir, libName) if not os.path.islink("{x86Dir}/{libName}".format( x86Dir=x86Dir, libName=libName)): if os.path.exists(outputName): diff --git a/build_scripts/build_usd.py b/build_scripts/build_usd.py index 6d4e8feb05..8b0ddf5f81 100644 --- a/build_scripts/build_usd.py +++ b/build_scripts/build_usd.py @@ -19,6 +19,7 @@ import datetime import fnmatch import glob +import hashlib import locale import multiprocessing import os @@ -482,6 +483,16 @@ def GetCMakeVersion(): else: return (int(major), int(minor), int(patch)) +def ComputeSHA256Hash(filename): + """Returns the SHA256 hash of the specified file.""" + hasher = hashlib.sha256() + with open(filename, "rb") as f: + buf = None + while buf != b'': + buf = f.read(4096) + hasher.update(buf) + return hasher.hexdigest() + def PatchFile(filename, patches, multiLineMatches=False): """Applies patches to the specified file. patches is a list of tuples (old string, new string).""" @@ -522,18 +533,31 @@ def DownloadFileWithUrllib(url, outputFilename): outfile.write(r.read()) def DownloadURL(url, context, force, extractDir = None, - dontExtract = None): + dontExtract = None, destFileName = None, + expectedSHA256 = None): """Download and extract the archive file at given URL to the source directory specified in the context. dontExtract may be a sequence of path prefixes that will be excluded when extracting the archive. + destFileName may be a string containing the filename where + the file will be downloaded. If unspecified, this filename + will be derived from the URL. + + expectedSHA256 may be a string containing the expected SHA256 + checksum for the downloaded file. If provided, this function + will raise a RuntimeError if the SHA256 checksum computed from + the file does not match. + Returns the absolute path to the directory where files have been extracted.""" with CurrentWorkingDirectory(context.srcDir): - # Extract filename from URL and see if file already exists. 
- filename = url.split("/")[-1] + if destFileName: + filename = destFileName + else: + filename = url.split("/")[-1] + if force and os.path.exists(filename): os.remove(filename) @@ -580,6 +604,15 @@ def DownloadURL(url, context, force, extractDir = None, raise RuntimeError("Failed to download {url}: {err}" .format(url=url, err=errorMsg)) + if expectedSHA256: + computedSHA256 = ComputeSHA256Hash(tmpFilename) + if computedSHA256 != expectedSHA256: + raise RuntimeError( + "Unexpected SHA256 for {url}: got {computed}, " + "expected {expected}".format( + url=url, computed=computedSHA256, + expected=expectedSHA256)) + shutil.move(tmpFilename, filename) # Open the archive and retrieve the name of the top-most directory. @@ -687,15 +720,15 @@ def InstallZlib(context, force, buildArgs): # They're not required for use on any platforms, so we elide them # for efficiency PatchFile("CMakeLists.txt", - [("add_executable(example test/example.c)", + [("add_executable(example test/example.c)", ""), - ("add_executable(minigzip test/minigzip.c)", + ("add_executable(minigzip test/minigzip.c)", ""), - ("target_link_libraries(example zlib)", + ("target_link_libraries(example zlib)", ""), - ("target_link_libraries(minigzip zlib)", + ("target_link_libraries(minigzip zlib)", ""), - ("add_test(example example)", + ("add_test(example example)", "")]) RunCMake(context, force, buildArgs) @@ -723,35 +756,41 @@ def InstallZlib(context, force, buildArgs): "include/boost/version.hpp", "include/boost-1_76/boost/version.hpp", "include/boost-1_78/boost/version.hpp", - "include/boost-1_82/boost/version.hpp" + "include/boost-1_82/boost/version.hpp", + "include/boost-1_86/boost/version.hpp" ] def InstallBoost_Helper(context, force, buildArgs): # In general we use boost 1.76.0 to adhere to VFX Reference Platform CY2022. # However, there are some cases where a newer version is required. + # - Building with Visual Studio 2022 with the 14.4x toolchain requires boost + # 1.86.0 or newer, we choose it for all Visual Studio 2022 versions for + # simplicity. # - Building with Python 3.11 requires boost 1.82.0 or newer # (https://github.com/boostorg/python/commit/a218ba) - # - Building on MacOS requires v1.82.0 or later for C++17 support starting - # with Xcode 15. We choose to use this version for all MacOS builds for + # - Building on MacOS requires v1.82.0 or later for C++17 support starting + # with Xcode 15. We choose to use this version for all MacOS builds for # simplicity." # - Building with Python 3.10 requires boost 1.76.0 or newer # (https://github.com/boostorg/python/commit/cbd2d9) # XXX: Due to a typo we've been using 1.78.0 in this case for a while. # We're leaving it that way to minimize potential disruption. - # - Building with Visual Studio 2022 requires boost 1.78.0 or newer. - # (https://github.com/boostorg/build/issues/735) # - Building on MacOS requires boost 1.78.0 or newer to resolve Python 3 # compatibility issues on Big Sur and Monterey. 
pyInfo = GetPythonInfo(context) pyVer = (int(pyInfo[3].split('.')[0]), int(pyInfo[3].split('.')[1])) - if MacOS() or (context.buildBoostPython and pyVer >= (3,11)): - BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.82.0/source/boost_1_82_0.zip" + if IsVisualStudio2022OrGreater(): + BOOST_VERSION = (1, 86, 0) + BOOST_SHA256 = "cd20a5694e753683e1dc2ee10e2d1bb11704e65893ebcc6ced234ba68e5d8646" + elif MacOS() or (context.buildBoostPython and pyVer >= (3,11)): + BOOST_VERSION = (1, 82, 0) + BOOST_SHA256 = "f7c9e28d242abcd7a2c1b962039fcdd463ca149d1883c3a950bbcc0ce6f7c6d9" elif context.buildBoostPython and pyVer >= (3, 10): - BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.zip" - elif IsVisualStudio2022OrGreater(): - BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.zip" + BOOST_VERSION = (1, 78, 0) + BOOST_SHA256 = "f22143b5528e081123c3c5ed437e92f648fe69748e95fa6e2bd41484e2986cc3" else: - BOOST_URL = "https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.zip" + BOOST_VERSION = (1, 76, 0) + BOOST_SHA256 = "0fd43bb53580ca54afc7221683dfe8c6e3855b351cd6dce53b1a24a7d7fbeedd" # Documentation files in the boost archive can have exceptionally # long paths. This can lead to errors when extracting boost on Windows, @@ -765,8 +804,34 @@ def InstallBoost_Helper(context, force, buildArgs): "*/libs/wave/test/testwave/testfiles/utf8-test-*" ] - with CurrentWorkingDirectory(DownloadURL(BOOST_URL, context, force, - dontExtract=dontExtract)): + # Provide backup sources for downloading boost to avoid issues when + # one mirror goes down. + major, minor, patch = BOOST_VERSION + version = f"{major}.{minor}.{patch}" + filename = f"boost_{major}_{minor}_{patch}.zip" + urls = [ + # The sourceforge mirror is typically faster than archives.boost.io + # so we use that first. 
+ f"https://sourceforge.net/projects/boost/files/boost/{version}/{filename}/download", + f"https://archives.boost.io/release/{version}/source/{filename}" + ] + + sourceDir = None + for url in urls: + try: + sourceDir = DownloadURL(url, context, force, + dontExtract=dontExtract, + destFileName=filename, + expectedSHA256=BOOST_SHA256) + break + except Exception as e: + PrintWarning(str(e)) + if url != urls[-1]: + PrintWarning("Trying alternative sources") + else: + raise RuntimeError("Failed to download boost") + + with CurrentWorkingDirectory(sourceDir): if Windows(): bootstrap = "bootstrap.bat" else: @@ -859,6 +924,7 @@ def InstallBoost_Helper(context, force, buildArgs): b2_settings.append("--with-date_time") if context.buildOIIO or context.enableOpenVDB: + b2_settings.append("--with-chrono") b2_settings.append("--with-system") b2_settings.append("--with-thread") @@ -955,7 +1021,7 @@ def InstallBoost(context, force, buildArgs): def InstallOneTBB(context, force, buildArgs): with CurrentWorkingDirectory(DownloadURL(ONETBB_URL, context, force)): - RunCMake(context, force, + RunCMake(context, force, ['-DTBB_TEST=OFF', '-DTBB_STRICT=OFF'] + buildArgs) @@ -1209,7 +1275,7 @@ def InstallPNG(context, force, buildArgs): ############################################################ # IlmBase/OpenEXR -OPENEXR_URL = "https://github.com/AcademySoftwareFoundation/openexr/archive/refs/tags/v3.1.11.zip" +OPENEXR_URL = "https://github.com/AcademySoftwareFoundation/openexr/archive/refs/tags/v3.1.13.zip" def InstallOpenEXR(context, force, buildArgs): with CurrentWorkingDirectory(DownloadURL(OPENEXR_URL, context, force)): @@ -1311,7 +1377,7 @@ def InstallOpenVDB(context, force, buildArgs): ############################################################ # OpenImageIO -OIIO_URL = "https://github.com/OpenImageIO/oiio/archive/refs/tags/v2.3.21.0.zip" +OIIO_URL = "https://github.com/OpenImageIO/oiio/archive/refs/tags/v2.5.16.0.zip" def InstallOpenImageIO(context, force, buildArgs): with CurrentWorkingDirectory(DownloadURL(OIIO_URL, context, force)): @@ -1705,6 +1771,11 @@ def InstallUSD(context, force, buildArgs): extraArgs.append('-DPXR_BUILD_USD_TOOLS=ON') else: extraArgs.append('-DPXR_BUILD_USD_TOOLS=OFF') + + if context.buildUsdValidation: + extraArgs.append('-DPXR_BUILD_USD_VALIDATION=ON') + else: + extraArgs.append('-DPXR_BUILD_USD_VALIDATION=OFF') if context.buildImaging: extraArgs.append('-DPXR_BUILD_IMAGING=ON') @@ -1815,6 +1886,13 @@ def InstallUSD(context, force, buildArgs): Builds and installs USD and 3rd-party dependencies to specified location. +The `build_usd.py` script by default downloads and installs the zlib library +when necessary on platforms other than Linux. For those platforms, this behavior +may be overridden by supplying the `--no-zlib` command line option. If this +option is used, then the dependencies of OpenUSD which use zlib must be able to +discover the user supplied zlib in the build environment via the means of cmake's +`find_package` utility. + - Libraries: The following is a list of libraries that this script will download and build as needed. 
These names can be used to identify libraries for various script @@ -2022,6 +2100,13 @@ def InstallUSD(context, force, buildArgs): action="store_false", help= "Disable performance-impacting safety checks against " "malformed input files") +subgroup = group.add_mutually_exclusive_group() +subgroup.add_argument("--usdValidation", dest="build_usd_validation", + action="store_true", default=True, help="Build USD " \ + "Validation library and validators (default)") +subgroup.add_argument("--no-usdValidation", dest="build_usd_validation", + action="store_false", help="Do not build USD " \ + "Validation library and validators") group.add_argument("--boost-python", dest="build_boost_python", action="store_true", default=False, @@ -2071,6 +2156,13 @@ def InstallUSD(context, force, buildArgs): subgroup.add_argument("--no-usdview", dest="build_usdview", action="store_false", help="Do not build usdview") +subgroup = group.add_mutually_exclusive_group() +subgroup.add_argument("--zlib", dest="build_zlib", + action="store_true", default=True, + help="Install zlib on behalf of dependencies (default)") +subgroup.add_argument("--no-zlib", dest="build_zlib", + action="store_false", + help="Do not install zlib for dependencies") group = parser.add_argument_group(title="Imaging Plugin Options") subgroup = group.add_mutually_exclusive_group() @@ -2270,6 +2362,7 @@ def __init__(self, args): self.buildExamples = args.build_examples and not embedded self.buildTutorials = args.build_tutorials and not embedded self.buildTools = args.build_tools and not embedded + self.buildUsdValidation = args.build_usd_validation and not embedded # - Documentation self.buildDocs = args.build_docs or args.build_python_docs @@ -2291,6 +2384,9 @@ def __init__(self, args): self.buildUsdview = (self.buildUsdImaging and self.buildPython and args.build_usdview) + + # - zlib + self.buildZlib = args.build_zlib # - Imaging plugins self.buildEmbree = self.buildImaging and args.build_embree @@ -2361,7 +2457,7 @@ def ForceBuildDependency(self, dep): if context.buildOneTBB: TBB = ONETBB -requiredDependencies = [ZLIB, TBB] +requiredDependencies = [TBB] if context.buildBoostPython: requiredDependencies += [BOOST] @@ -2379,18 +2475,18 @@ def ForceBuildDependency(self, dep): if context.buildImaging: if context.enablePtex: - requiredDependencies += [PTEX] + requiredDependencies += [ZLIB, PTEX] requiredDependencies += [OPENSUBDIV] if context.enableOpenVDB: - requiredDependencies += [BLOSC, BOOST, OPENEXR, OPENVDB, TBB] + requiredDependencies += [ZLIB, TBB, BLOSC, BOOST, OPENEXR, OPENVDB] if context.buildOIIO: - requiredDependencies += [BOOST, JPEG, TIFF, PNG, OPENEXR, OPENIMAGEIO] + requiredDependencies += [ZLIB, BOOST, JPEG, TIFF, PNG, OPENEXR, OPENIMAGEIO] if context.buildOCIO: - requiredDependencies += [OPENCOLORIO] + requiredDependencies += [ZLIB, OPENCOLORIO] if context.buildEmbree: requiredDependencies += [TBB, EMBREE] @@ -2401,11 +2497,12 @@ def ForceBuildDependency(self, dep): if context.buildAnimXTests: requiredDependencies += [ANIMX] -# Assume zlib already exists on Linux platforms and don't build -# our own. This avoids potential issues where a host application -# loads an older version of zlib than the one we'd build and link -# our libraries against. -if Linux(): +# Linux provides zlib. Skipping it here avoids issues where a host +# application loads a different version of zlib than the one we build against. +# Building zlib is the default when a dependency requires it, although OpenUSD +# itself does not require it. 
The --no-zlib flag can be passed to the build +# script to allow the dependency to find zlib in the build environment. +if (Linux() or not context.buildZlib) and ZLIB in requiredDependencies: requiredDependencies.remove(ZLIB) # Error out if user is building monolithic library on windows with draco plugin @@ -2605,6 +2702,7 @@ def _JoinVersion(v): summaryMsg += """\ Variant {buildVariant} Target {buildTarget} + UsdValidation {buildUsdValidation} Imaging {buildImaging} Ptex support: {enablePtex} OpenVDB support: {enableOpenVDB} @@ -2684,6 +2782,7 @@ def FormatBuildArguments(buildArgs): buildExamples=("On" if context.buildExamples else "Off"), buildTutorials=("On" if context.buildTutorials else "Off"), buildTools=("On" if context.buildTools else "Off"), + buildUsdValidation=("On" if context.buildUsdValidation else "Off"), buildAlembic=("On" if context.buildAlembic else "Off"), buildDraco=("On" if context.buildDraco else "Off"), buildMaterialX=("On" if context.buildMaterialX else "Off"), diff --git a/cmake/defaults/Options.cmake b/cmake/defaults/Options.cmake index 74cee361fd..a1b1bc9604 100644 --- a/cmake/defaults/Options.cmake +++ b/cmake/defaults/Options.cmake @@ -16,6 +16,7 @@ option(PXR_BUILD_EMBREE_PLUGIN "Build embree imaging plugin" OFF) option(PXR_BUILD_OPENIMAGEIO_PLUGIN "Build OpenImageIO plugin" OFF) option(PXR_BUILD_OPENCOLORIO_PLUGIN "Build OpenColorIO plugin" OFF) option(PXR_BUILD_USD_IMAGING "Build USD imaging components" ON) +option(PXR_BUILD_USD_VALIDATION "Build USD validation library and core USD validators" ON) option(PXR_BUILD_USDVIEW "Build usdview" ON) option(PXR_BUILD_ALEMBIC_PLUGIN "Build the Alembic plugin for USD" OFF) option(PXR_BUILD_DRACO_PLUGIN "Build the Draco plugin for USD" OFF) diff --git a/cmake/defaults/Packages.cmake b/cmake/defaults/Packages.cmake index 8ac373aa8f..5746dfc912 100644 --- a/cmake/defaults/Packages.cmake +++ b/cmake/defaults/Packages.cmake @@ -266,6 +266,7 @@ if (PXR_BUILD_IMAGING) # --X11 if (CMAKE_SYSTEM_NAME STREQUAL "Linux") find_package(X11) + add_definitions(-DPXR_X11_SUPPORT_ENABLED) endif() # --Embree if (PXR_BUILD_EMBREE_PLUGIN) diff --git a/cmake/defaults/Version.cmake b/cmake/defaults/Version.cmake index e9657e399e..6dd5d5f2d9 100644 --- a/cmake/defaults/Version.cmake +++ b/cmake/defaults/Version.cmake @@ -6,7 +6,7 @@ # # Versioning information set(PXR_MAJOR_VERSION "0") -set(PXR_MINOR_VERSION "24") -set(PXR_PATCH_VERSION "11") # NOTE: Must not have leading 0 for single digits +set(PXR_MINOR_VERSION "25") +set(PXR_PATCH_VERSION "2") # NOTE: Must not have leading 0 for single digits math(EXPR PXR_VERSION "${PXR_MAJOR_VERSION} * 10000 + ${PXR_MINOR_VERSION} * 100 + ${PXR_PATCH_VERSION}") diff --git a/cmake/defaults/msvcdefaults.cmake b/cmake/defaults/msvcdefaults.cmake index 13d77d0ae7..f5780c0db3 100644 --- a/cmake/defaults/msvcdefaults.cmake +++ b/cmake/defaults/msvcdefaults.cmake @@ -11,6 +11,13 @@ set(_PXR_CXX_FLAGS "${_PXR_CXX_FLAGS} /EHsc") # Standards compliant. set(_PXR_CXX_FLAGS "${_PXR_CXX_FLAGS} /Zc:rvalueCast /Zc:strictStrings") +# Visual Studio sets the value of __cplusplus to 199711L regardless of +# the C++ standard actually being used, unless /Zc:__cplusplus is enabled. +# +# For more details, see: +# https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus +set(_PXR_CXX_FLAGS "${_PXR_CXX_FLAGS} /Zc:__cplusplus") + # The /Zc:inline option strips out the "arch_ctor_" symbols used for # library initialization by ARCH_CONSTRUCTOR starting in Visual Studio 2019, # causing release builds to fail. 
Disable the option for this and later @@ -130,3 +137,9 @@ set(_PXR_CXX_FLAGS "${_PXR_CXX_FLAGS} /Gm-") # with no symbols in it. We do this a lot because of a pattern of having # a C++ source file for many header-only facilities, e.g. tf/bitUtils.cpp. set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /IGNORE:4221") + +# Enforce synchronous PDB writes when using Ninja +# (this prevents "permission denied" compile errors on program databases) +if("${CMAKE_GENERATOR}" STREQUAL "Ninja") + set(_PXR_CXX_FLAGS "${_PXR_CXX_FLAGS} /FS") +endif() diff --git a/cmake/macros/Private.cmake b/cmake/macros/Private.cmake index 311524da0c..6fe1134c14 100644 --- a/cmake/macros/Private.cmake +++ b/cmake/macros/Private.cmake @@ -551,7 +551,8 @@ function(_pxr_enable_precompiled_header TARGET_NAME) # Headers live in subdirectories. set(rel_output_header_path "${PXR_PREFIX}/${TARGET_NAME}/${output_header_name}") set(abs_output_header_path "${PROJECT_BINARY_DIR}/include/${rel_output_header_path}") - set(abs_precompiled_path ${PROJECT_BINARY_DIR}/include/${PXR_PREFIX}/${TARGET_NAME}/${CMAKE_BUILD_TYPE}/${precompiled_name}) + set(abs_precompiled_container_path "${PROJECT_BINARY_DIR}/include/${PXR_PREFIX}/${TARGET_NAME}/${CMAKE_BUILD_TYPE}") + set(abs_precompiled_path "${abs_precompiled_container_path}/${precompiled_name}") # Additional compile flags to use precompiled header. This will be set(compile_flags "") @@ -583,22 +584,23 @@ function(_pxr_enable_precompiled_header TARGET_NAME) set(abs_output_source_path ${CMAKE_CURRENT_BINARY_DIR}/${output_header_name_we}.cpp) add_custom_command( OUTPUT "${abs_output_source_path}" + COMMAND ${CMAKE_COMMAND} -E make_directory "${abs_precompiled_container_path}" COMMAND ${CMAKE_COMMAND} -E touch ${abs_output_source_path} ) - # The trigger file gets a special compile flag (/Yc). - set_source_files_properties(${abs_output_source_path} PROPERTIES - COMPILE_FLAGS "/Yc\"${rel_output_header_path}\" /FI\"${rel_output_header_path}\" /Fp\"${abs_precompiled_path}\"" - OBJECT_OUTPUTS "${abs_precompiled_path}" - OBJECT_DEPENDS "${abs_output_header_path}" - ) - # Add the header file to the target. target_sources(${TARGET_NAME} PRIVATE "${abs_output_header_path}") # Add the trigger file to the target. target_sources(${TARGET_NAME} PRIVATE "${abs_output_source_path}") + # The trigger file gets a special compile flag (/Yc). + set_source_files_properties(${abs_output_source_path} PROPERTIES + COMPILE_FLAGS "/Yc\"${rel_output_header_path}\" /FI\"${rel_output_header_path}\" /Fp\"${abs_precompiled_path}\"" + OBJECT_OUTPUTS "${abs_precompiled_path}" + OBJECT_DEPENDS "${abs_output_header_path}" + ) + # Exclude the trigger. 
list(APPEND pch_EXCLUDE ${abs_output_source_path}) else() diff --git a/cmake/macros/Public.cmake b/cmake/macros/Public.cmake index 1b809e738d..2647707ac6 100644 --- a/cmake/macros/Public.cmake +++ b/cmake/macros/Public.cmake @@ -386,6 +386,30 @@ function(pxr_library NAME) set(pch "OFF") endif() + if (PXR_ENABLE_PYTHON_SUPPORT AND args_PYMODULE_CPPFILES) + # If moduleDeps.cpp does not exist, create one + set(moduleDepsFileName "moduleDeps.cpp") + list(FIND args_PYTHON_CPPFILES ${moduleDepsFileName} foundModuleDeps) + if (${foundModuleDeps} EQUAL -1) + # Add moduleDeps.cpp as a built file + list(APPEND args_CPPFILES ${moduleDepsFileName}) + + # Keep only our libraries in the module dependencies + foreach(library ${args_LIBRARIES}) + if (TARGET ${library}) + list(APPEND localLibs ${library}) + endif() + endforeach() + + # Generate moduleDeps.cpp + _get_python_module_name(${NAME} pyModuleName) + add_custom_command( + OUTPUT ${moduleDepsFileName} + COMMAND ${CMAKE_COMMAND} -DlibraryName=${NAME} -DmoduleName=${pyModuleName} -DsourceDir=${PROJECT_SOURCE_DIR} -Dlibraries="${localLibs}" -Doutfile=${moduleDepsFileName} -P "${PROJECT_SOURCE_DIR}/cmake/macros/genModuleDepsCpp.cmake" + DEPENDS "CMakeLists.txt") + endif() + endif() + _pxr_library(${NAME} TYPE "${args_TYPE}" PREFIX "${prefix}" @@ -658,7 +682,24 @@ function(pxr_test_scripts) endif() foreach(file ${ARGN}) - get_filename_component(destFile ${file} NAME_WE) + # Perform regex match to extract both source resource path and + # destination resource path. + # Regex match appropriately takes care of windows drive letter followed + # by a ":", which is also the token we use to separate the source and + # destination resource paths. + string(REGEX MATCHALL "([A-Za-z]:)?([^:]+)" file "${file}") + + list(LENGTH file n) + if (n EQUAL 1) + get_filename_component(destFile ${file} NAME_WE) + elseif (n EQUAL 2) + list(GET file 1 destFile) + list(GET file 0 file) + else() + message(FATAL_ERROR + "Failed to parse test file path ${file}") + endif() + # XXX -- We shouldn't have to install to run tests. install( PROGRAMS ${file} diff --git a/cmake/macros/genModuleDepsCpp.cmake b/cmake/macros/genModuleDepsCpp.cmake new file mode 100644 index 0000000000..ab79d5af95 --- /dev/null +++ b/cmake/macros/genModuleDepsCpp.cmake @@ -0,0 +1,20 @@ +# +# Copyright 2024 Pixar +# +# Licensed under the terms set forth in the LICENSE.txt file available at +# https://openusd.org/license. +# + +# Parse library list +separate_arguments(libraryList NATIVE_COMMAND ${libraries}) + +# Format library list +foreach(library ${libraryList}) + list(APPEND reqLibs "\t\tTfToken(\"${library}\")") +endforeach() +list(JOIN reqLibs ",\n" reqLibs) + +# Read in template file and generate moduleDeps.cpp +file(READ "${sourceDir}/cmake/macros/moduleDeps.cpp.in" fileTemplate) +string(CONFIGURE "${fileTemplate}" fileContents) +file(WRITE ${outfile} ${fileContents}) \ No newline at end of file diff --git a/pxr/base/gf/moduleDeps.cpp b/cmake/macros/moduleDeps.cpp.in similarity index 76% rename from pxr/base/gf/moduleDeps.cpp rename to cmake/macros/moduleDeps.cpp.in index 233f5f9312..c9043709b2 100644 --- a/pxr/base/gf/moduleDeps.cpp +++ b/cmake/macros/moduleDeps.cpp.in @@ -1,5 +1,5 @@ // -// Copyright 2016 Pixar +// Copyright 2024 Pixar // // Licensed under the terms set forth in the LICENSE.txt file available at // https://openusd.org/license. @@ -18,13 +18,10 @@ PXR_NAMESPACE_OPEN_SCOPE TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { // List of direct dependencies for this library. 
const std::vector reqs = { - TfToken("arch"), - TfToken("tf") - }; +${reqLibs} + }\; TfScriptModuleLoader::GetInstance(). - RegisterLibrary(TfToken("gf"), TfToken("pxr.Gf"), reqs); + RegisterLibrary(TfToken("${libraryName}"), TfToken("pxr.${moduleName}"), reqs)\; } -PXR_NAMESPACE_CLOSE_SCOPE - - +PXR_NAMESPACE_CLOSE_SCOPE \ No newline at end of file diff --git a/cmake/macros/testWrapper.py b/cmake/macros/testWrapper.py index 9e4104028d..54c30b3a27 100644 --- a/cmake/macros/testWrapper.py +++ b/cmake/macros/testWrapper.py @@ -157,11 +157,21 @@ def _diff(fileName, baselineDir, verbose, failuresDir=None): # Use the diff program or equivalent, rather than filecmp or similar # because it's possible we might want to specify other diff programs # in the future. + import platform - if platform.system() == 'Windows': - diff = 'fc.exe' - else: - diff = '/usr/bin/diff' + isWindows = platform.system() == 'Windows' + + diffTool = shutil.which('diff') + diffToolBaseArgs = ['--strip-trailing-cr'] + if not diffTool and isWindows: + diffTool = 'fc.exe' + diffToolBaseArgs = ['/t'] + + if not diffTool: + sys.stderr.write( + "Error: could not find \"diff\" or \"fc.exe\" tool. " + "Make sure it's in your PATH.\n") + return False filesToDiff = glob.glob(fileName) if not filesToDiff: @@ -171,7 +181,7 @@ def _diff(fileName, baselineDir, verbose, failuresDir=None): for fileToDiff in filesToDiff: baselineFile = _resolvePath(baselineDir, fileToDiff) - cmd = [diff, baselineFile, fileToDiff] + cmd = [diffTool, *diffToolBaseArgs, baselineFile, fileToDiff] if verbose: print("diffing with {0}".format(cmd)) diff --git a/cmake/modules/FindOpenEXR.cmake b/cmake/modules/FindOpenEXR.cmake deleted file mode 100644 index 7cf5d46810..0000000000 --- a/cmake/modules/FindOpenEXR.cmake +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright 2016 Pixar -# -# Licensed under the terms set forth in the LICENSE.txt file available at -# https://openusd.org/license. -# - -find_path(OPENEXR_INCLUDE_DIR - OpenEXR/half.h -HINTS - "${OPENEXR_LOCATION}" - "$ENV{OPENEXR_LOCATION}" -PATH_SUFFIXES - include/ -DOC - "OpenEXR headers path" -) - -if(OPENEXR_INCLUDE_DIR) - set(openexr_config_file "${OPENEXR_INCLUDE_DIR}/OpenEXR/OpenEXRConfig.h") - if(EXISTS ${openexr_config_file}) - file(STRINGS - ${openexr_config_file} - TMP - REGEX "#define OPENEXR_VERSION_STRING.*$") - string(REGEX MATCHALL "[0-9.]+" OPENEXR_VERSION ${TMP}) - - file(STRINGS - ${openexr_config_file} - TMP - REGEX "#define OPENEXR_VERSION_MAJOR.*$") - string(REGEX MATCHALL "[0-9]" OPENEXR_MAJOR_VERSION ${TMP}) - - file(STRINGS - ${openexr_config_file} - TMP - REGEX "#define OPENEXR_VERSION_MINOR.*$") - string(REGEX MATCHALL "[0-9]" OPENEXR_MINOR_VERSION ${TMP}) - endif() -endif() - -foreach(OPENEXR_LIB - Half - Iex - Imath - IlmImf - IlmThread - IlmImfUtil - IexMath - ) - - # OpenEXR libraries may be suffixed with the version number, so we search - # using both versioned and unversioned names. 
- set(DEBUG_POSTFIX ) - if(DEFINED PXR_USE_DEBUG_BUILD) - if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" AND ${PXR_USE_DEBUG_BUILD} MATCHES ON) - set(DEBUG_POSTFIX _d) - endif() - endif() - find_library(OPENEXR_${OPENEXR_LIB}_LIBRARY - NAMES - ${OPENEXR_LIB}-${OPENEXR_MAJOR_VERSION}_${OPENEXR_MINOR_VERSION}${DEBUG_POSTFIX} - ${OPENEXR_LIB}{DEBUG_POSTFIX} - HINTS - "${OPENEXR_LOCATION}" - "$ENV{OPENEXR_LOCATION}" - PATH_SUFFIXES - lib/ - DOC - "OPENEXR's ${OPENEXR_LIB} library path" - ) - - if(OPENEXR_${OPENEXR_LIB}_LIBRARY) - list(APPEND OPENEXR_LIBRARIES ${OPENEXR_${OPENEXR_LIB}_LIBRARY}) - endif() -endforeach(OPENEXR_LIB) - -# So #include works -list(APPEND OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR}) -list(APPEND OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR}/OpenEXR) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(OpenEXR - REQUIRED_VARS - OPENEXR_INCLUDE_DIRS - OPENEXR_LIBRARIES - VERSION_VAR - OPENEXR_VERSION -) - diff --git a/docs/_templates/perf_metric_alab_template.tmpl b/docs/_templates/perf_metric_alab_template.tmpl index f535a1243d..7f3e09695d 100644 --- a/docs/_templates/perf_metric_alab_template.tmpl +++ b/docs/_templates/perf_metric_alab_template.tmpl @@ -1,4 +1,5 @@ {% set alab_metrics_linux_24_11 = load('performance/24.11_linux_alab.yaml', data_format='yaml') %} +{% set alab_metrics_linux_25_02 = load('performance/25.02_linux_alab.yaml', data_format='yaml') %} .. list-table:: Linux Metrics (min/max/mean in seconds) :header-rows: 1 @@ -6,28 +7,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ alab_metrics_linux_24_11['open_stage']['min'] }} | max: {{ alab_metrics_linux_24_11['open_stage']['max'] }} | mean: {{ alab_metrics_linux_24_11['open_stage']['mean']}} + - | min: {{ alab_metrics_linux_25_02['open_stage']['min'] }} + | max: {{ alab_metrics_linux_25_02['open_stage']['max'] }} + | mean: {{ alab_metrics_linux_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ alab_metrics_linux_24_11['create_first_image']['min'] }} | max: {{ alab_metrics_linux_24_11['create_first_image']['max'] }} | mean: {{ alab_metrics_linux_24_11['create_first_image']['mean']}} + - | min: {{ alab_metrics_linux_25_02['create_first_image']['min'] }} + | max: {{ alab_metrics_linux_25_02['create_first_image']['max'] }} + | mean: {{ alab_metrics_linux_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ alab_metrics_linux_24_11['close_stage']['min'] }} | max: {{ alab_metrics_linux_24_11['close_stage']['max'] }} | mean: {{ alab_metrics_linux_24_11['close_stage']['mean']}} + - | min: {{ alab_metrics_linux_25_02['close_stage']['min'] }} + | max: {{ alab_metrics_linux_25_02['close_stage']['max'] }} + | mean: {{ alab_metrics_linux_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ alab_metrics_linux_24_11['shut_down_hydra']['min'] }} | max: {{ alab_metrics_linux_24_11['shut_down_hydra']['max'] }} | mean: {{ alab_metrics_linux_24_11['shut_down_hydra']['mean']}} + - | min: {{ alab_metrics_linux_25_02['shut_down_hydra']['min'] }} + | max: {{ alab_metrics_linux_25_02['shut_down_hydra']['max'] }} + | mean: {{ alab_metrics_linux_25_02['shut_down_hydra']['mean']}} - TBD {% set alab_metrics_macos_24_11 = load('performance/24.11_macos_alab.yaml', data_format='yaml') %} +{% set alab_metrics_macos_25_02 = load('performance/25.02_macos_alab.yaml', data_format='yaml') %} .. 
list-table:: macOS Metrics (min/max/mean in seconds) :header-rows: 1 @@ -35,28 +50,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ alab_metrics_macos_24_11['open_stage']['min'] }} | max: {{ alab_metrics_macos_24_11['open_stage']['max'] }} | mean: {{ alab_metrics_macos_24_11['open_stage']['mean']}} + - | min: {{ alab_metrics_macos_25_02['open_stage']['min'] }} + | max: {{ alab_metrics_macos_25_02['open_stage']['max'] }} + | mean: {{ alab_metrics_macos_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ alab_metrics_macos_24_11['create_first_image']['min'] }} | max: {{ alab_metrics_macos_24_11['create_first_image']['max'] }} | mean: {{ alab_metrics_macos_24_11['create_first_image']['mean']}} + - | min: {{ alab_metrics_macos_25_02['create_first_image']['min'] }} + | max: {{ alab_metrics_macos_25_02['create_first_image']['max'] }} + | mean: {{ alab_metrics_macos_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ alab_metrics_macos_24_11['close_stage']['min'] }} | max: {{ alab_metrics_macos_24_11['close_stage']['max'] }} | mean: {{ alab_metrics_macos_24_11['close_stage']['mean']}} + - | min: {{ alab_metrics_macos_25_02['close_stage']['min'] }} + | max: {{ alab_metrics_macos_25_02['close_stage']['max'] }} + | mean: {{ alab_metrics_macos_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ alab_metrics_macos_24_11['shut_down_hydra']['min'] }} | max: {{ alab_metrics_macos_24_11['shut_down_hydra']['max'] }} | mean: {{ alab_metrics_macos_24_11['shut_down_hydra']['mean']}} + - | min: {{ alab_metrics_macos_25_02['shut_down_hydra']['min'] }} + | max: {{ alab_metrics_macos_25_02['shut_down_hydra']['max'] }} + | mean: {{ alab_metrics_macos_25_02['shut_down_hydra']['mean']}} - TBD {% set alab_metrics_win_24_11 = load('performance/24.11_windows_alab.yaml', data_format='yaml') %} +{% set alab_metrics_win_25_02 = load('performance/25.02_windows_alab.yaml', data_format='yaml') %} .. 
list-table:: Windows Metrics (min/max/mean in seconds) :header-rows: 1 @@ -64,23 +93,36 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ alab_metrics_win_24_11['open_stage']['min'] }} | max: {{ alab_metrics_win_24_11['open_stage']['max'] }} | mean: {{ alab_metrics_win_24_11['open_stage']['mean']}} + - | min: {{ alab_metrics_win_25_02['open_stage']['min'] }} + | max: {{ alab_metrics_win_25_02['open_stage']['max'] }} + | mean: {{ alab_metrics_win_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ alab_metrics_win_24_11['create_first_image']['min'] }} | max: {{ alab_metrics_win_24_11['create_first_image']['max'] }} | mean: {{ alab_metrics_win_24_11['create_first_image']['mean']}} + - | min: {{ alab_metrics_win_25_02['create_first_image']['min'] }} + | max: {{ alab_metrics_win_25_02['create_first_image']['max'] }} + | mean: {{ alab_metrics_win_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ alab_metrics_win_24_11['close_stage']['min'] }} | max: {{ alab_metrics_win_24_11['close_stage']['max'] }} | mean: {{ alab_metrics_win_24_11['close_stage']['mean']}} + - | min: {{ alab_metrics_win_25_02['close_stage']['min'] }} + | max: {{ alab_metrics_win_25_02['close_stage']['max'] }} + | mean: {{ alab_metrics_win_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ alab_metrics_win_24_11['shut_down_hydra']['min'] }} | max: {{ alab_metrics_win_24_11['shut_down_hydra']['max'] }} | mean: {{ alab_metrics_win_24_11['shut_down_hydra']['mean']}} + - | min: {{ alab_metrics_win_25_02['shut_down_hydra']['min'] }} + | max: {{ alab_metrics_win_25_02['shut_down_hydra']['max'] }} + | mean: {{ alab_metrics_win_25_02['shut_down_hydra']['mean']}} - TBD diff --git a/docs/_templates/perf_metric_kitchenset_template.tmpl b/docs/_templates/perf_metric_kitchenset_template.tmpl index ea90892b93..6b26d7338f 100644 --- a/docs/_templates/perf_metric_kitchenset_template.tmpl +++ b/docs/_templates/perf_metric_kitchenset_template.tmpl @@ -1,4 +1,5 @@ {% set kitchenset_metrics_linux_24_11 = load('performance/24.11_linux_kitchenset.yaml', data_format='yaml') %} +{% set kitchenset_metrics_linux_25_02 = load('performance/25.02_linux_kitchenset.yaml', data_format='yaml') %} .. 
list-table:: Linux Metrics (min/max/mean in seconds) :header-rows: 1 @@ -6,28 +7,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ kitchenset_metrics_linux_24_11['open_stage']['min'] }} | max: {{ kitchenset_metrics_linux_24_11['open_stage']['max'] }} | mean: {{ kitchenset_metrics_linux_24_11['open_stage']['mean']}} + - | min: {{ kitchenset_metrics_linux_25_02['open_stage']['min'] }} + | max: {{ kitchenset_metrics_linux_25_02['open_stage']['max'] }} + | mean: {{ kitchenset_metrics_linux_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ kitchenset_metrics_linux_24_11['create_first_image']['min'] }} | max: {{ kitchenset_metrics_linux_24_11['create_first_image']['max'] }} | mean: {{ kitchenset_metrics_linux_24_11['create_first_image']['mean']}} + - | min: {{ kitchenset_metrics_linux_25_02['create_first_image']['min'] }} + | max: {{ kitchenset_metrics_linux_25_02['create_first_image']['max'] }} + | mean: {{ kitchenset_metrics_linux_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ kitchenset_metrics_linux_24_11['close_stage']['min'] }} | max: {{ kitchenset_metrics_linux_24_11['close_stage']['max'] }} | mean: {{ kitchenset_metrics_linux_24_11['close_stage']['mean']}} + - | min: {{ kitchenset_metrics_linux_25_02['close_stage']['min'] }} + | max: {{ kitchenset_metrics_linux_25_02['close_stage']['max'] }} + | mean: {{ kitchenset_metrics_linux_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ kitchenset_metrics_linux_24_11['shut_down_hydra']['min'] }} | max: {{ kitchenset_metrics_linux_24_11['shut_down_hydra']['max'] }} | mean: {{ kitchenset_metrics_linux_24_11['shut_down_hydra']['mean']}} + - | min: {{ kitchenset_metrics_linux_25_02['shut_down_hydra']['min'] }} + | max: {{ kitchenset_metrics_linux_25_02['shut_down_hydra']['max'] }} + | mean: {{ kitchenset_metrics_linux_25_02['shut_down_hydra']['mean']}} - TBD {% set kitchenset_metrics_macos_24_11 = load('performance/24.11_macos_kitchenset.yaml', data_format='yaml') %} +{% set kitchenset_metrics_macos_25_02 = load('performance/25.02_macos_kitchenset.yaml', data_format='yaml') %} .. 
list-table:: macOS Metrics (min/max/mean in seconds) :header-rows: 1 @@ -35,28 +50,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ kitchenset_metrics_macos_24_11['open_stage']['min'] }} | max: {{ kitchenset_metrics_macos_24_11['open_stage']['max'] }} | mean: {{ kitchenset_metrics_macos_24_11['open_stage']['mean']}} + - | min: {{ kitchenset_metrics_macos_25_02['open_stage']['min'] }} + | max: {{ kitchenset_metrics_macos_25_02['open_stage']['max'] }} + | mean: {{ kitchenset_metrics_macos_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ kitchenset_metrics_macos_24_11['create_first_image']['min'] }} | max: {{ kitchenset_metrics_macos_24_11['create_first_image']['max'] }} | mean: {{ kitchenset_metrics_macos_24_11['create_first_image']['mean']}} + - | min: {{ kitchenset_metrics_macos_25_02['create_first_image']['min'] }} + | max: {{ kitchenset_metrics_macos_25_02['create_first_image']['max'] }} + | mean: {{ kitchenset_metrics_macos_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ kitchenset_metrics_macos_24_11['close_stage']['min'] }} | max: {{ kitchenset_metrics_macos_24_11['close_stage']['max'] }} | mean: {{ kitchenset_metrics_macos_24_11['close_stage']['mean']}} + - | min: {{ kitchenset_metrics_macos_25_02['close_stage']['min'] }} + | max: {{ kitchenset_metrics_macos_25_02['close_stage']['max'] }} + | mean: {{ kitchenset_metrics_macos_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ kitchenset_metrics_macos_24_11['shut_down_hydra']['min'] }} | max: {{ kitchenset_metrics_macos_24_11['shut_down_hydra']['max'] }} | mean: {{ kitchenset_metrics_macos_24_11['shut_down_hydra']['mean']}} + - | min: {{ kitchenset_metrics_macos_25_02['shut_down_hydra']['min'] }} + | max: {{ kitchenset_metrics_macos_25_02['shut_down_hydra']['max'] }} + | mean: {{ kitchenset_metrics_macos_25_02['shut_down_hydra']['mean']}} - TBD {% set kitchenset_metrics_win_24_11 = load('performance/24.11_windows_kitchenset.yaml', data_format='yaml') %} +{% set kitchenset_metrics_win_25_02 = load('performance/25.02_windows_kitchenset.yaml', data_format='yaml') %} .. 
list-table:: Windows Metrics (min/max/mean in seconds) :header-rows: 1 @@ -64,23 +93,36 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ kitchenset_metrics_win_24_11['open_stage']['min'] }} | max: {{ kitchenset_metrics_win_24_11['open_stage']['max'] }} | mean: {{ kitchenset_metrics_win_24_11['open_stage']['mean']}} + - | min: {{ kitchenset_metrics_win_25_02['open_stage']['min'] }} + | max: {{ kitchenset_metrics_win_25_02['open_stage']['max'] }} + | mean: {{ kitchenset_metrics_win_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ kitchenset_metrics_win_24_11['create_first_image']['min'] }} | max: {{ kitchenset_metrics_win_24_11['create_first_image']['max'] }} | mean: {{ kitchenset_metrics_win_24_11['create_first_image']['mean']}} + - | min: {{ kitchenset_metrics_win_25_02['create_first_image']['min'] }} + | max: {{ kitchenset_metrics_win_25_02['create_first_image']['max'] }} + | mean: {{ kitchenset_metrics_win_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ kitchenset_metrics_win_24_11['close_stage']['min'] }} | max: {{ kitchenset_metrics_win_24_11['close_stage']['max'] }} | mean: {{ kitchenset_metrics_win_24_11['close_stage']['mean']}} + - | min: {{ kitchenset_metrics_win_25_02['close_stage']['min'] }} + | max: {{ kitchenset_metrics_win_25_02['close_stage']['max'] }} + | mean: {{ kitchenset_metrics_win_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ kitchenset_metrics_win_24_11['shut_down_hydra']['min'] }} | max: {{ kitchenset_metrics_win_24_11['shut_down_hydra']['max'] }} | mean: {{ kitchenset_metrics_win_24_11['shut_down_hydra']['mean']}} + - | min: {{ kitchenset_metrics_win_25_02['shut_down_hydra']['min'] }} + | max: {{ kitchenset_metrics_win_25_02['shut_down_hydra']['max'] }} + | mean: {{ kitchenset_metrics_win_25_02['shut_down_hydra']['mean']}} - TBD diff --git a/docs/_templates/perf_metric_moorelane_template.tmpl b/docs/_templates/perf_metric_moorelane_template.tmpl index abb044e5c5..47b2d8b4be 100644 --- a/docs/_templates/perf_metric_moorelane_template.tmpl +++ b/docs/_templates/perf_metric_moorelane_template.tmpl @@ -1,4 +1,5 @@ {% set moorelane_metrics_linux_24_11 = load('performance/24.11_linux_moorelane.yaml', data_format='yaml') %} +{% set moorelane_metrics_linux_25_02 = load('performance/25.02_linux_moorelane.yaml', data_format='yaml') %} .. 
list-table:: Linux Metrics (min/max/mean in seconds) :header-rows: 1 @@ -6,28 +7,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ moorelane_metrics_linux_24_11['open_stage']['min'] }} | max: {{ moorelane_metrics_linux_24_11['open_stage']['max'] }} | mean: {{ moorelane_metrics_linux_24_11['open_stage']['mean']}} + - | min: {{ moorelane_metrics_linux_25_02['open_stage']['min'] }} + | max: {{ moorelane_metrics_linux_25_02['open_stage']['max'] }} + | mean: {{ moorelane_metrics_linux_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ moorelane_metrics_linux_24_11['create_first_image']['min'] }} | max: {{ moorelane_metrics_linux_24_11['create_first_image']['max'] }} | mean: {{ moorelane_metrics_linux_24_11['create_first_image']['mean']}} + - | min: {{ moorelane_metrics_linux_25_02['create_first_image']['min'] }} + | max: {{ moorelane_metrics_linux_25_02['create_first_image']['max'] }} + | mean: {{ moorelane_metrics_linux_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ moorelane_metrics_linux_24_11['close_stage']['min'] }} | max: {{ moorelane_metrics_linux_24_11['close_stage']['max'] }} | mean: {{ moorelane_metrics_linux_24_11['close_stage']['mean']}} + - | min: {{ moorelane_metrics_linux_25_02['close_stage']['min'] }} + | max: {{ moorelane_metrics_linux_25_02['close_stage']['max'] }} + | mean: {{ moorelane_metrics_linux_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ moorelane_metrics_linux_24_11['shut_down_hydra']['min'] }} | max: {{ moorelane_metrics_linux_24_11['shut_down_hydra']['max'] }} | mean: {{ moorelane_metrics_linux_24_11['shut_down_hydra']['mean']}} + - | min: {{ moorelane_metrics_linux_25_02['shut_down_hydra']['min'] }} + | max: {{ moorelane_metrics_linux_25_02['shut_down_hydra']['max'] }} + | mean: {{ moorelane_metrics_linux_25_02['shut_down_hydra']['mean']}} - TBD {% set moorelane_metrics_macos_24_11 = load('performance/24.11_macos_moorelane.yaml', data_format='yaml') %} +{% set moorelane_metrics_macos_25_02 = load('performance/25.02_macos_moorelane.yaml', data_format='yaml') %} .. 
list-table:: macOS Metrics (min/max/mean in seconds) :header-rows: 1 @@ -35,28 +50,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ moorelane_metrics_macos_24_11['open_stage']['min'] }} | max: {{ moorelane_metrics_macos_24_11['open_stage']['max'] }} | mean: {{ moorelane_metrics_macos_24_11['open_stage']['mean']}} + - | min: {{ moorelane_metrics_macos_25_02['open_stage']['min'] }} + | max: {{ moorelane_metrics_macos_25_02['open_stage']['max'] }} + | mean: {{ moorelane_metrics_macos_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ moorelane_metrics_macos_24_11['create_first_image']['min'] }} | max: {{ moorelane_metrics_macos_24_11['create_first_image']['max'] }} | mean: {{ moorelane_metrics_macos_24_11['create_first_image']['mean']}} + - | min: {{ moorelane_metrics_macos_25_02['create_first_image']['min'] }} + | max: {{ moorelane_metrics_macos_25_02['create_first_image']['max'] }} + | mean: {{ moorelane_metrics_macos_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ moorelane_metrics_macos_24_11['close_stage']['min'] }} | max: {{ moorelane_metrics_macos_24_11['close_stage']['max'] }} | mean: {{ moorelane_metrics_macos_24_11['close_stage']['mean']}} + - | min: {{ moorelane_metrics_macos_25_02['close_stage']['min'] }} + | max: {{ moorelane_metrics_macos_25_02['close_stage']['max'] }} + | mean: {{ moorelane_metrics_macos_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ moorelane_metrics_macos_24_11['shut_down_hydra']['min'] }} | max: {{ moorelane_metrics_macos_24_11['shut_down_hydra']['max'] }} | mean: {{ moorelane_metrics_macos_24_11['shut_down_hydra']['mean']}} + - | min: {{ moorelane_metrics_macos_25_02['shut_down_hydra']['min'] }} + | max: {{ moorelane_metrics_macos_25_02['shut_down_hydra']['max'] }} + | mean: {{ moorelane_metrics_macos_25_02['shut_down_hydra']['mean']}} - TBD {% set moorelane_metrics_win_24_11 = load('performance/24.11_windows_moorelane.yaml', data_format='yaml') %} +{% set moorelane_metrics_win_25_02 = load('performance/25.02_windows_moorelane.yaml', data_format='yaml') %} .. 
list-table:: Windows Metrics (min/max/mean in seconds) :header-rows: 1 @@ -64,23 +93,36 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ moorelane_metrics_win_24_11['open_stage']['min'] }} | max: {{ moorelane_metrics_win_24_11['open_stage']['max'] }} | mean: {{ moorelane_metrics_win_24_11['open_stage']['mean']}} + - | min: {{ moorelane_metrics_win_25_02['open_stage']['min'] }} + | max: {{ moorelane_metrics_win_25_02['open_stage']['max'] }} + | mean: {{ moorelane_metrics_win_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ moorelane_metrics_win_24_11['create_first_image']['min'] }} | max: {{ moorelane_metrics_win_24_11['create_first_image']['max'] }} | mean: {{ moorelane_metrics_win_24_11['create_first_image']['mean']}} + - | min: {{ moorelane_metrics_win_25_02['create_first_image']['min'] }} + | max: {{ moorelane_metrics_win_25_02['create_first_image']['max'] }} + | mean: {{ moorelane_metrics_win_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ moorelane_metrics_win_24_11['close_stage']['min'] }} | max: {{ moorelane_metrics_win_24_11['close_stage']['max'] }} | mean: {{ moorelane_metrics_win_24_11['close_stage']['mean']}} + - | min: {{ moorelane_metrics_win_25_02['close_stage']['min'] }} + | max: {{ moorelane_metrics_win_25_02['close_stage']['max'] }} + | mean: {{ moorelane_metrics_win_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ moorelane_metrics_win_24_11['shut_down_hydra']['min'] }} | max: {{ moorelane_metrics_win_24_11['shut_down_hydra']['max'] }} | mean: {{ moorelane_metrics_win_24_11['shut_down_hydra']['mean']}} + - | min: {{ moorelane_metrics_win_25_02['shut_down_hydra']['min'] }} + | max: {{ moorelane_metrics_win_25_02['shut_down_hydra']['max'] }} + | mean: {{ moorelane_metrics_win_25_02['shut_down_hydra']['mean']}} - TBD diff --git a/docs/_templates/perf_metric_shaderball_template.tmpl b/docs/_templates/perf_metric_shaderball_template.tmpl index d77228fed4..6e55acee2a 100644 --- a/docs/_templates/perf_metric_shaderball_template.tmpl +++ b/docs/_templates/perf_metric_shaderball_template.tmpl @@ -1,4 +1,5 @@ {% set shaderball_metrics_linux_24_11 = load('performance/24.11_linux_shaderball.yaml', data_format='yaml') %} +{% set shaderball_metrics_linux_25_02 = load('performance/25.02_linux_shaderball.yaml', data_format='yaml') %} .. 
list-table:: Linux Metrics (min/max/mean in seconds) :header-rows: 1 @@ -6,28 +7,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ shaderball_metrics_linux_24_11['open_stage']['min'] }} | max: {{ shaderball_metrics_linux_24_11['open_stage']['max'] }} | mean: {{ shaderball_metrics_linux_24_11['open_stage']['mean']}} + - | min: {{ shaderball_metrics_linux_25_02['open_stage']['min'] }} + | max: {{ shaderball_metrics_linux_25_02['open_stage']['max'] }} + | mean: {{ shaderball_metrics_linux_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ shaderball_metrics_linux_24_11['create_first_image']['min'] }} | max: {{ shaderball_metrics_linux_24_11['create_first_image']['max'] }} | mean: {{ shaderball_metrics_linux_24_11['create_first_image']['mean']}} + - | min: {{ shaderball_metrics_linux_25_02['create_first_image']['min'] }} + | max: {{ shaderball_metrics_linux_25_02['create_first_image']['max'] }} + | mean: {{ shaderball_metrics_linux_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ shaderball_metrics_linux_24_11['close_stage']['min'] }} | max: {{ shaderball_metrics_linux_24_11['close_stage']['max'] }} | mean: {{ shaderball_metrics_linux_24_11['close_stage']['mean']}} + - | min: {{ shaderball_metrics_linux_25_02['close_stage']['min'] }} + | max: {{ shaderball_metrics_linux_25_02['close_stage']['max'] }} + | mean: {{ shaderball_metrics_linux_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ shaderball_metrics_linux_24_11['shut_down_hydra']['min'] }} | max: {{ shaderball_metrics_linux_24_11['shut_down_hydra']['max'] }} | mean: {{ shaderball_metrics_linux_24_11['shut_down_hydra']['mean']}} + - | min: {{ shaderball_metrics_linux_25_02['shut_down_hydra']['min'] }} + | max: {{ shaderball_metrics_linux_25_02['shut_down_hydra']['max'] }} + | mean: {{ shaderball_metrics_linux_25_02['shut_down_hydra']['mean']}} - TBD {% set shaderball_metrics_macos_24_11 = load('performance/24.11_macos_shaderball.yaml', data_format='yaml') %} +{% set shaderball_metrics_macos_25_02 = load('performance/25.02_macos_shaderball.yaml', data_format='yaml') %} .. 
list-table:: macOS Metrics (min/max/mean in seconds) :header-rows: 1 @@ -35,28 +50,42 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ shaderball_metrics_macos_24_11['open_stage']['min'] }} | max: {{ shaderball_metrics_macos_24_11['open_stage']['max'] }} | mean: {{ shaderball_metrics_macos_24_11['open_stage']['mean']}} + - | min: {{ shaderball_metrics_macos_25_02['open_stage']['min'] }} + | max: {{ shaderball_metrics_macos_25_02['open_stage']['max'] }} + | mean: {{ shaderball_metrics_macos_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ shaderball_metrics_macos_24_11['create_first_image']['min'] }} | max: {{ shaderball_metrics_macos_24_11['create_first_image']['max'] }} | mean: {{ shaderball_metrics_macos_24_11['create_first_image']['mean']}} + - | min: {{ shaderball_metrics_macos_25_02['create_first_image']['min'] }} + | max: {{ shaderball_metrics_macos_25_02['create_first_image']['max'] }} + | mean: {{ shaderball_metrics_macos_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ shaderball_metrics_macos_24_11['close_stage']['min'] }} | max: {{ shaderball_metrics_macos_24_11['close_stage']['max'] }} | mean: {{ shaderball_metrics_macos_24_11['close_stage']['mean']}} + - | min: {{ shaderball_metrics_macos_25_02['close_stage']['min'] }} + | max: {{ shaderball_metrics_macos_25_02['close_stage']['max'] }} + | mean: {{ shaderball_metrics_macos_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ shaderball_metrics_macos_24_11['shut_down_hydra']['min'] }} | max: {{ shaderball_metrics_macos_24_11['shut_down_hydra']['max'] }} | mean: {{ shaderball_metrics_macos_24_11['shut_down_hydra']['mean']}} + - | min: {{ shaderball_metrics_macos_25_02['shut_down_hydra']['min'] }} + | max: {{ shaderball_metrics_macos_25_02['shut_down_hydra']['max'] }} + | mean: {{ shaderball_metrics_macos_25_02['shut_down_hydra']['mean']}} - TBD {% set shaderball_metrics_win_24_11 = load('performance/24.11_windows_shaderball.yaml', data_format='yaml') %} +{% set shaderball_metrics_win_25_02 = load('performance/25.02_windows_shaderball.yaml', data_format='yaml') %} .. 
list-table:: Windows Metrics (min/max/mean in seconds) :header-rows: 1 @@ -64,23 +93,36 @@ * - Metric - 24.11 - 25.02 + - 25.05 * - Open stage - | min: {{ shaderball_metrics_win_24_11['open_stage']['min'] }} | max: {{ shaderball_metrics_win_24_11['open_stage']['max'] }} | mean: {{ shaderball_metrics_win_24_11['open_stage']['mean']}} + - | min: {{ shaderball_metrics_win_25_02['open_stage']['min'] }} + | max: {{ shaderball_metrics_win_25_02['open_stage']['max'] }} + | mean: {{ shaderball_metrics_win_25_02['open_stage']['mean']}} - TBD * - Render first image - | min: {{ shaderball_metrics_win_24_11['create_first_image']['min'] }} | max: {{ shaderball_metrics_win_24_11['create_first_image']['max'] }} | mean: {{ shaderball_metrics_win_24_11['create_first_image']['mean']}} + - | min: {{ shaderball_metrics_win_25_02['create_first_image']['min'] }} + | max: {{ shaderball_metrics_win_25_02['create_first_image']['max'] }} + | mean: {{ shaderball_metrics_win_25_02['create_first_image']['mean']}} - TBD * - Close stage - | min: {{ shaderball_metrics_win_24_11['close_stage']['min'] }} | max: {{ shaderball_metrics_win_24_11['close_stage']['max'] }} | mean: {{ shaderball_metrics_win_24_11['close_stage']['mean']}} + - | min: {{ shaderball_metrics_win_25_02['close_stage']['min'] }} + | max: {{ shaderball_metrics_win_25_02['close_stage']['max'] }} + | mean: {{ shaderball_metrics_win_25_02['close_stage']['mean']}} - TBD * - Shut down Hydra - | min: {{ shaderball_metrics_win_24_11['shut_down_hydra']['min'] }} | max: {{ shaderball_metrics_win_24_11['shut_down_hydra']['max'] }} | mean: {{ shaderball_metrics_win_24_11['shut_down_hydra']['mean']}} + - | min: {{ shaderball_metrics_win_25_02['shut_down_hydra']['min'] }} + | max: {{ shaderball_metrics_win_25_02['shut_down_hydra']['max'] }} + | mean: {{ shaderball_metrics_win_25_02['shut_down_hydra']['mean']}} - TBD diff --git a/docs/dl_downloads.rst b/docs/dl_downloads.rst index 948b0470d6..bc717716d2 100644 --- a/docs/dl_downloads.rst +++ b/docs/dl_downloads.rst @@ -4,14 +4,20 @@ Downloads and Videos ==================== -SIGGRAPH 2024 Notes -=================== +.. contents:: Table of Contents + :local: + :depth: 2 + +Presentations +============= -| `SIGGRAPH 2024 USD, Hydra, and OpenSubdiv Birds of a Feather Notes `__ +| `The State and Future of USD Animation Characters, Sept 2024 `__ (`Recording also available `__, must be a member of the Metaverse Standards Forum to access) +| `OpenExec, Oct 2024 `__ (`Recording also available `__) -SIGGRAPH 2023 Notes -=================== +SIGGRAPH Birds of a Feather Notes +================================= +| `SIGGRAPH 2024 USD, Hydra, and OpenSubdiv Birds of a Feather Notes `__ | `SIGGRAPH 2023 USD, Hydra, and OpenSubdiv Birds of a Feather Notes `__ SIGGRAPH 2019 Course Notes diff --git a/docs/glossary.rst b/docs/glossary.rst index ded8b663d9..7892fb2fef 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -1194,7 +1194,7 @@ A good way to understand inherits is to start by understanding `references the results will be indistinguishable from each other! Within a `layerStack <#usdglossary-layerstack>`_ (and ignoring any interaction with `variantSets <#usdglossary-variantset>`_ since VariantSets come between Inherits and -References in `LIVRPS <#usdglossary-livrpsstrengthordering>`_) inherits are +References in `LIVERPS <#usdglossary-livrpsstrengthordering>`_) inherits are indistinguishable in effect from *local* references. 
**The key difference between references and inherits** is that references fully encapsulate their targets, and therefore "disappear" when composed through another layer of
@@ -1729,35 +1729,36 @@ applied to references. See also the FAQ on deleting items with list ops:

 .. _usdglossary-livrpsstrengthordering:

-LIVRPS Strength Ordering
-************************
-
-LIVRPS is an acronym for **Local, Inherits, VariantSets, References, Payload,
-Specializes**, and is the fundamental rubric for understanding how `opinions
-<#usdglossary-opinions>`_ and `namespace <#usdglossary-namespace>`_ compose in
-USD. **LIVRPS** describes the strength ordering in which the various composition
-arcs combine, **within each** `LayerStack <#usdglossary-layerstack>`_. For
-example, when we are trying to determine the value of an `attribute
-<#usdglossary-attribute>`_ or `metadatum <#usdglossary-metadata>`_ on a stage at
-*path* that subscribes to the `value resolution <#usdglossary-valueresolution>`_
-policy that "strongest opinion wins" (which is all attributes and most
-metadata), we iterate through `PrimSpecs <#usdglossary-primspec>`_ in the
-following order looking for an opinion for the requested datum:
+LIVERPS Strength Ordering
+*************************
+
+LIVERPS is an acronym for **Local, Inherits, VariantSets, Relocates, References,
+Payload, Specializes**, and is the fundamental rubric for understanding how
+:ref:`opinions <usdglossary-opinions>` and
+:ref:`namespace <usdglossary-namespace>` compose in USD. **LIVERPS** describes
+the strength ordering in which the various composition arcs combine,
+**within each** :ref:`LayerStack <usdglossary-layerstack>`. For example, when we
+are trying to determine the value of an :ref:`attribute <usdglossary-attribute>`
+or :ref:`metadatum <usdglossary-metadata>` on a stage at *path* that subscribes
+to the :ref:`value resolution <usdglossary-valueresolution>` policy that
+"strongest opinion wins" (which is all attributes and most metadata), we iterate
+through :ref:`PrimSpecs <usdglossary-primspec>` in the following order looking
+for an opinion for the requested datum:

 #. **Local**:

    Iterate through all the layers in the local LayerStack looking for
    opinions on the PrimSpec at *path* in each layer - recall that
    according to the definition of LayerStack, this is where the effect
-   of direct opinions in all `SubLayers <#usdglossary-sublayers>`_ of the
+   of direct opinions in all :ref:`SubLayers <usdglossary-sublayers>` of the
    root layer of the LayerStack will be consulted. If no opinion is
    found, then...

 #. **Inherits**:

-   Resolve the `Inherits <#usdglossary-inherits>`_ affecting
+   Resolve the :ref:`Inherits <usdglossary-inherits>` affecting
    the prim at *path*, and iterate through the resulting targets. For
-   each target, **recursively apply** **LIVRP** **evaluation** on
+   each target, **recursively apply** **LIVERP** **evaluation** on
    the targeted LayerStack - **Note that the "S" is not present** - we
    ignore Specializes arcs while recursing. If no opinion is found,
    then...
@@ -1765,33 +1766,42 @@ following order looking for an opinion for the requested datum:

 #. **VariantSets**:

    Apply the resolved variant selections to all
-   `VariantSets <#usdglossary-variantset>`_ that affect the PrimSpec at
-   *path* in the LayerStack, and iterate through the selected `Variants
-   <#usdglossary-variant>`_ on each VariantSet. For each target,
-   **recursively apply** **LIVRP** **evaluation** on the targeted
+   :ref:`VariantSets <usdglossary-variantset>` that affect the PrimSpec at
+   *path* in the LayerStack, and iterate through the selected
+   :ref:`Variants <usdglossary-variant>` on each VariantSet. For each
+   target, **recursively apply** **LIVERP** **evaluation** on the targeted
+   LayerStack - **Note that the "S" is not present** - we ignore
+   Specializes arcs while recursing. If no opinion is found, then...
+
+#. **r(E)locates**:
+
+   Resolve any :ref:`Relocates <usdglossary-relocates>` target path
+   affecting the prim at *path*, and iterate through the resulting relocates
+   sources. For each source, **recursively apply** **LIVERP** **evaluation**
+   on the source's remote LayerStack - **Note that the "S" is not present**
+   - we ignore Specializes arcs while recursing. If no opinion is found,
+   then...

 #. **References**:

-   Resolve the `References <#usdglossary-references>`_
+   Resolve the :ref:`References <usdglossary-references>`
    affecting the prim at *path*, and iterate through the resulting
-   targets. For each target, **recursively apply** **LIVRP** **evaluation**
+   targets. For each target, **recursively apply** **LIVERP** **evaluation**
    on the targeted LayerStack - **Note that the "S" is not present** -
    we ignore Specializes arcs while recursing. If no opinion is found,
    then...

 #. **Payload**:

-   Resolve the `Payload <#usdglossary-payload>`_
+   Resolve the :ref:`Payload <usdglossary-payload>` arcs
    affecting the prim at *path*; if *path* has been **loaded on the
    stage,** iterate through the resulting targets just as we would
    references from step 5. If no opinion is found, then...

 #. **Specializes**:

-   Resolve the `Specializes <#usdglossary-specializes>`_
+   Resolve the :ref:`Specializes <usdglossary-specializes>`
    affecting the prim at *path*, and iterate through the resulting
-   targets, **recursively applying *full* LIVRPS evaluation** on each target
+   targets, **recursively applying *full* LIVERPS evaluation** on each target
    prim. If no opinion is found, then...

 #. Indicate that we could find no authored opinion

@@ -1799,21 +1809,18 @@ following order looking for an opinion for the requested datum:
 We have omitted some details, such as how, for any composition arc in the above
 recipe, we order arcs applied directly on the PrimSpec in relation to the same
 kind of arc authored on an *ancestral* PrimSpec in the LayerStack - the short
-answer is that `"ancestral arcs" are weaker than "direct arcs"
-<#usdglossary-directopinion>`_, and why we ignore the "S" when we recurse for
-the other arcs, which we discuss more in the entry for `Specializes
-<#usdglossary-specializes>`_. It may sound like a great deal of work to need
-to perform for every value lookup, and it absolutely would be if we followed all
-the steps as described above, during `value resolution
-<#usdglossary-valueresolution>`_. This is the reason that we compute and cache
-an `Index <#usdglossary-index>`_ for every prim on the Stage: the Index
-"pre-applies" the above algorithm to find all the PrimSpecs that contribute any
-opinions to the prim, and caches the list in a recursive data structure that can
-be very efficiently processed whenever we need to resolve some value on the
-prim.
+answer is that "ancestral arcs" are weaker than "direct arcs" (see
+:ref:`usdglossary-directopinion`). Additionally, we skip over "S" during
+recursion for other arcs, as explained further in the entry for
+:ref:`Specializes <usdglossary-specializes>`. Performing every step as described
+above for each value lookup would be very costly. This is why we compute and
+cache an :ref:`Index <usdglossary-index>` for every prim on the Stage. The Index
+"pre-applies" the full algorithm to gather all PrimSpecs contributing opinions
+to a prim, caching the list in a recursive data structure that allows efficient
+processing whenever a value on the prim needs to be resolved.
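One practical way to observe this ordering from Python is to ask an attribute for its *property stack*, which lists the contributing specs strongest-first. A minimal sketch (the layers, prim, and values below are illustrative and not part of the glossary example):

.. code-block:: python

   from pxr import Sdf, Usd

   # Weaker layer: authors /Ball.radius = 1.0.
   weak = Sdf.Layer.CreateAnonymous(".usda")
   weak.ImportFromString('''#usda 1.0

   def Sphere "Ball"
   {
       double radius = 1.0
   }
   ''')

   # Root layer: sublayers the weak layer and authors a local opinion.
   strong = Sdf.Layer.CreateAnonymous(".usda")
   strong.ImportFromString('''#usda 1.0

   over "Ball"
   {
       double radius = 5.0
   }
   ''')
   strong.subLayerPaths.append(weak.identifier)

   stage = Usd.Stage.Open(strong)
   attr = stage.GetPrimAtPath("/Ball").GetAttribute("radius")

   # The "L" of LIVERPS: local opinions in the root LayerStack win.
   print(attr.Get())  # 5.0

   # The property stack lists the contributing specs strongest-first,
   # mirroring the ordering described above.
   for spec in attr.GetPropertyStack(Usd.TimeCode.Default()):
       print(spec.layer.identifier, spec.default)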
The algorithm for computing the namespace of the stage (i.e. what prims are
-present and where) are slightly more involved, but still follows the LIVRPS
+present and where) is slightly more involved, but still follows the LIVERPS
 recipe.

 .. _usdglossary-load-unload:
@@ -1961,7 +1968,7 @@ Opinions
 Metadatum, Attribute, or Relationship, you are expressing an *opinion* for that
 object in a PrimSpec in a particular Layer. On a composed Stage, any object may
 be affected by multiple opinions from different layers; the ordering of
-these opinions is determined by the `LIVRPS strength ordering
+these opinions is determined by the `LIVERPS strength ordering
 <#usdglossary-livrpsstrengthordering>`_.

 .. _usdglossary-over:
@@ -2680,6 +2687,451 @@ we will get:
 as the result, even though that was not the authored value in
 :filename:`Marble.usd`.

+.. _usdglossary-relocates:
+
+Relocates
+*********
+
+*Relocates* is a :ref:`composition arc <usdglossary-compositionarcs>` that maps
+a prim :ref:`path <usdglossary-path>` defined in a remote
+:ref:`LayerStack <usdglossary-layerstack>` (i.e. across a composition arc) to a
+new path location in the local namespace.
+
+Relocates are defined in layer metadata, as a list of source path to target
+path mappings. Note that these paths can only be prim paths, not property paths.
+
+.. code-block:: usda
+   :caption: example relocates defined in layer metadata
+
+   #usda 1.0
+   (
+       relocates = {
+           </path/to/source/prim>: </path/to/target/prim>,
+           </path/to/other/source>: </path/to/other/target>
+       }
+   )
+
+Relocates let you rename or reparent prims in situations where you would not be
+able to edit the prims directly. Normally, prims with underlying PrimSpecs from
+composition arcs cannot be directly reparented. While it's possible to reparent
+the underlying "composition source" prims directly, such an edit would be
+destructive and would affect all other instances that share that scene
+description. Relocates provides a way to *non-destructively* reparent or rename
+prims by specifying a mapping of source namespace paths to target namespace
+paths in the local namespace, ensuring that the source of the composition arc is
+not modified.
+
+As an example, if you had layer :filename:`refLayer.usda` with the following
+prims:
+
+.. code-block:: usda
+   :caption: refLayer.usda
+
+   def "PrimA" ()
+   {
+       def "PrimAChild" ()
+       {
+           uniform string testString = "test"
+           float childValue = 3.5
+       }
+   }
+
+In another layer, :filename:`main.usda`, "PrimA" is referenced:
+
+.. code-block:: usda
+   :caption: main.usda
+
+   def "MainPrim" (
+       prepend references = @refLayer.usda@</PrimA>
+   )
+   {
+   }
+
+You cannot directly rename or reparent :sdfpath:`/MainPrim/PrimAChild`. However,
+you can provide a relocates mapping (in :filename:`main.usda`) of
+:sdfpath:`/MainPrim/PrimAChild` to another path in the local namespace:
+
+.. code-block:: usda
+   :caption: relocates added to main.usda
+
+   #usda 1.0
+   (
+       relocates = {
+           </MainPrim/PrimAChild>: </MainPrim/RenamedPrimAChild>
+       }
+   )
+
+This renames :sdfpath:`/MainPrim/PrimAChild` to
+:sdfpath:`/MainPrim/RenamedPrimAChild` without affecting the
+:filename:`refLayer.usda` layer.
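The same rename can be reproduced end to end in Python. A minimal sketch of the example above, assuming a USD build that supports relocates authored in layer metadata (the anonymous-layer plumbing here is only to keep the sketch self-contained):

.. code-block:: python

   from pxr import Sdf, Usd

   ref_layer = Sdf.Layer.CreateAnonymous(".usda")
   ref_layer.ImportFromString('''#usda 1.0

   def "PrimA"
   {
       def "PrimAChild"
       {
           uniform string testString = "test"
           float childValue = 3.5
       }
   }
   ''')

   main_layer = Sdf.Layer.CreateAnonymous(".usda")
   main_layer.ImportFromString('''#usda 1.0
   (
       relocates = {
           </MainPrim/PrimAChild>: </MainPrim/RenamedPrimAChild>
       }
   )

   def "MainPrim" (
       prepend references = @%s@</PrimA>
   )
   {
   }
   ''' % ref_layer.identifier)

   stage = Usd.Stage.Open(main_layer)

   # The referenced child is now only visible at its relocated path.
   print(stage.GetPrimAtPath("/MainPrim/RenamedPrimAChild").IsValid())  # True
   print(stage.GetPrimAtPath("/MainPrim/PrimAChild").IsValid())         # False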
+You could then add an override for :sdfpath:`/MainPrim/RenamedPrimAChild` in
+:filename:`main.usda`. Note that the override uses the *relocated path*:
+
+.. code-block:: usda
+   :caption: override added to main.usda
+
+   def "MainPrim" (
+       prepend references = @refLayer.usda@</PrimA>
+   )
+   {
+       over "RenamedPrimAChild"
+       {
+           float childValue = 5.2
+       }
+   }
+
+The resulting stage composition will apply the override to the relocated prim
+reference:
+
+.. code-block:: usda
+   :caption: flattened main.usda
+
+   def "MainPrim"
+   {
+       def "RenamedPrimAChild"
+       {
+           float childValue = 5.2
+           uniform string testString = "test"
+       }
+   }
+
+**Things to note:**
+
+   * You cannot relocate a root prim. In other words, the source path for a
+     relocates cannot be a root prim. This is because it's impossible for a
+     root prim to be introduced by an ancestral composition arc, and relocates
+     can only be used to map prims introduced via composition arcs.
+
+   * When a source path is relocated, that original source path is considered
+     *no longer valid in the current namespace*. Any local opinions authored
+     on a source path will generate an "invalid opinion at relocation source
+     path" error. See
+     :ref:`local opinions not allowed at source paths <usdglossary-relocates-source-invalid>`
+     below for more details.
+
+   * Source and target paths must be complete scene paths. Paths with variant
+     selections (e.g. :sdfpath:`/Prim{var=sel}Child`) are not supported.
+
+   * Relocates that would create invalid or conflicting namespace paths are not
+     allowed, such as:
+
+     * Relocating a prim to an existing ancestor:
+       :sdfpath:`/Prim/Child/Grandchild` : :sdfpath:`/Prim/Child` is not
+       allowed.
+
+     * Relocating a prim to a descendant of the source path:
+       :sdfpath:`/Prim/Child` : :sdfpath:`/Prim/Child/Grandchild` is not
+       allowed.
+
+     * Relocating the same source path to multiple targets, or relocating
+       multiple source paths to the same target.
+
+     * Relocating a prim to the source path of a different relocate in the
+       same namespace:
+       :sdfpath:`/Prim/Prim1` : :sdfpath:`/Prim/Prim2`,
+       :sdfpath:`/Prim/Prim2` : :sdfpath:`/Prim/Prim3` is not allowed.
+       "Transitive" relocates must be collapsed into the smallest relocation,
+       e.g. for the previous example,
+       :sdfpath:`/Prim/Prim1` : :sdfpath:`/Prim/Prim3` should be used instead.
+       This applies to relocates in the same LayerStack.
+
+   * If a relocate has "ancestral relocates" (e.g. an ancestor prim that has
+     also been relocated), the relocate source path must use the ancestral
+     relocated path. For example, if you have :sdfpath:`/Root` referencing
+     :sdfpath:`/Ref`, and :sdfpath:`/Ref` also references :sdfpath:`/Ref2`,
+     if :sdfpath:`/Root/Ref` is relocated to :sdfpath:`/Root/RefRelocated`,
+     any additional relocate that would use :sdfpath:`/Root/Ref/Ref2` as a
+     source path must use the relocated path
+     :sdfpath:`/Root/RefRelocated/Ref2`.
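Following up on the second note above, opinions authored at a relocation source path can also be caught programmatically. A hedged sketch, assuming `UsdStage.GetCompositionErrors()` is available in your build (it was added alongside recent relocates work; the exact printed form of each error object may vary):

.. code-block:: python

   from pxr import Sdf, Usd

   ref_layer = Sdf.Layer.CreateAnonymous(".usda")
   ref_layer.ImportFromString('''#usda 1.0

   def "PrimA"
   {
       def "PrimAChild"
       {
       }
   }
   ''')

   # Author a local override at the *source* path, which relocates forbids.
   main_layer = Sdf.Layer.CreateAnonymous(".usda")
   main_layer.ImportFromString('''#usda 1.0
   (
       relocates = {
           </MainPrim/PrimAChild>: </MainPrim/RenamedPrimAChild>
       }
   )

   def "MainPrim" (
       prepend references = @%s@</PrimA>
   )
   {
       over "PrimAChild"
       {
       }
   }
   ''' % ref_layer.identifier)

   stage = Usd.Stage.Open(main_layer)
   # Expect an error about opinions at the relocation source path.
   for err in stage.GetCompositionErrors():
       print(err)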
+With respect to
+:ref:`composition strength ordering <usdglossary-livrpsstrengthordering>`,
+relocates is stronger than :ref:`usdglossary-references`, but weaker than
+:ref:`usdglossary-variantset`. In the previous example, if we had an authored
+opinion at a relocates target location that used the :ref:`usdglossary-inherits`
+composition arc (which is stronger than relocates) we might have a
+:filename:`main.usda` layer that looks like the following:
+
+.. code-block:: usda
+   :caption: main.usda with added class and inherits
+
+   #usda 1.0
+   (
+       relocates = {
+           </MainPrim/PrimAChild>: </MainPrim/RenamedPrimAChild>
+       }
+   )
+
+   class "WorkClass"
+   {
+       float childValue = 20.5
+       uniform string testString = "from WorkClass"
+   }
+
+   def "MainPrim" (
+       prepend references = @refLayer.usda@</PrimA>
+   )
+   {
+       def "RenamedPrimAChild" (
+           inherits = </WorkClass>
+       )
+       {
+       }
+   }
+
+When the stage is composed, the inherited opinions for
+:sdfpath:`/MainPrim/RenamedPrimAChild` will be stronger:
+
+.. code-block:: usda
+   :caption: flattened main.usda with inherits and relocates applied
+
+   def "MainPrim"
+   {
+       def "RenamedPrimAChild"
+       {
+           float childValue = 20.5
+           uniform string testString = "from WorkClass"
+       }
+   }
+
+**Relocates and inherits**
+
+A relocated prim will still inherit the same opinions it would have, had it not
+been relocated. This can result in some subtle composition behavior.
+
+For example, we have a layer :filename:`model.usda` that defines
+:sdfpath:`/ClassA` and has prim :sdfpath:`/Model` that inherits from
+:sdfpath:`/ClassA`. It also has a relocate for :sdfpath:`/Model/Rig/LRig` to
+:sdfpath:`/Model/Anim/LAnim`:
+
+.. code-block:: usda
+   :caption: model.usda with ClassA and Model that inherits from ClassA
+
+   #usda 1.0
+   (
+       relocates = {
+           </Model/Rig/LRig>: </Model/Anim/LAnim>
+       }
+   )
+
+   class "ClassA"
+   {
+       def "Rig"
+       {
+           def "LRig"
+           {
+               uniform token modelClassALRig = "test"
+           }
+       }
+
+       def "Anim"
+       {
+           def "LAnim"
+           {
+               uniform token modelClassALAnim = "test"
+           }
+       }
+   }
+
+   def "Model" (
+       inherits = </ClassA>
+   )
+   {
+   }
+
+We reference :sdfpath:`/Model` in another layer, :filename:`root.usda`, which
+also has a :sdfpath:`/ClassA` class:
+
+.. code-block:: usda
+   :caption: root.usda
+
+   def "Model_1" (
+       references = @./model.usda@</Model>
+   )
+   {
+   }
+
+   class "ClassA"
+   {
+       def "Rig"
+       {
+           def "LRig"
+           {
+               uniform token rootClassALRig = "test"
+           }
+       }
+
+       def "Anim"
+       {
+           def "LAnim"
+           {
+               uniform token rootClassALAnim = "test"
+           }
+       }
+   }
+
+If we load :filename:`root.usda` and inspect the flattened stage,
+:sdfpath:`/Model/Rig/LRig` in :filename:`model.usda` has inherited from
+:sdfpath:`/ClassA/Rig/LRig` even though it was relocated to
+:sdfpath:`/Model/Anim/LAnim` in that layer, and does *not* inherit opinions from
+:sdfpath:`/ClassA/Anim/LAnim`. However, note that :sdfpath:`/Model_1/Anim/LAnim`
+in the :filename:`root.usda` layer does inherit from the layer's
+:sdfpath:`/ClassA/Anim/LAnim`.
+
+.. code-block:: usda
+   :caption: flattened root.usda
+
+   def "Model_1"
+   {
+       def "Rig"
+       {
+       }
+
+       def "Anim"
+       {
+           def "LAnim"
+           {
+               uniform token modelClassALRig = "test"
+               uniform token rootClassALAnim = "test"
+               uniform token rootClassALRig = "test"
+           }
+       }
+   }
+
+**Relocates and ancestral arcs during composition**
+
+One aspect of relocates and composition is that relocates will *ignore*
+all ancestral arcs *except variant arcs* when we build the
+:ref:`PrimIndex <usdglossary-index>` for a prim. So, if you had a layer that
+relocates a prim to be the child of a prim with an ancestral inherits arc:
+
+.. code-block:: usda
+   :caption: layer with ancestral inherits and relocates
+
+   #usda 1.0
+   (
+       relocates = {
+           </PrimA/Child>: </PrimWithInherits/Child>
+       }
+   )
+
+   def "ClassA" ()
+   {
+       def "Child"
+       {
+           uniform token testString = "from ClassA/Child"
+           uniform token classAChildString = "test"
+       }
+   }
+
+   def "RefPrim" ()
+   {
+       def "Child"
+       {
+           uniform token testString = "from RefPrim/Child"
+           uniform token refPrimChildString = "test"
+       }
+   }
+
+   def "PrimA" (
+       prepend references = </RefPrim>
+   )
+   {
+   }
+
+   def "PrimWithInherits" (
+       inherits = </ClassA>
+   )
+   {
+   }
+
+With the relocates for :sdfpath:`/PrimA/Child` to
+:sdfpath:`/PrimWithInherits/Child`, the ancestral opinions from
+:sdfpath:`/ClassA/Child` are ignored.
+
+.. code-block:: usda
+   :caption: flattened PrimWithInherits
+
+   def "PrimWithInherits"
+   {
+       def "Child"
+       {
+           uniform token refPrimChildString = "test"
+           uniform token testString = "from RefPrim/Child"
+       }
+   }
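To confirm this behavior, a sketch that loads the layer above and inspects the composed child (again assuming relocates support; the expected output follows the flattened result shown above):

.. code-block:: python

   from pxr import Sdf, Usd

   layer = Sdf.Layer.CreateAnonymous(".usda")
   layer.ImportFromString('''#usda 1.0
   (
       relocates = {
           </PrimA/Child>: </PrimWithInherits/Child>
       }
   )

   def "ClassA"
   {
       def "Child"
       {
           uniform token testString = "from ClassA/Child"
           uniform token classAChildString = "test"
       }
   }

   def "RefPrim"
   {
       def "Child"
       {
           uniform token testString = "from RefPrim/Child"
           uniform token refPrimChildString = "test"
       }
   }

   def "PrimA" (
       prepend references = </RefPrim>
   )
   {
   }

   def "PrimWithInherits" (
       inherits = </ClassA>
   )
   {
   }
   ''')

   stage = Usd.Stage.Open(layer)
   child = stage.GetPrimAtPath("/PrimWithInherits/Child")

   # classAChildString is absent: the ancestral inherits arc was ignored.
   print(sorted(a.GetName() for a in child.GetAuthoredAttributes()))
   print(child.GetAttribute("testString").Get())  # "from RefPrim/Child"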
+However, as mentioned earlier, ancestral *variant* arcs will still compose with
+relocates. If we introduce ancestral opinions from a variant in
+:sdfpath:`/PrimWithInherits` instead of using inherits:
+
+.. code-block:: usda
+   :caption: replace inherits with variantset in PrimWithInherits
+
+   def "PrimWithInherits" (
+       # Removed inherits of ClassA
+       # Added variantSet and selection with authored Child opinions
+       variants = {
+           string varSet = "Set1"
+       }
+       prepend variantSets = "varSet"
+   )
+   {
+       variantSet "varSet" = {
+           "Set1" ()
+           {
+               def "Child"
+               {
+                   uniform token testString = "from varSet Child"
+                   uniform token varChildString = "test"
+               }
+           }
+           "Set2" ()
+           {
+           }
+       }
+   }
+
+The ancestral opinions from the selected variant will be applied:
+
+.. code-block:: usda
+   :caption: flattened PrimWithInherits with ancestral variant opinions
+
+   def "PrimWithInherits"
+   {
+       def "Child"
+       {
+           uniform token refPrimChildString = "test"
+           uniform token testString = "from varSet Child"
+           uniform token varChildString = "test"
+       }
+   }
+
+See :ref:`composition strength ordering <usdglossary-livrpsstrengthordering>`
+for more details on relocates and composition.
+
+**Local opinions not allowed at source paths**
+
+.. _usdglossary-relocates-source-invalid:
+
+When a source path is relocated, that original source path is considered
+no longer valid in the current namespace. So, if you relocated
+:sdfpath:`/PrimA/Child` to :sdfpath:`/PrimA/NewChild`, you cannot have any
+local opinions authored at :sdfpath:`/PrimA/Child`. This avoids ambiguity and
+ensures there's exactly one location (the target path) in the namespace to
+express opinions about a given object.
+
 .. _usdglossary-rootlayerstack:

 Root LayerStack
diff --git a/docs/intro.rst b/docs/intro.rst
index 03305bbb50..1aa8a140be 100644
--- a/docs/intro.rst
+++ b/docs/intro.rst
@@ -70,8 +70,8 @@ following ongoing production concerns:
   departments, or within the same department, to simultaneously work on the
   same asset or scene, by allowing each artist to work in their own file
   (called a :ref:`glossary:Layer`), all of which will be combined
-  and resolved in a :ref:`strength ordering ` clearly specified in the USD files themselves. This ability
+  and resolved in a :ref:`strength ordering `
+  clearly specified in the USD files themselves. This ability
   is not a magic bullet that can automatically adjust shading data in a
   stronger layer when the modeling artist changes the topology of geometry
   defined in a weaker layer, but it allows each artist to work
@@ -200,7 +200,7 @@ refinement of "base" in all views of your scene.
The most powerful and unifying aspect of USD's composition semantics is that all of the above operators can be applied to any prim, in any combination, and the composition engine will :ref:`resolve the resulting graph in a predictable way
-`. 
The other desirable property that falls out of this uniform treatment of composition arcs is that stronger layers in a composition can override the scene description in weaker layers *uniformly*, regardless of whether the weaker layers were subLayered, diff --git a/docs/performance/24.11_linux_shaderball.yaml b/docs/performance/24.11_linux_shaderball.yaml index bab10efdde..b854b418b7 100644 --- a/docs/performance/24.11_linux_shaderball.yaml +++ b/docs/performance/24.11_linux_shaderball.yaml @@ -1,40 +1,40 @@ bring_up_the_ui: - max: 0.43229 - mean: 0.4245096 - min: 0.419582 + max: 0.484038 + mean: 0.4738047 + min: 0.457381 close_stage: - max: 0.000754 - mean: 0.0006054 - min: 0.000499 + max: 0.00095 + mean: 0.000746 + min: 0.000569 configure_and_load_plugins: - max: 0.000142 - mean: 0.0001347 - min: 0.000129 + max: 0.000155 + mean: 0.0001349 + min: 0.000126 create_first_image: - max: 2.904303 - mean: 2.8882178 - min: 2.870519 + max: 2.829025 + mean: 2.8189242 + min: 2.80578 open_and_close_usdview: - max: 3.479013 - mean: 3.464824 - min: 3.44698 + max: 3.492909 + mean: 3.4580702 + min: 3.439503 open_stage: - max: 0.150255 - mean: 0.1480813 - min: 0.144294 + max: 0.168689 + mean: 0.16472699999999998 + min: 0.154571 reset_prim_browser: - max: 0.001618 - mean: 0.0015671 - min: 0.001534 + max: 0.001619 + mean: 0.0015558 + min: 0.001495 shut_down_hydra: - max: 0.022668 - mean: 0.0223569 - min: 0.022082 + max: 0.022807 + mean: 0.0223177 + min: 0.022005 tear_down_the_ui: - max: 0.011874 - mean: 0.0116779 - min: 0.011499 + max: 0.014951 + mean: 0.0120658 + min: 0.009447 traverse_stage: - max: 0.000408 - mean: 0.000371 - min: 0.000353 + max: 0.000462 + mean: 0.00037600000000000003 + min: 0.000351 diff --git a/docs/performance/24.11_macos_shaderball.yaml b/docs/performance/24.11_macos_shaderball.yaml index d39901392b..5e29402c3b 100644 --- a/docs/performance/24.11_macos_shaderball.yaml +++ b/docs/performance/24.11_macos_shaderball.yaml @@ -1,40 +1,40 @@ bring_up_the_ui: - max: 0.343762 - mean: 0.3297607 - min: 0.317855 + max: 0.359631 + mean: 0.3520272 + min: 0.347792 close_stage: - max: 0.000296 - mean: 0.0002641 - min: 0.000241 + max: 0.000339 + mean: 0.0002655 + min: 0.000238 configure_and_load_plugins: - max: 9.8e-05 - mean: 8.26e-05 - min: 7.6e-05 + max: 9.4e-05 + mean: 8.74e-05 + min: 8.3e-05 create_first_image: max: N/A mean: N/A min: N/A open_and_close_usdview: - max: 1.152715 - mean: 1.1196469 - min: 1.096133 + max: 1.218009 + mean: 1.2019958 + min: 1.179886 open_stage: - max: 0.098122 - mean: 0.0901067 - min: 0.088529 + max: 0.098173 + mean: 0.0973833 + min: 0.096413 reset_prim_browser: - max: 0.000821 - mean: 0.0006841 - min: 0.000646 + max: 0.000745 + mean: 0.0006994 + min: 0.000647 shut_down_hydra: - max: 0.014153 - mean: 0.0116556 - min: 0.009064 + max: 0.013276 + mean: 0.0104122 + min: 0.008993 tear_down_the_ui: - max: 0.016629 - mean: 0.014055 - min: 0.01147 + max: 0.014566 + mean: 0.0132383 + min: 0.012023 traverse_stage: - max: 0.000291 - mean: 0.0002508 - min: 0.000229 + max: 0.000374 + mean: 0.00026639999999999997 + min: 0.000242 diff --git a/docs/performance/24.11_windows_shaderball.yaml b/docs/performance/24.11_windows_shaderball.yaml index fc6496fca1..a97c69d5c9 100644 --- a/docs/performance/24.11_windows_shaderball.yaml +++ b/docs/performance/24.11_windows_shaderball.yaml @@ -1,40 +1,40 @@ bring_up_the_ui: - max: 4.444394 - mean: 4.0969829 - min: 4.007564 + max: 3.958488 + mean: 3.8399875 + min: 3.77976 close_stage: - max: 0.002616 - mean: 0.0017503 - min: 0.001371 + max: 
0.002975 + mean: 0.0017164 + min: 0.001281 configure_and_load_plugins: - max: 0.00118 - mean: 0.0009801 - min: 0.00086 + max: 0.00109 + mean: 0.0009346000000000001 + min: 0.000843 create_first_image: - max: 4.963428 - mean: 4.7824023 - min: 4.546317 + max: 4.745389 + mean: 4.6892396000000005 + min: 4.571696 open_and_close_usdview: - max: 9.518625 - mean: 9.09599 - min: 8.81436 + max: 8.943871 + mean: 8.763062099999999 + min: 8.640669 open_stage: - max: 0.320872 - mean: 0.2995894 - min: 0.284833 + max: 0.300575 + mean: 0.2809994 + min: 0.270022 reset_prim_browser: - max: 0.004614 - mean: 0.0041768 - min: 0.003919 + max: 0.004022 + mean: 0.0038661 + min: 0.003717 shut_down_hydra: - max: 0.04744 - mean: 0.0423858 - min: 0.040305 + max: 0.043999 + mean: 0.0421298 + min: 0.040251 tear_down_the_ui: - max: 0.057904 - mean: 0.0550358 - min: 0.050231 + max: 0.068668 + mean: 0.0652362 + min: 0.062304 traverse_stage: - max: 0.001217 - mean: 0.0011315 - min: 0.001047 + max: 0.001128 + mean: 0.0010755 + min: 0.001025 diff --git a/docs/performance/25.02_linux_alab.yaml b/docs/performance/25.02_linux_alab.yaml new file mode 100644 index 0000000000..c28ac786e4 --- /dev/null +++ b/docs/performance/25.02_linux_alab.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.911439 + mean: 0.84275395 + min: 0.770689 +close_stage: + max: 0.269124 + mean: 0.09663697 + min: 0.0905 +configure_and_load_plugins: + max: 0.000163 + mean: 0.00013447 + min: 0.000123 +create_first_image: + max: 8.149738 + mean: 7.57883956 + min: 6.865629 +open_and_close_usdview: + max: 9.453057 + mean: 8.83175125 + min: 8.093528 +open_stage: + max: 0.563632 + mean: 0.50131423 + min: 0.4406 +reset_prim_browser: + max: 0.001979 + mean: 0.00175532 + min: 0.00169 +shut_down_hydra: + max: 0.115286 + mean: 0.09681611 + min: 0.086453 +tear_down_the_ui: + max: 0.048896 + mean: 0.03373466 + min: 0.020433 +traverse_stage: + max: 0.007704 + mean: 0.00720531 + min: 0.006777 diff --git a/docs/performance/25.02_linux_kitchenset.yaml b/docs/performance/25.02_linux_kitchenset.yaml new file mode 100644 index 0000000000..2c81310217 --- /dev/null +++ b/docs/performance/25.02_linux_kitchenset.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.450073 + mean: 0.39564978 + min: 0.369955 +close_stage: + max: 0.014355 + mean: 0.00892537 + min: 0.007885 +configure_and_load_plugins: + max: 0.000177 + mean: 0.00013637 + min: 0.000125 +create_first_image: + max: 0.464893 + mean: 0.29444296 + min: 0.271989 +open_and_close_usdview: + max: 1.052828 + mean: 0.87884392 + min: 0.833702 +open_stage: + max: 0.129695 + mean: 0.09578667 + min: 0.079919 +reset_prim_browser: + max: 0.001492 + mean: 0.00120099 + min: 0.001131 +shut_down_hydra: + max: 0.020161 + mean: 0.01476924 + min: 0.013788 +tear_down_the_ui: + max: 0.013316 + mean: 0.00814103 + min: 0.006512 +traverse_stage: + max: 0.001279 + mean: 0.00121918 + min: 0.001169 diff --git a/docs/performance/25.02_linux_moorelane.yaml b/docs/performance/25.02_linux_moorelane.yaml new file mode 100644 index 0000000000..662cb9b82e --- /dev/null +++ b/docs/performance/25.02_linux_moorelane.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.828215 + mean: 0.63398924 + min: 0.609399 +close_stage: + max: 0.02652 + mean: 0.02451658 + min: 0.023531 +configure_and_load_plugins: + max: 0.000295 + mean: 0.00013562 + min: 0.000125 +create_first_image: + max: 11.18661 + mean: 10.92291829 + min: 10.74976 +open_and_close_usdview: + max: 12.323612 + mean: 12.06217025 + min: 11.855464 +open_stage: + max: 0.094922 + mean: 0.08151592 + min: 0.076916 
+reset_prim_browser: + max: 0.009406 + mean: 0.00889919 + min: 0.008551 +shut_down_hydra: + max: 0.074561 + mean: 0.06535938 + min: 0.060153 +tear_down_the_ui: + max: 0.12093 + mean: 0.11051621 + min: 0.105882 +traverse_stage: + max: 0.001515 + mean: 0.0013725 + min: 0.001311 diff --git a/docs/performance/25.02_linux_shaderball.yaml b/docs/performance/25.02_linux_shaderball.yaml new file mode 100644 index 0000000000..f2e641d7e9 --- /dev/null +++ b/docs/performance/25.02_linux_shaderball.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.496512 + mean: 0.47173693 + min: 0.448845 +close_stage: + max: 0.00172 + mean: 0.00083931 + min: 0.000551 +configure_and_load_plugins: + max: 0.00016 + mean: 0.00013359 + min: 0.000122 +create_first_image: + max: 2.549609 + mean: 2.40064236 + min: 2.339219 +open_and_close_usdview: + max: 3.198006 + mean: 3.04869962 + min: 2.97429 +open_stage: + max: 0.191218 + mean: 0.16476692 + min: 0.145738 +reset_prim_browser: + max: 0.001907 + mean: 0.00158025 + min: 0.001508 +shut_down_hydra: + max: 0.036495 + mean: 0.03046249 + min: 0.028971 +tear_down_the_ui: + max: 0.01495 + mean: 0.012032029999999999 + min: 0.009684 +traverse_stage: + max: 0.000447 + mean: 0.00036077 + min: 0.000332 diff --git a/docs/performance/25.02_macos_alab.yaml b/docs/performance/25.02_macos_alab.yaml new file mode 100644 index 0000000000..a6bc51b939 --- /dev/null +++ b/docs/performance/25.02_macos_alab.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.833911 + mean: 0.77374126 + min: 0.687303 +close_stage: + max: 0.046855 + mean: 0.03937341 + min: 0.033369 +configure_and_load_plugins: + max: 9.2e-05 + mean: 7.621e-05 + min: 7.1e-05 +create_first_image: + max: N/A + mean: N/A + min: N/A +open_and_close_usdview: + max: 6.179748 + mean: 5.96798742 + min: 5.747123 +open_stage: + max: 0.567854 + mean: 0.5199257 + min: 0.447765 +reset_prim_browser: + max: 0.00112 + mean: 0.00082208 + min: 0.000788 +shut_down_hydra: + max: 0.11464 + mean: 0.10125593 + min: 0.091086 +tear_down_the_ui: + max: 0.01794 + mean: 0.0147086 + min: 0.01284 +traverse_stage: + max: 0.003799 + mean: 0.00347155 + min: 0.003164 diff --git a/docs/performance/25.02_macos_kitchenset.yaml b/docs/performance/25.02_macos_kitchenset.yaml new file mode 100644 index 0000000000..2b780d9d7e --- /dev/null +++ b/docs/performance/25.02_macos_kitchenset.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.319429 + mean: 0.30113817 + min: 0.292441 +close_stage: + max: 0.003521 + mean: 0.00249618 + min: 0.002071 +configure_and_load_plugins: + max: 0.000116 + mean: 7.677e-05 + min: 7.1e-05 +create_first_image: + max: N/A + mean: N/A + min: N/A +open_and_close_usdview: + max: 0.721653 + mean: 0.70672701 + min: 0.693745 +open_stage: + max: 0.094733 + mean: 0.07521936 + min: 0.068011 +reset_prim_browser: + max: 0.000752 + mean: 0.00052464 + min: 0.000484 +shut_down_hydra: + max: 0.020057 + mean: 0.01476464 + min: 0.008866 +tear_down_the_ui: + max: 0.016418 + mean: 0.01394441 + min: 0.011373 +traverse_stage: + max: 0.0008 + mean: 0.00057752 + min: 0.000544 diff --git a/docs/performance/25.02_macos_moorelane.yaml b/docs/performance/25.02_macos_moorelane.yaml new file mode 100644 index 0000000000..dceda7a41d --- /dev/null +++ b/docs/performance/25.02_macos_moorelane.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.488371 + mean: 0.47201016 + min: 0.463011 +close_stage: + max: 0.00909 + mean: 0.00396154 + min: 0.003228 +configure_and_load_plugins: + max: 0.000118 + mean: 7.641e-05 + min: 7.1e-05 +create_first_image: + max: N/A + mean: N/A + min: N/A 
+open_and_close_usdview: + max: 9.359889 + mean: 8.68951359 + min: 8.33917 +open_stage: + max: 0.093883 + mean: 0.08819937 + min: 0.086742 +reset_prim_browser: + max: 0.005867 + mean: 0.00549084 + min: 0.00523 +shut_down_hydra: + max: 0.329771 + mean: 0.29719891 + min: 0.098075 +tear_down_the_ui: + max: 0.047822 + mean: 0.03717532 + min: 0.033172 +traverse_stage: + max: 0.000879 + mean: 0.000606 + min: 0.000579 diff --git a/docs/performance/25.02_macos_shaderball.yaml b/docs/performance/25.02_macos_shaderball.yaml new file mode 100644 index 0000000000..5173fd7ecf --- /dev/null +++ b/docs/performance/25.02_macos_shaderball.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 0.340783 + mean: 0.32877338 + min: 0.321071 +close_stage: + max: 0.001005 + mean: 0.00029506 + min: 0.000245 +configure_and_load_plugins: + max: 8.2e-05 + mean: 7.537e-05 + min: 7.1e-05 +create_first_image: + max: N/A + mean: N/A + min: N/A +open_and_close_usdview: + max: 1.249503 + mean: 1.21183052 + min: 1.179005 +open_stage: + max: 0.096531 + mean: 0.09162102 + min: 0.090059 +reset_prim_browser: + max: 0.000831 + mean: 0.00067506 + min: 0.000636 +shut_down_hydra: + max: 0.019225 + mean: 0.01475921 + min: 0.0112 +tear_down_the_ui: + max: 0.016371 + mean: 0.01323882 + min: 0.009822 +traverse_stage: + max: 0.000295 + mean: 0.00023278 + min: 0.000217 diff --git a/docs/performance/25.02_windows_alab.yaml b/docs/performance/25.02_windows_alab.yaml new file mode 100644 index 0000000000..20ac07803b --- /dev/null +++ b/docs/performance/25.02_windows_alab.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 4.445508 + mean: 4.1717602 + min: 4.095395 +close_stage: + max: 0.353328 + mean: 0.31994381 + min: 0.285362 +configure_and_load_plugins: + max: 0.001222 + mean: 0.00095023 + min: 0.000874 +create_first_image: + max: 12.310513 + mean: 11.36062476 + min: 11.124981 +open_and_close_usdview: + max: 17.523834 + mean: 16.54782061 + min: 16.222575 +open_stage: + max: 0.530343 + mean: 0.42763175 + min: 0.410067 +reset_prim_browser: + max: 0.006109 + mean: 0.00425615 + min: 0.003945 +shut_down_hydra: + max: 0.452494 + mean: 0.30029581 + min: 0.215869 +tear_down_the_ui: + max: 0.329101 + mean: 0.2393362 + min: 0.105651 +traverse_stage: + max: 0.009664 + mean: 0.00782833 + min: 0.007404 diff --git a/docs/performance/25.02_windows_kitchenset.yaml b/docs/performance/25.02_windows_kitchenset.yaml new file mode 100644 index 0000000000..a4484ecf18 --- /dev/null +++ b/docs/performance/25.02_windows_kitchenset.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 4.093091 + mean: 3.83718389 + min: 3.757905 +close_stage: + max: 0.025273 + mean: 0.02057523 + min: 0.017333 +configure_and_load_plugins: + max: 0.001403 + mean: 0.0009346000000000001 + min: 0.000821 +create_first_image: + max: 3.162327 + mean: 2.970126 + min: 2.917131 +open_and_close_usdview: + max: 7.462501 + mean: 7.08912163 + min: 6.955648 +open_stage: + max: 0.139086 + mean: 0.12787824 + min: 0.111618 +reset_prim_browser: + max: 0.00432 + mean: 0.00359397 + min: 0.003373 +shut_down_hydra: + max: 0.028106 + mean: 0.02360369 + min: 0.021171 +tear_down_the_ui: + max: 0.106419 + mean: 0.10032219 + min: 0.056925 +traverse_stage: + max: 0.002516 + mean: 0.00197429 + min: 0.001783 diff --git a/docs/performance/25.02_windows_moorelane.yaml b/docs/performance/25.02_windows_moorelane.yaml new file mode 100644 index 0000000000..80d17496fa --- /dev/null +++ b/docs/performance/25.02_windows_moorelane.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 4.677956 + mean: 4.16316914 + min: 3.995886 +close_stage: + max: 
0.082089 + mean: 0.06802329 + min: 0.062537 +configure_and_load_plugins: + max: 0.001198 + mean: 0.00094998 + min: 0.00084 +create_first_image: + max: 16.936302 + mean: 15.28659683 + min: 14.465477 +open_and_close_usdview: + max: 22.017953 + mean: 20.4393259 + min: 19.607275 +open_stage: + max: 0.195054 + mean: 0.16251379 + min: 0.144654 +reset_prim_browser: + max: 0.015288 + mean: 0.01235384 + min: 0.011793 +shut_down_hydra: + max: 0.662102 + mean: 0.48684717 + min: 0.425902 +tear_down_the_ui: + max: 0.360198 + mean: 0.23232395 + min: 0.188706 +traverse_stage: + max: 0.002648 + mean: 0.0021083 + min: 0.00191 diff --git a/docs/performance/25.02_windows_shaderball.yaml b/docs/performance/25.02_windows_shaderball.yaml new file mode 100644 index 0000000000..c690585bdf --- /dev/null +++ b/docs/performance/25.02_windows_shaderball.yaml @@ -0,0 +1,40 @@ +bring_up_the_ui: + max: 4.336201 + mean: 3.99499495 + min: 3.907712 +close_stage: + max: 0.002109 + mean: 0.00140918 + min: 0.001221 +configure_and_load_plugins: + max: 0.001144 + mean: 0.00093865 + min: 0.000833 +create_first_image: + max: 5.128265 + mean: 4.85809605 + min: 4.787113 +open_and_close_usdview: + max: 9.723654 + mean: 9.13730595 + min: 9.000209 +open_stage: + max: 0.306585 + mean: 0.2770631 + min: 0.265009 +reset_prim_browser: + max: 0.004871 + mean: 0.00406247 + min: 0.003719 +shut_down_hydra: + max: 0.060537 + mean: 0.05581599 + min: 0.052362 +tear_down_the_ui: + max: 0.126213 + mean: 0.10235839 + min: 0.098512 +traverse_stage: + max: 0.001536 + mean: 0.00111524 + min: 0.001 diff --git a/docs/performance/linux.svg b/docs/performance/linux.svg index 72324c5b8b..c4e543c845 100644 --- a/docs/performance/linux.svg +++ b/docs/performance/linux.svg @@ -32,10 +32,10 @@ z +" id="m2859318261" style="stroke:#000000;stroke-width:0.8;"/> - + @@ -102,7 +102,7 @@ L 12.40625 0 z " id="DejaVuSans-49"/> - + @@ -111,7 +111,71 @@ z - + + + + + + + + + + + + + + + + + + + + + + - + +" id="m19dd98e29d" style="stroke:#000000;stroke-width:0.8;"/> - + - + @@ -394,12 +458,12 @@ L -3.5 0 - + - + - + @@ -407,12 +471,12 @@ L -3.5 0 - + - + - + - + - + - + - + - + - + - - - @@ -543,12 +584,12 @@ z - + - + - + @@ -556,7 +597,7 @@ z - + - + +" id="m8a5546ecdb" style="stroke:#1f77b4;stroke-linejoin:miter;"/> - - + + + - - + +" id="m18edc3e67a" style="stroke:#ff7f0e;stroke-linejoin:miter;"/> - - + + + - - + +" id="m568c44db85" style="stroke:#2ca02c;stroke-linejoin:miter;"/> - - + + + - - + +" id="mc86b756c0a" style="stroke:#d62728;"/> - - + + + @@ -901,7 +950,7 @@ L 414.72 307.584 L 414.72 41.472 " style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/> - + - - - + - + - + - + - + - - + - + - + - + - + @@ -1397,19 +1446,19 @@ z - - + - + - + - + - + @@ -1421,19 +1470,19 @@ L 344.305938 83.926688 - - + - + - + - + - + @@ -1450,7 +1499,7 @@ L 344.305938 98.604813 - + diff --git a/docs/performance/macos.svg b/docs/performance/macos.svg index f338f5f484..dc38505ce8 100644 --- a/docs/performance/macos.svg +++ b/docs/performance/macos.svg @@ -32,10 +32,10 @@ z +" id="m69e9bb522c" style="stroke:#000000;stroke-width:0.8;"/> - + @@ -102,7 +102,7 @@ L 12.40625 0 z " id="DejaVuSans-49"/> - + @@ -111,7 +111,71 @@ z - + + + + + + + + + + + + + + + + + + + + + + - + +" id="med6443d1f1" style="stroke:#000000;stroke-width:0.8;"/> - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + +" id="m4d8a461809" style="stroke:#1f77b4;stroke-linejoin:miter;"/> - - 
+ + + - - + +" id="m1472b5f23b" style="stroke:#ff7f0e;stroke-linejoin:miter;"/> - - + + + - - + +" id="mc7c3ab0e9c" style="stroke:#2ca02c;stroke-linejoin:miter;"/> - - + + + - - + +" id="m038231c9db" style="stroke:#d62728;"/> - - + + + @@ -994,7 +1019,7 @@ L 414.72 307.584 L 414.72 41.472 " style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/> - + - - - + - + - + - + - + - - + - + - + - + - + @@ -1476,19 +1501,19 @@ z - - + - + - + - + - + @@ -1500,19 +1525,19 @@ L 344.305938 83.926688 - - + - + - + - + - + @@ -1529,7 +1554,7 @@ L 344.305938 98.604813 - + diff --git a/docs/performance/windows.svg b/docs/performance/windows.svg index ae18c642e8..d596aa1098 100644 --- a/docs/performance/windows.svg +++ b/docs/performance/windows.svg @@ -32,10 +32,10 @@ z +" id="m50beb435d5" style="stroke:#000000;stroke-width:0.8;"/> - + @@ -102,7 +102,7 @@ L 12.40625 0 z " id="DejaVuSans-49"/> - + @@ -111,7 +111,71 @@ z - + + + + + + + + + + + + + + + + + + + + + + - + +" id="md402ad0c52" style="stroke:#000000;stroke-width:0.8;"/> - + - + - + - + - + - + - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + +" id="m28cb0de855" style="stroke:#1f77b4;stroke-linejoin:miter;"/> - - + + + - - + +" id="mefacd60b9c" style="stroke:#ff7f0e;stroke-linejoin:miter;"/> - - + + + - - + +" id="mcadb47bdd6" style="stroke:#2ca02c;"/> + + + + + + + + + + - - + + @@ -876,7 +965,7 @@ L 414.72 307.584 L 414.72 41.472 " style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/> - + - - - + - + - + - + - + - - + - + - + - + - + @@ -1360,19 +1449,19 @@ z - - + - + - + - + - + @@ -1385,11 +1474,35 @@ L 345.399688 83.926688 + + + + + + + + + + + + + + + + + + + + + + - + diff --git a/docs/ref_performance_metrics.rst b/docs/ref_performance_metrics.rst index d4b1ad055f..a010049208 100644 --- a/docs/ref_performance_metrics.rst +++ b/docs/ref_performance_metrics.rst @@ -12,6 +12,10 @@ page describes what metrics are collected, what hardware and software configurations are used, the actual metrics results, and how to generate the metrics locally. +.. contents:: Table of Contents + :local: + :depth: 2 + *************** What We Measure *************** @@ -30,8 +34,8 @@ default (items in **bold** are reported on this page): * **Total time to start and quit** :program:`usdview` * Time to traverse the prims in the stage -We run 10 iterations for each asset, and capture the minimum and maximum times -for that set of iterations. We also calculate the mean time across the 10 +We run 100 iterations for each asset, and capture the minimum and maximum times +for that set of iterations. We also calculate the mean time across the 100 iterations. For each asset, we first warm the filesystem cache by loading the @@ -105,10 +109,16 @@ Windows platforms (as described in :ref:`perf_environments`). .. note:: - For the 24.11 release, there are known issues with obtaining metrics for - the Moore Lane asset on Windows, and the create_first_image metric on - macOS. We are actively investigating these issues and will update - published metrics when these issues are resolved. + A small increase or decrease in performance metrics over different releases + may not necessarily indicate an overall performance improvement or + regression in USD. Best-case minimum measurements are taken over multiple + iterations, but these measurements may still be subject to some variation, + so use the below results with caution. 
We are investigating these sources + of variation. + + Additionally, there are known issues with obtaining the create_first_image + metric on macOS. We will update published metrics when this issue is + resolved. .. image:: performance/linux.svg :width: 500 @@ -125,10 +135,14 @@ Standard Shader Ball This asset is designed to be a comprehensive test of a broad array of material properties in a single render. Geometry is expressed using USD, materials are defined using MaterialX, texture maps are provided in OpenEXR format and encoded -using the Academy Color Encoding System ACEScg color space. +using the Academy Color Encoding System ACEScg color space. -.. image:: https://raw.githubusercontent.com/usd-wg/assets/main/full_assets/StandardShaderBall/media/figure02.png - :width: 500 +In our performance sweep we use the ``mtlx_bubblegum`` material. Note the +image below may not exactly match the image generated during the performance +run; the primary intention is not necessarily to exercise specific renderers. + +.. image:: https://raw.githubusercontent.com/usd-wg/assets/main/full_assets/StandardShaderBall/media/example_materials/mtlx_bubblegum.jpg + :width: 300 The shader ball asset can be `downloaded here `__. @@ -214,7 +228,10 @@ are used (for each asset): .. code-block:: - python usdmeasureperformance.py -i 10 -a min -o + python usdmeasureperformance.py -i 100 -a min -o + +Optionally, `--tracedir ` will output trace information to `dir` that may aid in +performance debugging. Adding Custom Metrics ===================== diff --git a/docs/spec_usdpreviewsurface.rst b/docs/spec_usdpreviewsurface.rst index 56038a7997..b5ca022c31 100644 --- a/docs/spec_usdpreviewsurface.rst +++ b/docs/spec_usdpreviewsurface.rst @@ -151,12 +151,24 @@ and :usda:`opacity`. Roughness for the second specular lobe. Clearcoat results are calculated using the same normal map data used by the primary specular lobe. +.. _updateopacity: + * **opacity - float - 1.0** When *opacity* is 1.0 then the gprim is fully opaque, if it is smaller than - 1.0 then the prim is translucent, when it is 0 the gprim is transparent. Note - that even a fully transparent object still receives lighting as, for example, - perfectly clear glass still has a specular response. + 1.0 then the prim is translucent, when it is 0 the gprim is fully transparent. + Fully transparent objects will still receive a lighting response when the + *opacityMode* is *transparent*, however they will not receive any lighting + when the *opacityMode* is set to *presence*. + +* **opacityMode - token - transparent** + + This input dictates how materials with zero opacity are interpreted and + takes one of two inputs: *transparent* or *presence*. When the value is + *transparent* materials with zero opacity still receive a lighting response + as, for example, perfectly clear glass still has a specular response. + However, when set to *presence* these materials will not receive any + lighting and thus they always serve as cutouts. .. _addopacitythreshold: @@ -174,10 +186,9 @@ and :usda:`opacity`. greater than or equal to the *opacityThreshold* will be fully visible. Thus, the *opacityThreshold* serves as a switch for how the *opacity* input is interpreted; this "translucent or masked" behavior is common in engines and - renderers, and makes the UsdPreviewSurface easier to interchange. It does - imply, however, that it is not possible to faithfully recreate a - glassy/translucent material that also provides an opacity-based mask... so no - single-polygon glass leaves. 
+ renderers, and makes the UsdPreviewSurface easier to interchange. Recreating + a translucent material that also provides a zero opacity-based mask, can be + achieved by setting *opacityThreshold* to zero and *opacityMode* to *presence*. .. _updateior: @@ -1073,10 +1084,20 @@ From version 2.3... of specular components, including the clearcoat when :math:`UsdPreviewSurface.clearcoat > 0`. -Version 2.5 - Current Head -########################## +Version 2.5 +########### From version 2.4... * :ref:`Updates UDIM specification to include tile 1100.` Changes the baseline UDIM tile support from 1001-1099, inclusive, to 1001-1100. This allows for a 10x10 grid of UDIM tiles. + +Version 2.6 - Current Head +########################## + +From version 2.5... + * :ref:`Adds opacityMode for different handling of materials with zero opacity. ` + `opacityMode` of `transparent` is the previous 2.5 spec behavior in which + materials with opacity = 0 still receive a specular reflection, + an `opacityMode` of `presence` causes fully transparent materials to + receive no lighting response. diff --git a/docs/toolset.help b/docs/toolset.help index 1b5ee04e89..d3eaf2fe3a 100644 --- a/docs/toolset.help +++ b/docs/toolset.help @@ -91,7 +91,7 @@ optional arguments: usage: usdview [-h] [--renderer {GL,Prman,Embree,Tiny}] [--select PRIMPATH] [--camera CAMERA] [--mask PRIMPATH[,PRIMPATH...]] [--clearsettings] - [--config {foo}] [--defaultsettings] [--norender] [--noplugins] + [--config {}] [--defaultsettings] [--norender] [--noplugins] [--unloaded] [--bboxStandin] [--timing] [--allow-async] [--traceToFile TRACETOFILE] [--traceFormat {chrome,trace}] [--tracePython] [--memstats {none,stage,stageAndImaging}] @@ -127,7 +127,7 @@ optional arguments: either use commas with no spaces or quote the argument and separate paths by commas and/or spaces. --clearsettings Restores usdview settings to default - --config {foo} Load usdview with the state settings found in the + --config {} Load usdview with the state settings found in the specified config. If not provided will use the previously saved application state and automatically persist state on close @@ -207,6 +207,8 @@ usage: usdrecord [-h] [--mask PRIMPATH[,PRIMPATH...]] [--enableDomeLightVisibility] [--renderPassPrimPath RPPRIMPATH] [--renderSettingsPrimPath RSPRIMPATH] + [--traceToFile TRACETOFILE] [--traceFormat {chrome,trace}] + [--memstats] usdFilePath outputImagePath Generates images from a USD file @@ -298,6 +300,19 @@ optional arguments: properties authored on the RenderSettings will override other arguments (imageWidth, camera, outputImagePath) + --traceToFile TRACETOFILE + Start tracing at application startup and write + --traceFormat specified format output to the specified + trace file when the application quits + --traceFormat {chrome,trace} + Output format for trace file specified by + --traceToFile. 'chrome' files can be read in chrome, + 'trace' files are simple text reports. + (default=chrome) + --memstats Use the Pxr MallocTags memory accounting system to + profile USD, saving results to a tmp file, with a + summary to the console. Will have no effect if + MallocTags are not supported in the USD installation. ==== usdrecord end ==== @@ -410,45 +425,71 @@ optional arguments: ==== usdzip end ==== ==== usdchecker start ==== -usage: usdchecker [-h] [-s] [-p] [-o [OUTFILE]] [--noAssetChecks] [--arkit] - [-d] [-v] [-t] - [inputFile] - -Utility for checking the compliance of a given USD stage or a USDZ package. 
-Only the first sample of any relevant time-sampled attribute is checked, -currently. General USD checks are always performed, and more restrictive -checks targeted at distributable consumer content are also applied when the " ---arkit" option is specified. +Utility for checking the compliance of a given USD stage or a USDZ package. +Only the first sample of any relevant time-sampled attribute is checked, +currently. General USD checks are always performed, and more restrictive checks +targeted at distributable consumer content are also applied when the "--arkit" +option is specified. In order to use the new validation framework provide the +'--useNewValidationFramework' option. +Usage: usdchecker [OPTIONS] [inputFile] -positional arguments: - inputFile Name of the input file to inspect. +Positionals: + inputFile FILE Name of the input file to inspect. -optional arguments: - -h, --help show this help message and exit - -s, --skipVariants If specified, only the prims that are present in the - default (i.e. selected) variants are checked. When - this option is not specified, prims in all possible - combinations of variant selections are checked. - -p, --rootPackageOnly - Check only the specifiedpackage. Nested packages, - dependencies and their contents are not validated. - -o [OUTFILE], --out [OUTFILE] - The file to which all the failed checks are output. If - unspecified, the failed checks are output to stdout; - if "stderr", terminal coloring will be surpressed. - --noAssetChecks If specified, do NOT perform extra checks to help - ensure the stage or package can be easily and safely - referenced into aggregate stages. - --arkit Check if the given USD stage is compatible with - RealityKit's implementation of USDZ as of 2023. These - assets operate under greater constraints that usdz - files for more general in-house uses, and this option - attempts to ensure that these constraints are met. - -d, --dumpRules Dump the enumerated set of rules being checked for the - given set of options. - -v, --verbose Enable verbose output mode. - -t, --strict Return failure code even if only warnings are issued, - for stricter compliance. +Options: + -h,--help Print this help message and exit + -s,--skipVariants If specified, only the prims that are present in + the default (i.e. selected) variants are checked. + When this option is not specified, prims in all + possible combinations of variant selections are + checked. + -p,--rootPackageOnly Check only the specified package. Nested packages, + dependencies and their contents are not validated. + -o,--out FILE The file to which all the failed checks are + output. If unspecified, the failed checks are + output to stdout; if "stderr", terminal coloring + will be suppressed. + --noAssetChecks If specified, do NOT perform extra checks to help + ensure the stage or package can be easily and + safely referenced into aggregate stages. + --arkit Check if the given USD stage is compatible with + RealityKit's implementation of USDZ as of 2023. + These assets operate under greater constraints + that usdz files for more general in-house uses, + and this option attempts to ensure that these + constraints are met. + -d,--dumpRules Dump the enumerated set of rules being checked for + the given set of options. + -v,--verbose Enable verbose output mode. + -t,--strict Return failure code even if only warnings are + issued, for stricter compliance. + --useNewValidationFramework Enable the new validation framework. + --variantSets TEXT ... List of variantSets to validate. 
All variants for + the given variantSets are validated. This can also + be used with --variants to validate the given + variants in combination with variants from the + explicitly specified variantSets. This option is + only valid when using the new validation + framework. + --variants TEXT ... List of ',' separated variantSet:variant pairs to + validate. Each set of variants in the list are + validated separately. Example: --variants + foo:bar,baz:qux will validate foo:bar and baz:qux + together but --variants foo:bar --variants + baz:qux will validate foo:bar and baz:qux + separately. This can also be used with + --variantSets to validate the given variants in + combination with variants from the explicitly + specified variantSets. This option is only valid + when using the new validation framework. + --disableVariantValidationLimit + Disable the limit set to restrict the number of + variants validation calls. This is useful when the + number of variants is large and we want to + validate all possible combinations of variants. + Default is to limit the number of validation calls + to 1000. This option is only valid when using the + new validation framework. ==== usdchecker end ==== @@ -622,6 +663,7 @@ Options: usage: usdmeasureperformance [-h] [-c [CUSTOM_METRICS ...]] -o OUTPUT [-i ITERATIONS] [-a {min,mean,max} [{min,mean,max} ...]] + [-t TRACEDIR] asset Measure and export USD functional performance @@ -653,9 +695,16 @@ optional arguments: yaml format will be a key value dictionary with _ to aggregated time value. If multiple aggregations are requested, the output - yaml format will be: {: , + yaml format will be : {: , :...}.When no aggregation is set, the output format will be : [, , ...] or one measured value for each iteration. + -t TRACEDIR, --tracedir TRACEDIR + Outputs a trace file for each run of usdview in the + given directory if provided and if 'aggregation' + includes min or max. A trace file for the iteration of + testusdview or usdview from which the aggregated value + of each metric was observed will be output in the form + _.trace ==== usdmeasureperformance end ==== diff --git a/docs/toolset.rst b/docs/toolset.rst index 0725f6113e..7c6b8276e5 100644 --- a/docs/toolset.rst +++ b/docs/toolset.rst @@ -389,7 +389,13 @@ usdmeasureperformance ********************* Helper script to measure usdview asset performance. Recommended usage is -`usdmeasureperformance -i 10 --agg min -o ` +`usdmeasureperformance -i 10 --agg min -o `. +If there exists a file ending in `overrides.usda` in the same directory as the +given asset file, the file will be supplied as `--sessionLayer` to usdview and +testusdview invocations. This allows provision of specific variant selections, +for example. The first file found by os.listdir will be used. Ensure there is +only one file ending in `overrides.usda` in the asset directory to remove +ambiguity. .. literalinclude:: toolset.help :language: none diff --git a/docs/tut_authoring_variants.rst b/docs/tut_authoring_variants.rst index 5f053cbc6c..9aa46d74d6 100644 --- a/docs/tut_authoring_variants.rst +++ b/docs/tut_authoring_variants.rst @@ -22,7 +22,7 @@ these files to a working directory and make them writable. composed result shows the locally-authored blue color. That stronger local opinion overrides the opinions from variants. - See :ref:`glossary:LIVRPS Strength Ordering` for more details on + See :ref:`usdglossary-livrpsstrengthordering` for more details on strength order in USD. .. 
code-block:: python @@ -279,7 +279,7 @@ these files to a working directory and make them writable. .. admonition:: Opinion Strength Order - :ref:`Strength order ` is a fundamental + :ref:`Strength order ` is a fundamental part of USD. #. Examine the composed result. diff --git a/docs/tut_generating_new_schema.rst b/docs/tut_generating_new_schema.rst index 701d4bdfdd..60f4444b58 100644 --- a/docs/tut_generating_new_schema.rst +++ b/docs/tut_generating_new_schema.rst @@ -365,8 +365,10 @@ To rebuild the plugin, simply go to the root of your build directory and run. Using the Schema Classes ######################## -.. note:: Because this schema is an external plugin, the USD build must be told - where to find it before it can be used. This can be done by either: +.. note:: + + Because this schema is an external plugin, the USD build must be told + where to find it before it can be used. This can be done by either: * Setting the :filename:`PXR_PLUGINPATH_NAME` environment variable to the location of the plugin's :filename:`resources` directory. For @@ -376,7 +378,13 @@ Using the Schema Classes * Copying :filename:`usdSchemaExamples.so` (on Windows, :filename:`usdSchemaExamples.dll` and :filename:`.lib`) and the - :filename:`usdSchemaExamples` directory to :filename`/plugin/usd` + :filename:`usdSchemaExamples` directory to :filename:`/plugin/usd` + + You may encounter "ImportError DLL load failed" when running from + Python 3.8+ on Windows. This is due to the DLL directory not being added to + trusted locations and can be resolved by manually adding the + :filename:`resources` directory before import via + :mono:`os.add_dll_directory("/path/to/plugin/resources/directory")`. Create a usd file named Test.usda with the following content: diff --git a/docs/tut_setup_version_badge.rst b/docs/tut_setup_version_badge.rst index 0e4f9f69d4..eb7ca85bbd 100644 --- a/docs/tut_setup_version_badge.rst +++ b/docs/tut_setup_version_badge.rst @@ -2,4 +2,4 @@ :fa:`cogs` :ref:`Configure your Environment ` - :fa:`check` Tested with `USD 24.11 `_ + :fa:`check` Tested with `USD 25.02 `_ diff --git a/docs/tut_simple_shading.rst b/docs/tut_simple_shading.rst index 3f49e7f800..2704bfbba1 100644 --- a/docs/tut_simple_shading.rst +++ b/docs/tut_simple_shading.rst @@ -49,7 +49,7 @@ other scenes. In a python shell, execute the following: from pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade - stage = Usd.Stage.CreateNew("simpleShading.usd") + stage = Usd.Stage.CreateNew("simpleShading.usda") UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) modelRoot = UsdGeom.Xform.Define(stage, "/TexModel") @@ -87,7 +87,7 @@ multiple times). In another command shell, try: .. code-block:: sh - > usdview simpleShading.usd + > usdview simpleShading.usda We should see something like: diff --git a/docs/tut_usdview_plugin.rst b/docs/tut_usdview_plugin.rst index 8b62eeeaf7..56295123a4 100644 --- a/docs/tut_usdview_plugin.rst +++ b/docs/tut_usdview_plugin.rst @@ -112,7 +112,9 @@ to the PXR_PLUGINPATH_NAME environment variable. At this point, if we open :program:`usdview` we should see a new "Tutorial" menu. If we open this menu and select "Print Message," we should see "Hello, -World!" printed to the console. +World!" printed to the console. If the "Tutorial" menu does not appear, +try using full paths in the above environment variables and ensure files +are named exactly `__init__.py` and `plugInfo.json`. Congratulations! We have just created a new :program:`usdview` plugin! @@ -204,7 +206,7 @@ module. 
from pxr import Tf from pxr.Usdviewq.plugin import PluginContainer - import printer + from . import printer class TutorialPluginContainer(PluginContainer): diff --git a/docs/usdfaq.rst b/docs/usdfaq.rst index 0dcd79a07c..a34cf89fff 100644 --- a/docs/usdfaq.rst +++ b/docs/usdfaq.rst @@ -320,7 +320,7 @@ Sublayering has the following advantages: your results together rather than sublayer them - or add another layer into the mix so that you can reference in the root layer of your sublayer stack. This is because the data in the layers that you SubLayer form the - **L** in :ref:`LIVRPS Strength Ordering `, + **L** in :ref:`LIVERPS Strength Ordering `, whereas opinions from VariantSets form the **V**, so anything defined in VariantSets will be *weaker* than the data in your sublayers. By extension, if you want to compose several layers into the existing file @@ -438,14 +438,14 @@ List-edited composition arcs Meaningfully deleting composition arcs imposes some restrictions, because of the central aspect of *encapsulation* in USD composition. Encapsulation means that -whenever you use a references, payload, inherits, variantSet, or specializes arc -on a prim, **the result that gets woven into the composition is immutable by -stronger layers** - values can be overridden, and objects can be added, but the -"internal referencing structure" of the arc's target cannot be changed. This is -important for several reasons: - - * It makes the :ref:`LIVRPS composition algorithm ` explainable and (more easily) understandable, because it is +whenever you use a references, relocates, payload, inherits, variantSet, or +specializes arc on a prim, **the result that gets woven into the composition is +immutable by stronger layers** - values can be overridden, and objects can be +added, but the "internal referencing structure" of the arc's target cannot be +changed. This is important for several reasons: + + * It makes the :ref:`LIVERPS composition algorithm ` + explainable and (more easily) understandable, because it is properly recursive. * It enables more efficient composition engine implementations, because it diff --git a/docs/user_guides/namespace_editing.rst b/docs/user_guides/namespace_editing.rst index ec201d3ffa..eabc5df1b1 100644 --- a/docs/user_guides/namespace_editing.rst +++ b/docs/user_guides/namespace_editing.rst @@ -10,19 +10,26 @@ A namespace edit is an operation that removes or changes the namespace path of a **composed** prim or property on a stage. Edit operations currently include deleting and moving (renaming and/or reparenting) a composed prim or property. -While it is possible to do some of these operations using Sdf or UsdStage APIs, -these APIs can only edit prims or properties on a single layer. Namespace -editing handles edits for prims and properties that are composed from multiple -layers or other composition arcs. Using namespace editing lets you robustly -delete and move prims that have opinions throughout a LayerStack that you have -the authority to edit. Namespace editing also allows "non-destructive" -deletion and moving of prims defined in LayerStacks that you don't have the -authority to edit. +While it is possible to do some of these operations using Sdf or UsdStage APIs, +these APIs can only edit prims or properties on a single layer. Additionally, +any paths targeting the moved or deleted objects will need to be manually fixed. +Use namespace editing to easily and safely delete or move prims and properties +across the :ref:`LayerStack `. 
Namespace editing handles +edits for objects composed from multiple layers, and automatically handles +changing :ref:`EditTargets <usdglossary-edittarget>` and fixing paths targeting +the edited objects. + +Namespace editing also provides non-destructive editing of prims defined across +composition arcs by adding the **relocates** composition arc if needed. +By using relocates, namespace editing ensures non-destructive edits by *not* +modifying the source of a composition arc. This is necessary for workflows where +you might not be able to make changes to the source of the composition +arc directly (e.g. you're referencing prims from assets maintained by a +different department that are also being referenced elsewhere). As a simple example, if you had :filename:`model.usda` with various prims: .. code-block:: usda - :caption: model.usda #usda 1.0 ( ) def Scope "modelScope" { def "model1" { custom string department = "set design" } } And :filename:`main.usda`, which has :filename:`model.usda` as a sublayer and -overs for :sdfpath:`/modelScope` and :sdfpath:`/modelScope/model1`: +overrides for :sdfpath:`/modelScope` and :sdfpath:`/modelScope/model1`: .. code-block:: usda - :caption: main.usda #usda 1.0 ( subLayers = [ @model.usda@ ] ) over "modelScope" { over "model1" { custom string department = "lighting" } } After loading :filename:`main.usda` into your stage, if you wanted to fully -remove :sdfpath:`/modelScope/model1` from your composed stage, using UsdStage +remove :sdfpath:`/modelScope/model1` from your composed stage using UsdStage APIs, you'd have to delete it twice: once in :filename:`main.usda`, and then, -after changing edit targets, again in the :filename:`model.usda` layer: +after changing :ref:`EditTargets <usdglossary-edittarget>`, again in the +:filename:`model.usda` layer: .. code-block:: python stage = Usd.Stage.Open("main.usda") - # This only removes model1 in model.usda, not + # This only removes model1 in main.usda, not # model1 in the model.usda layer removeResult = stage.RemovePrim("/modelScope/model1") # To remove model1 in model.usda, you'd have to # set the edit target to the model.usda layer and call RemovePrim() again Whereas using namespace editing will properly handle removing the prim from -both :filename:`main.usda` and :filename:`model.usda`, handling edit target -changes for you: +both :filename:`main.usda` and :filename:`model.usda` in a single edit +operation, handling edit target changes for you: .. code-block:: python editor = Usd.NamespaceEditor(stage) removeResult = editor.DeletePrimAtPath("/modelScope/model1") editor.ApplyEdits() -Namespace editing also handles issues such as making sure that paths to -and overrides of edited objects are still valid after renaming or reparenting +Namespace editing handles issues such as making sure that paths targeting +edited objects and overrides are still valid after renaming or reparenting prims. Namespace editing tries to fix any existing composition arcs, relationship targets, attribute connections, and overrides that used paths to renamed or reparented prims to use the new paths. -Namespace editing in a future update will also use **relocates** for more -complex edit scenarios. Relocates are another composition arc that maps a prim -path in a namespace to a new path location. For example, if you were referencing -a prim from :filename:`refModel.usda`: +Namespace editing will also use **relocates** for more complex edit scenarios. 
+Relocates are another composition arc that maps a prim path defined in a remote +:ref:`LayerStack <usdglossary-layerstack>` (i.e. across a composition arc) to a +new path location in the local namespace. As an example of how namespace +editing uses relocates, suppose you had :filename:`refModel.usda`: .. code-block:: usda - :caption: refModel.usda def "modelA" () { def "modelAChild" { } } +Which is referenced in :filename:`main.usda`: .. code-block:: usda - :caption: main.usda def "mainModelA" ( prepend references = @refModel.usda@ ) { -You will be able to use namespace editing to move or rename -:sdfpath:`/mainModelA/modelAChild` using relocates. +You might want to use namespace editing to move or rename +:sdfpath:`/mainModelA/modelAChild`. Because :sdfpath:`/mainModelA/modelAChild` +is composed across a reference composition arc, it can't be directly edited, +so namespace editing will use relocates to perform the edit. .. code-block:: python editor = Usd.NamespaceEditor(stage) removeResult = editor.MovePrimAtPath("/mainModelA/modelAChild", "/mainModelA/renamedChild") editor.ApplyEdits() +The resulting stage root layer for :file:`main.usda` will look like: + +.. code-block:: usda + + #usda 1.0 + ( + relocates = { + </mainModelA/modelAChild>: </mainModelA/renamedChild> + } + ) + + def "mainModelA" ( + prepend references = @refModel.usda@ + ) + { + } + +.. note:: + + **relocates** is a new composition arc for USD that is a separate feature + from namespace editing, and can be used independently. + See :ref:`usdglossary-relocates` for more details. + +.. _nsedit_using_usdnamespaceeditor: + ************************ Using UsdNamespaceEditor ************************ @@ -147,7 +182,7 @@ rename or reparent prims and properties using :usdcpp:`UsdPrim` and When you call a :usdcpp:`UsdNamespaceEditor` edit operation, the operation paths are validated (e.g. the paths passed to :code:`MovePrimAtPath()` are checked to make sure they are valid paths), and then the operation is queued (to support -batches of operations, see :ref:`namespace_editing_batch_edits` below). To +batches of operations, see :ref:`nsedit_batch_edits` below). To execute individual edit operations, as the following examples do, call :code:`ApplyEdits()` after each operation call. @@ -204,6 +239,82 @@ Note that after renaming or reparenting a :usdcpp:`UsdPrim` or :usdcpp:`UsdProperty`, the :usdcpp:`UsdPrim` or :usdcpp:`UsdProperty` reference used in the operation will no longer be valid, as the path has changed. +.. _nsedit_setting_editor_options: + Setting Editor Options ====================== When you create a namespace editor, you can optionally provide edit options that control editor behavior. The current options are: + +* **allowRelocatesAuthoring**: If :code:`True` the namespace editor will use + relocates when needed to make edits. If :code:`False`, the namespace editor + will not use relocates and will issue errors when applying or validating edits + that require relocates. The default is :code:`True`. + +The following example creates the :code:`noRelocatesEditOptions` edit options, +disables :code:`noRelocatesEditOptions.allowRelocatesAuthoring`, and creates +a namespace editor for :code:`stage` with these options. + +.. 
code-block:: python + + noRelocatesEditOptions = Usd.NamespaceEditor.EditOptions() + noRelocatesEditOptions.allowRelocatesAuthoring = False + + # Create/use namespace editor that will not use relocates + noRelocatesEditor = Usd.NamespaceEditor(stage, noRelocatesEditOptions) + +.. _nsedit_working_with_relocates: + +Working With Relocates +====================== + +As mentioned earlier, namespace editing will use **relocates** if necessary +for edit operations across composition arcs. Namespace editing will add or +update relocates in the appropriate layer's metadata relocates list. The +following example shows layer metadata with a relocate added by a namespace +editing operation to rename :sdfpath:`/mainModelA/modelAChild` to +:sdfpath:`/mainModelA/renamedChild`: + +.. code-block:: usda + + #usda 1.0 + ( + relocates = { + </mainModelA/modelAChild>: </mainModelA/renamedChild> + } + ) + +For delete operations that require using relocates, namespace editing will +create a relocates mapping that maps a prim or property to a "deleted" target: + +.. code-block:: python + + # Delete a referenced prim, which will add a new relocates + editor.DeletePrimAtPath('/RootPrim/ChildInRef') + editor.ApplyEdits() + +.. code-block:: + + #usda 1.0 + ( + relocates = { + </RootPrim/ChildInRef>: <> + } + ) + +Note that the way in which relocates must "transfer" composed opinions prevents +you from re-defining a new prim at the deleted or moved target location: + +.. code-block:: python + + # Continuing from the earlier Python code, now try and define a prim at the + # deleted /RootPrim/ChildInRef path. This will result in a + # "Failed to define UsdPrim" error. + stage.DefinePrim('/RootPrim/ChildInRef') + +.. _nsedit_fixing_paths_for_moved_objects: + Fixing Paths For Moved Objects ============================== @@ -318,16 +429,108 @@ With the flattened results looking like: } } -.. note:: - Currently, direct edits across composition arcs, such as renaming just - :sdfpath:`/Shot1/shotAsset/assetChild` in the above example, is not supported - via namespace editing and will result in the following error (from - :code:`CanApplyEdits()` or :code:`ApplyEdits()`): "The prim to move requires - authoring relocates since it composes opinions introduced by ancestral - composition arcs; authoring relocates is not yet supported". Relocates will - be available in a future update. +.. _nsedit_fixing_edits_with_dependent_stages: -.. _namespace_editing_batch_edits: +Applying Edits to Dependent Stages ================================== + +In some situations, a namespace edit applied to one stage can impact other +**dependent stages**. A dependent stage is any additional stage open in the +current session that has a composition dependency on any layer edits made for +the editor's primary stage. For example, you might have a stage that has +references to prims in the editor's primary stage, and you might have edits +that rename or delete the referenced prims. + +By default, an editor only makes edits and fixes to the editor's primary stage. +However, you can add dependent stages to an editor via +:code:`AddDependentStage()` and the editor will make any necessary additional +edits in those dependent stages to update the composition dependencies +appropriately. + +For example, we might have the layer :filename:`layer1.usda`, which will get +composed into the stage for which we'll create a namespace editor. + +.. 
code-block:: usda + + #usda 1.0 + ( + ) + + def "Prim1" { + def "Child" { + } + } + + def "InternalRef1" ( + references = </Prim1> + ) { + over "Child" { + int overChildAttr + } + } + +We also have an additional layer :filename:`layer2.usda` that references prims +in :filename:`layer1.usda`: + +.. code-block:: usda + + #usda 1.0 + ( + ) + + def "OtherStageRef2" ( + references = @layer1.usda@</Prim1/Child> + ) { + int overChildAttr + } + +We can open both layers in separate stages, create a namespace editor for the +stage containing layer1, and add the stage containing layer2 as a dependent +stage. + +.. code-block:: python + + stage1 = Usd.Stage.Open("layer1.usda") + stage2 = Usd.Stage.Open("layer2.usda") + + # Create a namespace editor for stage1 + editor = Usd.NamespaceEditor(stage1) + + # Add stage2 as a dependent stage for our stage1 editor + editor.AddDependentStage(stage2) + + # Move /Prim1/Child to /Prim1/RenamedChild. This will not only + # update the prims and references in stage1, but also update the + # OtherStageRef2 reference in stage2 + editor.MovePrimAtPath('/Prim1/Child', '/Prim1/RenamedChild') + editor.ApplyEdits() + +After the edit, the root layer of stage2 looks like: + +.. code-block:: usda + + #usda 1.0 + + def "OtherStageRef2" ( + references = @layer1.usda@</Prim1/RenamedChild> + ) + { + int overChildAttr + } + +You can use :code:`RemoveDependentStage()` to remove any added dependent stages +before making a namespace edit when you do not want that stage's +dependencies updated. + +If you have several dependent stages, you can set a list of dependent stages on +an editor using :code:`SetDependentStages()`. + +Note that namespace editing finds dependencies in dependent stages based on +what is currently loaded in those stages. If a stage has dependencies in +unloaded payloads, load-mask-filtered prims, unselected variants, or children of +inactive prims, the namespace editor cannot find and update those dependencies. + +.. _nsedit_batch_edits: Batch Edits =========== @@ -370,12 +573,16 @@ Executing batches of edits will usually be more efficient than applying each edit individually. USD will process the list of edits in a batch to determine the most efficient way to apply them. +.. _nsedit_editing_best_practices: + ******************************** Namespace Editing Best Practices ******************************** The following are some general best practices and caveats for namespace editing. +.. _nsedit_canapplyedits_validate_operations: + Use CanApplyEdits() To Validate Edit Operations =============================================== @@ -404,6 +611,8 @@ indicating the path does not resolve to a valid prim. else: # Handle error, using canApplyResult.whyNot as needed, etc. +.. _nsedit_builtin_properties_not_editable: + Built-In Properties From Schemas Are Not Editable ================================================= @@ -429,3 +638,35 @@ would fail. editor.DeletePropertyAtPath("/testSphere/customProp") # This is allowed editor.DeletePropertyAtPath("/testSphere/radius") # This is not allowed and will cause an error + +.. _nsedit_relocates_performance_impact: + +Be Aware of Relocates Performance Impact ======================================== + +If your namespace editing operations result in adding **relocates** to your +stage, this can increase the number of composition arcs in your stage, which +could impact stage composition performance. 
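+
+One way to keep an eye on this is to inspect the relocates authored on a
+layer after applying edits. A minimal sketch (this assumes your build's
+Python binding for ``Sdf.Layer`` exposes the layer's relocates metadata as
+a ``relocates`` property; check your installation):
+
+.. code-block:: python
+
+    from pxr import Sdf
+
+    layer = Sdf.Layer.FindOrOpen("main.usda")
+
+    # Each entry maps a source path to a target path (an empty target
+    # indicates a deletion); a steadily growing list can be a sign that
+    # edits should be consolidated at the source instead.
+    print(len(layer.relocates))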
+ +If you want to test whether a namespace edit will add relocates, you can +use an editor configured to disallow authoring relocates, and use +:code:`CanApplyEdits()` looking for any errors that indicate relocates would +be needed. + +.. code-block:: python + + # Create/use namespace editor that will not use relocates + noRelocatesEditOptions = Usd.NamespaceEditor.EditOptions() + noRelocatesEditOptions.allowRelocatesAuthoring = False + noRelocatesEditor = Usd.NamespaceEditor(stage, noRelocatesEditOptions) + + # Rename /mainModelA/modelAChild to /mainModelA/renamedChild + # This editor is configured to not use relocates, so an error will be shown + removeResult = noRelocatesEditor.MovePrimAtPath("/mainModelA/modelAChild", "/mainModelA/renamedChild") + applyResult = noRelocatesEditor.CanApplyEdits() + if applyResult is not True: + # We should get a "The prim to edit requires authoring relocates since + # it composes opinions introduced by ancestral composition arcs; + # relocates authoring must be enabled to perform this edit" error + print ("noRelocatesEditor: Cannot apply edits, reason: " + applyResult.whyNot) + \ No newline at end of file diff --git a/extras/imaging/examples/hdui/dataSourceValueTreeView.cpp b/extras/imaging/examples/hdui/dataSourceValueTreeView.cpp index 99d2fe0400..bd3ec70f0b 100644 --- a/extras/imaging/examples/hdui/dataSourceValueTreeView.cpp +++ b/extras/imaging/examples/hdui/dataSourceValueTreeView.cpp @@ -185,7 +185,7 @@ class Hdui_UnsupportedTypeValueItemModel : public Hdui_ValueItemModel return QVariant(); } - return QVariant("(unsuppored type)"); + return QVariant("(unsupported type)"); } int rowCount(const QModelIndex &parent = QModelIndex()) const override { diff --git a/extras/performance/usdmeasureperformance.py b/extras/performance/usdmeasureperformance.py index 51962158af..6c54e6b7a5 100644 --- a/extras/performance/usdmeasureperformance.py +++ b/extras/performance/usdmeasureperformance.py @@ -13,10 +13,19 @@ any additional custom scripts to `testusdview` in addition to the default EXPLICIT_METRICS, please provide the "--custom-metrics" command line argument. See the --help documentation for more info on `--custom-metrics` format. + +If there exists a file ending in `overrides.usda` in the same directory as the +given asset file, the file will be supplied as `--sessionLayer` to usdview and +testusdview invocations. This allows provision of specific variant selections, +for example. The first file found by os.listdir will be used. Ensure there is +only one file ending in `overrides.usda` in the asset directory to remove +ambiguity. """ import argparse import functools +import re import os +import shutil import statistics import subprocess import sys @@ -39,17 +48,18 @@ } -def parseOutput(output, parseFn): +def parseOutput(output, parseFn, traceFile = None): """ Invokes `parseFn` and converts the returned tuples of (success, metric_identifier, time) to a dictionary of - (metric_identifier, time). + (metric_identifier, (time, traceFile)) if traces are being collected, + (metric_identifier, (time, None)) otherwise. """ metrics = {} for line in output.splitlines(): res = parseFn(line) if res[0]: - metrics[res[1]] = res[2] + metrics[res[1]] = (res[2], traceFile) return metrics @@ -70,11 +80,23 @@ def warmCache(assetPath): raise -def measurePerformance(assetPath): +def measurePerformance(assetPath, traceDir, iteration, sessionLayer): """ Run usdview to produce native timing information. 
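+    If traceDir is given, each iteration also writes a trace file
+    (usdview_<iteration>.trace in traceDir) via usdview's --traceToFile.
+    If sessionLayer is given, it is forwarded as usdview's --sessionLayer.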
""" - command = f"usdview --quitAfterStartup --timing {assetPath}" + traceArgs = "" + traceFile = None + if traceDir is not None: + traceFile = os.path.join(traceDir, f"usdview_{iteration}.trace") + traceArgs = f"--traceToFile {traceFile} --traceFormat trace" + + if sessionLayer: + sessionLayer = f"--sessionLayer {sessionLayer}" + else: + sessionLayer = "" + + command = (f"usdview --quitAfterStartup --timing {sessionLayer} " + f"{assetPath} {traceArgs}") try: result = subprocess.run(command, shell=True, @@ -85,17 +107,34 @@ def measurePerformance(assetPath): raise output = result.stdout.decode() - return parseOutput(output, parseTiming) + return parseOutput(output, parseTiming, traceFile) -def measureTestusdviewPerf(assetPath, testusdviewMetrics): +def measureTestusdviewPerf(assetPath, + testusdviewMetrics, + traceDir, + iteration, + sessionLayer): """ Run timing scripts for metrics registered in `testusdviewMetrics`. """ + if sessionLayer: + sessionLayer = f"--sessionLayer {sessionLayer}" + else: + sessionLayer = "" + metrics = {} for script, metricExpressions in testusdviewMetrics.items(): - command = ("testusdview --norender --testScript " - f"{script} {assetPath}") + traceArgs = "" + traceFile = None + if traceDir is not None: + scriptRep = re.sub(r'\W+', '_', os.path.basename(script)) + traceFile = os.path.join(traceDir, + f"testusdview_{scriptRep}_{iteration}.trace") + traceArgs = f"--traceToFile {traceFile} --traceFormat trace" + + command = (f"testusdview --norender {traceArgs} --testScript " + f"{script} {sessionLayer} {assetPath}") try: result = subprocess.run(command, shell=True, @@ -107,59 +146,77 @@ def measureTestusdviewPerf(assetPath, testusdviewMetrics): output = result.stdout.decode() parseFn = functools.partial(parseTimingGeneric, metricExpressions) - currMetrics = parseOutput(output, parseFn) + currMetrics = parseOutput(output, parseFn, traceFile) metrics.update(currMetrics) return metrics -def export(metricsList, outputPath, aggregations): +def export(metricsList, outputPath, aggregations, traceDir): """ Write `metrics` to the given `outputPath`. If zero aggregations, the reported yaml has form { name : list of times }. If one aggregation, the reported yaml has form { name_ : aggregated time }. If multiple, - the reported yaml has form { name : { agg1 : time, agg2 : time, ... }} + the reported yaml has form { name : { agg1 : time, agg2 : time, ... }}. + + In addition, if traces have been requested, copy source trace files for + min/max metrics to _.trace """ + # All original trace files from all iterations, to be deleted. 
+ pendingDeletes = set() + # Transpose list of metrics to dict of (metric name, list of values) metricsDict = {} for metric in metricsList: - for name, time in metric.items(): + for name, (time, traceFile) in metric.items(): if name not in metricsDict: metricsDict[name] = [] - metricsDict[name].append(time) + if traceFile is not None: + pendingDeletes.add(traceFile) + + metricsDict[name].append((time, traceFile)) + + # Trace file source to destination filenames, relevant only when + # traceDir is not None + pendingCopies = {} + + # Dict to output to metrics.yaml + resultDict = {} if len(aggregations) == 0: - resultDict = metricsDict - elif len(aggregations) == 1: - resultDict = {} - agg = aggregations[0] - for name, times in metricsDict.items(): - aggName = f"{name}_{agg}" - if agg == "min": - resultDict[aggName] = min(times) - elif agg == "mean": - resultDict[aggName] = statistics.mean(times) - elif agg == "max": - resultDict[aggName] = max(times) - else: - raise ValueError(f"Internal error -- aggregation {agg}" - " not implemented") + for name, timeTuples in metricsDict.items(): + resultDict[name] = [t[0] for t in timeTuples] else: - resultDict = {} - for name, times in metricsDict.items(): + for name, timeTuples in metricsDict.items(): resultDict[name] = {} for agg in aggregations: if agg == "min": - resultDict[name][agg] = min(times) + time, traceFile = min(timeTuples) + pendingCopies[f"{name}_{agg}.trace"] = traceFile elif agg == "mean": - resultDict[name][agg] = statistics.mean(times) + times = [t[0] for t in timeTuples] + time = statistics.mean(times) elif agg == "max": - resultDict[name][agg] = max(times) + time, traceFile = max(timeTuples) + pendingCopies[f"{name}_{agg}.trace"] = traceFile else: raise ValueError("Internal error -- aggregation " f"{agg} not implemented") + resultDict[name][agg] = time + + # Collapse { name : { agg : time }} to { name_agg : time } when + # only one aggregation is given. + if len(aggregations) == 1: + collapsedDict = {} + for name, aggDict in resultDict.items(): + for agg, time in aggDict.items(): + aggName = f"{name}_{agg}" + collapsedDict[aggName] = time + + resultDict = collapsedDict + if outputPath.endswith(".yaml"): with open(outputPath, "w") as f: yaml.dump(resultDict, f) @@ -169,17 +226,43 @@ def export(metricsList, outputPath, aggregations): print(f"Performance metrics have been output to {outputPath}") + # If traces are requested, any min/max metric's associated trace file + # will be copied to a file that looks like + # _.trace + if traceDir is not None: + for filename, src in pendingCopies.items(): + dest = os.path.join(traceDir, filename) + shutil.copyfile(src, dest) -def run(assetPath, testusdviewMetrics): + # Delete original per-iteration trace files + for trace in pendingDeletes: + os.remove(trace) + + +def run(assetPath, testusdviewMetrics, traceDir, iteration): """ Collect performance metrics. 
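+    traceDir and iteration are forwarded to the measurement helpers so that
+    per-iteration trace files can be written when tracing is requested.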
""" + # Supply session layer overrides, if found + assetDir = os.path.dirname(assetPath) + sessionLayer = None + for fname in os.listdir(assetDir): + if fname.endswith("overrides.usda"): + sessionLayer = os.path.join(assetDir, fname) + break + # Measure `usdview --timing` native metrics - usdviewMetrics = measurePerformance(assetPath) + usdviewMetrics = measurePerformance(assetPath, + traceDir, + iteration, + sessionLayer) # Measure custom `testusdview` metrics customMetrics = measureTestusdviewPerf(assetPath, - testusdviewMetrics) + testusdviewMetrics, + traceDir, + iteration, + sessionLayer) metrics = {} metrics.update(usdviewMetrics) @@ -265,11 +348,19 @@ def parseArgs(): "will be a key value dictionary with " "_ to aggregated " "time value. If multiple aggregations are " - "requested, the output yaml format will be" + "requested, the output yaml format will be " ": {: , :...}." "When no aggregation is set, the output format " "will be : [, , ...] or " "one measured value for each iteration.") + parser.add_argument("-t", "--tracedir", type=str, + help="Outputs a trace file for each run of usdview in " + "the given directory if provided and if " + "'aggregation' includes min or max. A trace file " + "for the iteration of testusdview or usdview " + "from which the aggregated value of each metric " + "was observed will be output in the form " + "_.trace") args = parser.parse_args() @@ -288,6 +379,17 @@ def parseArgs(): if args.aggregation and args.iterations == 1: print(f"WARNING: aggregation {args.aggregation} is set but " "iterations is 1") + + aggs = args.aggregation + if args.tracedir and ("min" in aggs or "max" in aggs): + if not os.path.exists(args.tracedir): + os.makedirs(args.tracedir, exist_ok=True) + print(f"Created trace output directory {args.tracedir}") + + print(f"Trace files will be output to the '{args.tracedir}' dir") + else: + print("Trace files will not be output, missing --tracedir ", + "or --aggregation that includes min or max") return args @@ -298,11 +400,11 @@ def main(): warmCache(args.asset) metricsList = [] - for _ in range(args.iterations): - metrics = run(args.asset, customMetrics) + for i in range(args.iterations): + metrics = run(args.asset, customMetrics, args.tracedir, i) metricsList.append(metrics) - export(metricsList, args.output, args.aggregation) + export(metricsList, args.output, args.aggregation, args.tracedir) if __name__ == "__main__": diff --git a/extras/usd/examples/usdResolverExample/CMakeLists.txt b/extras/usd/examples/usdResolverExample/CMakeLists.txt index 3f02942277..437a3a9f4f 100644 --- a/extras/usd/examples/usdResolverExample/CMakeLists.txt +++ b/extras/usd/examples/usdResolverExample/CMakeLists.txt @@ -16,9 +16,6 @@ pxr_plugin(${PXR_PACKAGE} PUBLIC_HEADERS api.h - PYTHON_CPPFILES - moduleDeps.cpp - PYMODULE_CPPFILES module.cpp wrapResolverContext.cpp diff --git a/extras/usd/examples/usdResolverExample/moduleDeps.cpp b/extras/usd/examples/usdResolverExample/moduleDeps.cpp deleted file mode 100644 index 72d28a6125..0000000000 --- a/extras/usd/examples/usdResolverExample/moduleDeps.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright 2016 Pixar -// -// Licensed under the terms set forth in the LICENSE.txt file available at -// https://openusd.org/license. 
-// -//////////////////////////////////////////////////////////////////////// - -#include "pxr/pxr.h" -#include "pxr/base/tf/registryManager.h" -#include "pxr/base/tf/scriptModuleLoader.h" -#include "pxr/base/tf/token.h" - -#include - -PXR_NAMESPACE_OPEN_SCOPE - -TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { - // List of direct dependencies for this library. - const std::vector reqs = { - TfToken("ar"), - TfToken("arch"), - TfToken("js"), - TfToken("tf"), - TfToken("vt") - }; - TfScriptModuleLoader::GetInstance(). - RegisterLibrary(TfToken("usdResolverExample"), TfToken("pxr.UsdResolverExample"), reqs); -} - -PXR_NAMESPACE_CLOSE_SCOPE - - diff --git a/extras/usd/examples/usdSchemaExamples/CMakeLists.txt b/extras/usd/examples/usdSchemaExamples/CMakeLists.txt index c19bf3e204..7ee863ea00 100644 --- a/extras/usd/examples/usdSchemaExamples/CMakeLists.txt +++ b/extras/usd/examples/usdSchemaExamples/CMakeLists.txt @@ -16,9 +16,6 @@ pxr_plugin(${PXR_PACKAGE} PUBLIC_HEADERS api.h - PYTHON_CPPFILES - moduleDeps.cpp - PYMODULE_FILES __init__.py ) diff --git a/extras/usd/examples/usdSchemaExamples/moduleDeps.cpp b/extras/usd/examples/usdSchemaExamples/moduleDeps.cpp deleted file mode 100644 index 7dd4e51cd6..0000000000 --- a/extras/usd/examples/usdSchemaExamples/moduleDeps.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// -// Copyright 2016 Pixar -// -// Licensed under the terms set forth in the LICENSE.txt file available at -// https://openusd.org/license. -// -//////////////////////////////////////////////////////////////////////// - -#include "pxr/pxr.h" -#include "pxr/base/tf/registryManager.h" -#include "pxr/base/tf/scriptModuleLoader.h" -#include "pxr/base/tf/token.h" - -#include - -PXR_NAMESPACE_OPEN_SCOPE - -TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { - // List of direct dependencies for this library. - const std::vector reqs = { - TfToken("sdf"), - TfToken("tf"), - TfToken("usd"), - TfToken("vt") - }; - TfScriptModuleLoader::GetInstance(). 
- RegisterLibrary(TfToken("usdSchemaExamples"), TfToken("pxr.UsdSchemaExamples"), reqs); -} - -PXR_NAMESPACE_CLOSE_SCOPE - - diff --git a/extras/usd/examples/usdSemanticsExamples/bookshelf.usda b/extras/usd/examples/usdSemanticsExamples/bookshelf.usda new file mode 100644 index 0000000000..d06aa38e23 --- /dev/null +++ b/extras/usd/examples/usdSemanticsExamples/bookshelf.usda @@ -0,0 +1,164 @@ +#usda 1.0 +( + doc = "This layer represents an example of using UsdSemantics" + upAxis = "Y" + startTimeCode = 0 + endTimeCode = 200 +) + +def Xform "Bookshelf" ( + apiSchemas = ["SemanticsLabelsAPI:category", "SemanticsLabelsAPI:style"] +) +{ + token[] semantics:labels:category = ["furniture", "cabinet"] + token[] semantics:labels:style = ["modern", "chic"] + double3 xformOp:translate = (0, 100.87384788680441, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + + def Xform "Shelves" ( + apiSchemas = ["SemanticsLabelsAPI:category"] + ) + { + token[] semantics:labels:category = ["horizontal shelving"] + double3 xformOp:translate = (113, 0, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + + def Cube "shelf_geo_01" ( + apiSchemas = ["MaterialBindingAPI", "SemanticsLabelsAPI:state"] + ) + { + token[] semantics:labels:state.timeSamples = { + 0: ["empty"], + 50: ["partially full"], + 100: ["full"] + } + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double size = 100 + double3 xformOp:scale = (0.9999999999999999, 0.0773693394230031, 0.7625704657327257) + double3 xformOp:translate = (-113, 70.05470741643151, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } + + def Cube "shelf_geo_02" ( + apiSchemas = ["MaterialBindingAPI", "SemanticsLabelsAPI:state"] + ) + { + token[] semantics:labels:state = ["partially full"] + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double size = 100 + double3 xformOp:scale = (0.9999999999999999, 0.0773693394230031, 0.7625704657327257) + double3 xformOp:translate = (-113, 28.205436432969066, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } + + def Cube "shelf_geo_03" ( + apiSchemas = ["MaterialBindingAPI", "SemanticsLabelsAPI:state"] + ) + { + token[] semantics:labels:state = ["full"] + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double size = 100 + double3 xformOp:scale = (0.9999999999999999, 0.0773693394230031, 0.7625704657327257) + double3 xformOp:translate = (-113, -14.418843141218574, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } + + def Cube "shelf_geo_04" ( + apiSchemas = ["MaterialBindingAPI"] + ) + { + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double size = 100 + double3 xformOp:scale = (0.9999999999999999, 0.0773693394230031, 0.7625704657327257) + double3 xformOp:translate = (-113, -57.934833429151425, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } + + def Cube "shelf_geo_05" ( + apiSchemas = ["MaterialBindingAPI"] + ) + { + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double 
size = 100 + double3 xformOp:scale = (0.9999999999999999, 0.0773693394230031, 0.7625704657327257) + double3 xformOp:translate = (-113, -99.27174085182759, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } + } + + def Cube "bookshelf_geo" ( + apiSchemas = ["MaterialBindingAPI"] + ) + { + float3[] extent = [(-50, -50, -50), (50, 50, 50)] + rel material:binding = ( + bindMaterialAs = "weakerThanDescendants" + ) + double size = 100 + double3 xformOp:scale = (1.0484495248498045, 1.991987664077115, 0.13958063483880567) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"] + } +} + +def Scope "looks" +{ + def Material "wood" ( + apiSchemas = ["SemanticsLabelsAPI:material"] + ) + { + token[] semantics:labels:material = ["wood", "oak", "American White Oak"] + token outputs:displacement.connect = </looks/wood/Shader.outputs:displacement> + token outputs:surface.connect = </looks/wood/Shader.outputs:surface> + + def Shader "Shader" + { + uniform token info:id = "UsdPreviewSurface" + float inputs:clearcoat = 0 + float inputs:clearcoatRoughness = 0.01 + color3f inputs:diffuseColor = (0.5145, 0.3639, 0.229) ( + renderType = "color" + ) + float inputs:displacement = 0 + color3f inputs:emissiveColor = (0, 0, 0) ( + renderType = "color" + ) + float inputs:ior = 1.5 + float inputs:metallic = 0 + normal3f inputs:normal = (0, 0, 1) + float inputs:occlusion = 1 + float inputs:opacity = 1 + float inputs:opacityThreshold = 0 ( + connectability = "interfaceOnly" + ) + float inputs:roughness = 0.5 + color3f inputs:specularColor = (0, 0, 0) ( + renderType = "color" + ) + int inputs:useSpecularWorkflow = 0 ( + connectability = "interfaceOnly" + ) + token outputs:displacement ( + renderType = "material" + ) + token outputs:surface ( + renderType = "material" + ) + } + } +} + diff --git a/extras/usd/examples/usdviewPlugins/sendMail.py b/extras/usd/examples/usdviewPlugins/sendMail.py index f06d85473c..b36a21b466 100644 --- a/extras/usd/examples/usdviewPlugins/sendMail.py +++ b/extras/usd/examples/usdviewPlugins/sendMail.py @@ -194,7 +194,7 @@ def _GetSendMailInfo(usdviewApi, allowScreenCapSel): # set field defaults dialog.emailInfo = _GenerateDefaultInfo(usdviewApi, dialog) - dialog.setMinimumWidth((window.size().width()/2)) + dialog.setMinimumWidth(window.size().width()//2) # add layout to dialog and launch dialog.setLayout(_GenerateLayout(dialog, allowScreenCapSel)) diff --git a/extras/usd/tutorials/animatedTop/top.geom.usd b/extras/usd/tutorials/animatedTop/top.geom.usd index 601f026d2b..f4b3b4968e 100644 Binary files a/extras/usd/tutorials/animatedTop/top.geom.usd and b/extras/usd/tutorials/animatedTop/top.geom.usd differ diff --git a/extras/usd/tutorials/simpleShading/generate_simpleShading.py b/extras/usd/tutorials/simpleShading/generate_simpleShading.py index 1c7cdae743..1dc9159747 100644 --- a/extras/usd/tutorials/simpleShading/generate_simpleShading.py +++ b/extras/usd/tutorials/simpleShading/generate_simpleShading.py @@ -32,7 +32,7 @@ texCoords = UsdGeom.PrimvarsAPI(billboard).CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying) -texCoords.Set([(0, 0), (2, 0), (2,2), (0, 2)]) +texCoords.Set([(0, 0), (1, 0), (1,1), (0, 1)]) # Now make a Material that contains a PBR preview surface, a texture reader, # and a primvar reader to fetch the texture coordinate from the geometry diff --git a/extras/usd/tutorials/simpleShading/simpleShading.usda b/extras/usd/tutorials/simpleShading/simpleShading.usda index 0582102349..77b8cf75f6 100644 ---
a/extras/usd/tutorials/simpleShading/simpleShading.usda +++ b/extras/usd/tutorials/simpleShading/simpleShading.usda @@ -16,7 +16,7 @@ def Xform "TexModel" ( int[] faceVertexIndices = [0, 1, 2, 3] rel material:binding = point3f[] points = [(-430, -145, 0), (430, -145, 0), (430, 145, 0), (-430, 145, 0)] - texCoord2f[] primvars:st = [(0, 0), (2, 0), (2, 2), (0, 2)] ( + texCoord2f[] primvars:st = [(0, 0), (1, 0), (1, 1), (0, 1)] ( interpolation = "varying" ) } diff --git a/pxr/CMakeLists.txt b/pxr/CMakeLists.txt index 4c7301b871..633830b047 100644 --- a/pxr/CMakeLists.txt +++ b/pxr/CMakeLists.txt @@ -8,6 +8,10 @@ if (EXISTS "${PROJECT_SOURCE_DIR}/pxr/exec") add_subdirectory(exec) endif() +if (${PXR_BUILD_USD_VALIDATION}) + add_subdirectory(usdValidation) +endif() + if (${PXR_BUILD_IMAGING}) add_subdirectory(imaging) if (${PXR_BUILD_USD_IMAGING}) diff --git a/pxr/base/arch/attributes.h b/pxr/base/arch/attributes.h index 1d2820d1e7..42f9a6e875 100644 --- a/pxr/base/arch/attributes.h +++ b/pxr/base/arch/attributes.h @@ -159,6 +159,7 @@ PXR_NAMESPACE_OPEN_SCOPE # define ARCH_SCANF_FUNCTION(_fmt, _firstArg) \ __attribute__((format(scanf, _fmt, _firstArg))) # define ARCH_NOINLINE __attribute__((noinline)) +# define ARCH_ALWAYS_INLINE __attribute__((always_inline)) # define ARCH_UNUSED_ARG __attribute__ ((unused)) # define ARCH_UNUSED_FUNCTION __attribute__((unused)) # define ARCH_USED_FUNCTION __attribute__((used)) @@ -169,6 +170,7 @@ PXR_NAMESPACE_OPEN_SCOPE # define ARCH_PRINTF_FUNCTION(_fmt, _firstArg) # define ARCH_SCANF_FUNCTION(_fmt, _firstArg) # define ARCH_NOINLINE // __declspec(noinline) +# define ARCH_ALWAYS_INLINE # define ARCH_UNUSED_ARG # define ARCH_UNUSED_FUNCTION # define ARCH_USED_FUNCTION diff --git a/pxr/base/arch/debugger.cpp b/pxr/base/arch/debugger.cpp index e1596b3ac9..a25a4c5983 100644 --- a/pxr/base/arch/debugger.cpp +++ b/pxr/base/arch/debugger.cpp @@ -22,14 +22,18 @@ #endif #include #include +#include #include #include #include #include #include #include +#include #include #include +#include +#include #endif #if defined(ARCH_OS_DARWIN) #include @@ -327,50 +331,61 @@ Arch_DebuggerAttachExecPosix(void* data) #if defined(ARCH_OS_LINUX) +// Reads /proc/self/status, finds the line starting with "field:", and +// returns the portion following the ":". +// Note that the returned string will generally include leading whitespace static -bool -Arch_DebuggerIsAttachedPosix() +std::string Arch_ReadProcStatusField(const std::string_view field) { - // Check for a ptrace based debugger by trying to ptrace. - pid_t parent = getpid(); - pid_t pid = nonLockingFork(); - if (pid < 0) { - // fork failed. We'll guess there's no debugger. - return false; + std::ifstream procStatusFile("/proc/self/status"); + if (!procStatusFile) { + ARCH_WARNING("Unable to open /proc/self/status"); + return std::string(); } - - // Child process. - if (pid == 0) { - // Attach to the parent with ptrace() this will fail if the - // parent is already being traced. - if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) == -1) { - // A debugger is probably attached if the error is EPERM. - _exit(errno == EPERM ? 1 : 0); + for (std::string line; std::getline(procStatusFile, line);) { + // the field needs to start with the given fieldLen AND the ':' char + if (line.size() < field.size() + 1) { + continue; } - // Wait for the parent to stop as a result of the attach. 
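The Arch_ReadProcStatusField helper above replaces the fork-and-ptrace probe that the removed code (continuing below) implemented: on Linux, the kernel reports the pid of any attached tracer directly in /proc/self/status, so no child process is needed. As a standalone illustration of the same idea, here is a minimal, hypothetical sketch (error handling pared down compared to the patch):

```cpp
// Minimal sketch of the TracerPid probe: /proc/self/status contains a
// line of the form "TracerPid:\t<pid>"; a nonzero pid means a tracer
// (gdb, strace, ...) is attached.
#include <fstream>
#include <string>

static bool IsDebuggerAttachedViaProcStatus()
{
    std::ifstream status("/proc/self/status");
    for (std::string line; std::getline(status, line);) {
        if (line.rfind("TracerPid:", 0) == 0) {      // starts_with
            return std::stoi(line.substr(10)) > 0;   // stoi skips the tab
        }
    }
    return false;
}
```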
- int status; - while (waitpid(parent, &status, 0) == -1 && errno == EINTR) { - // Do nothing + if (line.compare(0, field.size(), field) == 0 && + line[field.size()] == ':') { + // We found our "field:" line + return line.substr(field.size() + 1); } + } - // Detach and continue the parent. - ptrace(PTRACE_DETACH, parent, 0, SIGCONT); + ARCH_WARNING((std::string("Unable to find given field in " + "/proc/self/status: ") += field).c_str()); + return std::string(); +} - // A debugger was not attached. - _exit(0); - } +// Reads the "TracerPid:" field from /proc/self/status +// Returns a result < 0 if there was an error. +static +int Arch_ReadTracerPid() { - // Parent process - int status; - while (waitpid(pid, &status, 0) == -1 && errno == EINTR) { - // Do nothing - } - if (WIFEXITED(status)) { - return WEXITSTATUS(status) != 0; + const std::string field = Arch_ReadProcStatusField("TracerPid"); + + // Trim any leading spaces or tabs in a locale-independent way. + char const *b = field.c_str(); + char const * const e = field.c_str() + field.size(); + while (b != e && (*b == '\t' || *b == ' ')) { + ++b; } - return false; + // Try to convert to int. + int tracerPid = 0; + auto [ptr, err] = std::from_chars(b, e, tracerPid); + + return err == std::errc() ? tracerPid : -1; +} + +static +bool +Arch_DebuggerIsAttachedPosix() +{ + return Arch_ReadTracerPid() > 0; } #elif defined(ARCH_OS_DARWIN) diff --git a/pxr/base/arch/fileSystem.cpp b/pxr/base/arch/fileSystem.cpp index 50ce636cb3..b8195c2b3b 100644 --- a/pxr/base/arch/fileSystem.cpp +++ b/pxr/base/arch/fileSystem.cpp @@ -21,6 +21,7 @@ #include #include #include +#include <filesystem> #include #include @@ -532,24 +533,35 @@ ArchGetFileName(FILE *file) } return result; #elif defined (ARCH_OS_WINDOWS) - static constexpr DWORD bufSize = - sizeof(FILE_NAME_INFO) + sizeof(WCHAR) * 4096; - HANDLE hfile = _FileToWinHANDLE(file); - auto fileNameInfo = reinterpret_cast(malloc(bufSize)); string result; - if (GetFileInformationByHandleEx( - hfile, FileNameInfo, static_cast(fileNameInfo), bufSize)) { + std::vector<WCHAR> filePath(MAX_PATH); + HANDLE hfile = _FileToWinHANDLE(file); + DWORD dwSize = GetFinalPathNameByHandleW(hfile, filePath.data(), MAX_PATH, VOLUME_NAME_DOS); + // * dwSize == 0. Fail. + // * dwSize < MAX_PATH. Success, and dwSize returns the size without null terminator. + // * dwSize >= MAX_PATH. Buffer is too small, and dwSize returns the size with null terminator. + if (dwSize >= MAX_PATH) { + filePath.resize(dwSize); + dwSize = GetFinalPathNameByHandleW(hfile, filePath.data(), dwSize, VOLUME_NAME_DOS); + } + + if (dwSize != 0) { size_t outSize = WideCharToMultiByte( - CP_UTF8, 0, fileNameInfo->FileName, - fileNameInfo->FileNameLength/sizeof(WCHAR), + CP_UTF8, 0, filePath.data(), + dwSize, NULL, 0, NULL, NULL); result.resize(outSize); WideCharToMultiByte( - CP_UTF8, 0, fileNameInfo->FileName, - fileNameInfo->FileNameLength/sizeof(WCHAR), + CP_UTF8, 0, filePath.data(), + -1, &result.front(), outSize, NULL, NULL); + + // Strip path prefix if necessary. + // See https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats + // for format of DOS device paths.
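The two GetFinalPathNameByHandleW calls above follow the standard Win32 grow-and-retry contract that the inline comment spells out: zero means failure, a value smaller than the buffer is the length written, and a larger value is the required size. Distilled into a standalone sketch (hypothetical helper, not part of the patch):

```cpp
// Win32 grow-and-retry idiom for GetFinalPathNameByHandleW (sketch;
// assumes h is a valid file HANDLE).
#include <windows.h>
#include <string>
#include <vector>

std::wstring GetFinalPath(HANDLE h)
{
    std::vector<wchar_t> buf(MAX_PATH);
    DWORD n = GetFinalPathNameByHandleW(
        h, buf.data(), static_cast<DWORD>(buf.size()), VOLUME_NAME_DOS);
    if (n >= buf.size()) {  // too small: n is the required size incl. NUL
        buf.resize(n);
        n = GetFinalPathNameByHandleW(
            h, buf.data(), static_cast<DWORD>(buf.size()), VOLUME_NAME_DOS);
    }
    return n ? std::wstring(buf.data(), n) : std::wstring();
}
```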
+ auto canonicalPath = std::filesystem::canonical(result); + result = canonicalPath.string(); } - free(fileNameInfo); return result; #else #error Unknown system architecture diff --git a/pxr/base/arch/mallocHook.cpp b/pxr/base/arch/mallocHook.cpp index 6679414543..a03724dbc4 100644 --- a/pxr/base/arch/mallocHook.cpp +++ b/pxr/base/arch/mallocHook.cpp @@ -15,6 +15,7 @@ # include #endif #include +#include #if defined(ARCH_OS_IPHONE) #elif defined(ARCH_OS_DARWIN) @@ -25,10 +26,16 @@ PXR_NAMESPACE_OPEN_SCOPE -// Malloc hooks were removed in glibc 2.34. -#if defined(ARCH_OS_LINUX) && \ +// The old glibc malloc hooks were removed in glibc 2.34. So we only +// auto-enable support for the ArchMallocHook if we're compiling on that version +// or older. If you have a different allocator that supports the old hook +// variables, you can -DPXR_ARCH_SUPPORT_MALLOC_HOOKS when compiling to manually +// enable support. This code will then try to dlsym() the hooks at runtime, +// when initialized. +#if !defined(PXR_ARCH_SUPPORT_MALLOC_HOOKS) && \ + defined(ARCH_OS_LINUX) && \ defined(__GLIBC__) && __GLIBC__ <= 2 && __GLIBC_MINOR__ < 34 -#define MALLOC_HOOKS_AVAILABLE +#define PXR_ARCH_SUPPORT_MALLOC_HOOKS #endif using std::string; @@ -57,8 +64,7 @@ using std::string; * Note that support for non-linux and non-64 bit platforms is not provided. */ - -#ifdef MALLOC_HOOKS_AVAILABLE +#ifdef PXR_ARCH_SUPPORT_MALLOC_HOOKS /* * These are hook variables (they're not functions, so they don't need @@ -70,22 +76,19 @@ using std::string; # define __MALLOC_HOOK_VOLATILE #endif /* !defined(__MALLOC_HOOK_VOLATILE) */ -PXR_NAMESPACE_CLOSE_SCOPE - -extern void* -(*__MALLOC_HOOK_VOLATILE __malloc_hook)( - size_t __size, const void*); -extern void* -(*__MALLOC_HOOK_VOLATILE __realloc_hook)( - void* __ptr, size_t __size, const void*); -extern void* -(*__MALLOC_HOOK_VOLATILE __memalign_hook)( - size_t __alignment, size_t __size, const void*); -extern void -(*__MALLOC_HOOK_VOLATILE __free_hook)( - void* __ptr, const void*); +#define _MHV __MALLOC_HOOK_VOLATILE +using MallocHookPtr = void *(*_MHV)(size_t, const void*); +using ReallocHookPtr = void *(*_MHV)(void*, size_t, const void*); +using MemalignHookPtr = void *(*_MHV)(size_t, size_t, const void*); +using FreeHookPtr = void (*_MHV)(void*, const void*); +#undef _MHV -PXR_NAMESPACE_OPEN_SCOPE +// These are pointers to the function pointer hook variables. We look these up +// via dlsym(). 
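Since the hook variables may or may not exist in the C library actually loaded at runtime, a dlsym() probe is the natural way to find them, as the comment above says. A minimal standalone sketch of that lookup (assumes a glibc old enough to still export the deprecated __malloc_hook; RTLD_DEFAULT may require _GNU_SOURCE):

```cpp
// Probe the global symbol scope for glibc's (deprecated) malloc hook
// variable; a null result means the running allocator doesn't have it.
#include <dlfcn.h>
#include <cstdio>

using MallocHookFn = void *(*)(size_t, const void *);

int main()
{
    auto hookVar = static_cast<MallocHookFn *>(
        dlsym(RTLD_DEFAULT, "__malloc_hook"));
    std::printf("__malloc_hook %s\n", hookVar ? "found" : "absent");
}
```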
+static MallocHookPtr *mallocHookPtr; +static ReallocHookPtr *reallocHookPtr; +static MemalignHookPtr *memalignHookPtr; +static FreeHookPtr *freeHookPtr; template static bool _GetSymbol(T* addr, const char* name, string* errMsg) { @@ -156,14 +159,6 @@ ArchIsJemallocActive() return _MallocProvidedBySameLibraryAs("__jemalloc_malloc", skipMallocCheck); } -static bool -_MallocHookAvailable() -{ - return (ArchIsPxmallocActive() || - ArchIsPtmallocActive() || - ArchIsJemallocActive()); -} - struct Arch_MallocFunctionNames { const char* mallocFn; @@ -198,12 +193,12 @@ _GetUnderlyingMallocFunctionNames() return names; } -#endif // MALLOC_HOOKS_AVAILABLE +#endif // PXR_ARCH_SUPPORT_MALLOC_HOOKS bool ArchIsPtmallocActive() { -#ifdef MALLOC_HOOKS_AVAILABLE +#ifdef PXR_ARCH_SUPPORT_MALLOC_HOOKS const std::string impl = ArchGetEnv("TF_MALLOC_TAG_IMPL"); if (!_CheckMallocTagImpl(impl, "ptmalloc")) { return false; @@ -255,8 +250,8 @@ ArchMallocHook::Initialize( #if !defined(ARCH_OS_LINUX) *errMsg = "ArchMallocHook only available for Linux/glibc systems"; return false; -#elif !defined(MALLOC_HOOKS_AVAILABLE) - *errMsg = "C library does not provide malloc hooks"; +#elif !defined(PXR_ARCH_SUPPORT_MALLOC_HOOKS) + *errMsg = "ArchMallocHook support disabled at compile time"; return false; #else @@ -265,9 +260,22 @@ ArchMallocHook::Initialize( return false; } - if (!_MallocHookAvailable()) { - *errMsg = - "ArchMallocHook functionality not available for current allocator"; + const bool allocatorSupportsHooks = + (ArchIsPxmallocActive() || + ArchIsPtmallocActive() || + ArchIsJemallocActive()) && + _GetSymbol(&mallocHookPtr, "__malloc_hook", errMsg) && + _GetSymbol(&reallocHookPtr, "__realloc_hook", errMsg) && + _GetSymbol(&memalignHookPtr, "__memalign_hook", errMsg) && + _GetSymbol(&freeHookPtr, "__free_hook", errMsg); + + if (!allocatorSupportsHooks) { + std::string prevErr = std::exchange( + *errMsg, "ArchMallocHook functionality not available for " + "current allocator"); + if (!prevErr.empty()) { + *errMsg += ": " + prevErr; + } return false; } @@ -286,18 +294,16 @@ ArchMallocHook::Initialize( // the system (glibc) malloc symbols instead of the custom allocator's // (jemalloc's). Pixar's pxmalloc wrapper does the same, for the same // reason. - if ((__malloc_hook && - __malloc_hook != reinterpret_cast(malloc)) || - (__realloc_hook && - __realloc_hook != reinterpret_cast(realloc)) || - (__memalign_hook && - __memalign_hook != reinterpret_cast(memalign)) || - (__free_hook && - __free_hook != reinterpret_cast(free))) { + auto toVoidP = [](auto x) { return reinterpret_cast(x); }; + + if ((mallocHookPtr && *mallocHookPtr != toVoidP(malloc)) || + (reallocHookPtr && *reallocHookPtr != toVoidP(realloc)) || + (memalignHookPtr && *memalignHookPtr != toVoidP(memalign)) || + (freeHookPtr && *freeHookPtr != toVoidP(free))) { *errMsg = - "One or more malloc/realloc/free hook variables are already set.\n" - "This probably means another entity in the program is trying to\n" - "do its own profiling, pre-empting yours."; + "One or more malloc/realloc/free hook variables are already set. 
" + "This probably means another entity in the program is trying to " + "do its own profiling, preempting yours."; return false; } @@ -314,17 +320,16 @@ ArchMallocHook::Initialize( return false; } - if (mallocWrapper) - __malloc_hook = mallocWrapper; - - if (reallocWrapper) - __realloc_hook = reallocWrapper; - - if (memalignWrapper) - __memalign_hook = memalignWrapper; + auto setIf = [](auto hookPtr, auto wrapper) { + if (wrapper) { + *hookPtr = wrapper; + } + }; - if (freeWrapper) - __free_hook = freeWrapper; + setIf(mallocHookPtr, mallocWrapper); + setIf(reallocHookPtr, reallocWrapper); + setIf(memalignHookPtr, memalignWrapper); + setIf(freeHookPtr, freeWrapper); return true; #endif diff --git a/pxr/base/arch/pragmas.h b/pxr/base/arch/pragmas.h index 1d15ab7434..39af797a66 100644 --- a/pxr/base/arch/pragmas.h +++ b/pxr/base/arch/pragmas.h @@ -48,6 +48,9 @@ #define ARCH_PRAGMA_UNUSED_FUNCTION \ _Pragma("GCC diagnostic ignored \"-Wunused-function\"") + #define ARCH_PRAGMA_STRINGOP_OVERFLOW \ + _Pragma("GCC diagnostic ignored \"-Wstringop-overflow=\"") + #elif defined(ARCH_COMPILER_CLANG) #define ARCH_PRAGMA_PUSH \ @@ -181,6 +184,10 @@ #define ARCH_PRAGMA_UNUSED_FUNCTION #endif +#if !defined ARCH_PRAGMA_STRINGOP_OVERFLOW + #define ARCH_PRAGMA_STRINGOP_OVERFLOW +#endif + #if !defined ARCH_PRAGMA_UNUSED_PRIVATE_FIELD #define ARCH_PRAGMA_UNUSED_PRIVATE_FIELD #endif diff --git a/pxr/base/arch/testenv/testDemangle.cpp b/pxr/base/arch/testenv/testDemangle.cpp index c94d9915f7..03e287b8d8 100644 --- a/pxr/base/arch/testenv/testDemangle.cpp +++ b/pxr/base/arch/testenv/testDemangle.cpp @@ -38,6 +38,37 @@ typedef Mangled Remangled; enum MangleEnum { ONE, TWO, THREE }; +static std::string +GetAlternativeTemplateTypename(std::string typeName) +{ + // Since C++11, the parser specification has been improved to be + // able to interpret successive right angle brackets in nested template + // declarations. The implementation of the C++ ABI has been updated + // accordingly on some systems, e.g. starting with Clang 14 on macOS 13.3. + // We accept a demangled result without additional white space between + // successive right angle brackets. 
+ + const std::string oldStyle = "> >"; + const std::string newStyle = ">>"; + + std::string::size_type pos = 0; + while ((pos = typeName.find(oldStyle, pos)) != std::string::npos) { + typeName.replace(pos, oldStyle.size(), newStyle); + pos += newStyle.size() - 1; + } + + printf("\texpected alternative: '%s'\n", typeName.c_str()); + + return typeName; +} + +static bool +TypeNamesMatch(const std::string& demangledName, const std::string& expected) +{ + return (demangledName == expected) || + (demangledName == GetAlternativeTemplateTypename(expected)); +} + template static bool TestDemangle(const std::string& typeName) @@ -51,10 +82,10 @@ TestDemangle(const std::string& typeName) printf("ArchDemangle('%s') => '%s', expected '%s'\n", mangledName.c_str(), toBeDemangledName.c_str(), typeName.c_str()); - ARCH_AXIOM(toBeDemangledName == typeName); - ARCH_AXIOM(ArchGetDemangled(mangledName) == typeName); - ARCH_AXIOM(ArchGetDemangled(typeInfo) == typeName); - ARCH_AXIOM(ArchGetDemangled() == typeName); + ARCH_AXIOM(TypeNamesMatch(toBeDemangledName, typeName)); + ARCH_AXIOM(TypeNamesMatch(ArchGetDemangled(mangledName), typeName)); + ARCH_AXIOM(TypeNamesMatch(ArchGetDemangled(typeInfo), typeName)); + ARCH_AXIOM(TypeNamesMatch(ArchGetDemangled(), typeName)); return true; } @@ -82,20 +113,10 @@ int main() TestDemangle("unsigned long"); TestDemangle >("MangledAlso"); - // Since C++11, the parser specification has been improved to be able - // to interpret multiple right angle brackets in nested template - // declarations. The implementation of the C++ ABI has been updated - // accordingly starting with Clang 14 on macOS 13.3 -#if defined(MAC_OS_VERSION_13_3) - const bool improvedAngleBracketDemangling = true; -#else - const bool improvedAngleBracketDemangling = false; -#endif - const char* const nestedTemplateTypeName = - improvedAngleBracketDemangling - ? "MangledAlso>" - : "MangledAlso >"; - TestDemangle > >(nestedTemplateTypeName); + TestDemangle > >( + "MangledAlso >"); + TestDemangle > > >( + "MangledAlso > >"); const char* const badType = "type_that_doesnt_exist"; #if defined(ARCH_OS_WINDOWS) diff --git a/pxr/base/arch/testenv/testFileSystem.cpp b/pxr/base/arch/testenv/testFileSystem.cpp index bbd790699d..b078621a75 100644 --- a/pxr/base/arch/testenv/testFileSystem.cpp +++ b/pxr/base/arch/testenv/testFileSystem.cpp @@ -14,6 +14,7 @@ #include #include #include +#include PXR_NAMESPACE_USING_DIRECTIVE @@ -92,6 +93,12 @@ int main() fclose(firstFile); ARCH_AXIOM(ArchGetFileLength(firstName.c_str()) == strlen(testContent)); + // Open a file, check that the file path from FILE* handle is matched. + ARCH_AXIOM((firstFile = ArchOpenFile(firstName.c_str(), "rb")) != NULL); + std::string filePath = ArchGetFileName(firstFile); + ARCH_AXIOM(std::filesystem::equivalent(filePath, firstName)); + fclose(firstFile); + // Map the file and assert the bytes are what we expect they are. 
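The new testFileSystem assertion compares paths with std::filesystem::equivalent rather than string equality, since ArchGetFileName may legitimately return a different spelling of the same file (drive-letter case, \\?\ prefixes, symlinks). A tiny illustration of why that matters (assumes a POSIX system where /tmp exists):

```cpp
// equivalent() compares the files the paths resolve to, not the text.
#include <cassert>
#include <filesystem>

int main()
{
    namespace fs = std::filesystem;
    assert(fs::equivalent("/tmp", "/tmp/../tmp"));          // same file
    assert(fs::path("/tmp") != fs::path("/tmp/../tmp"));    // different text
}
```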
ARCH_AXIOM((firstFile = ArchOpenFile(firstName.c_str(), "rb")) != NULL); ArchConstFileMapping cfm = ArchMapFileReadOnly(firstFile); diff --git a/pxr/base/gf/CMakeLists.txt b/pxr/base/gf/CMakeLists.txt index aa515ab818..465b8f1aa5 100644 --- a/pxr/base/gf/CMakeLists.txt +++ b/pxr/base/gf/CMakeLists.txt @@ -88,9 +88,6 @@ pxr_library(gf ilmbase_toFloat.h nc/nanocolor.h - PYTHON_CPPFILES - moduleDeps.cpp - PYMODULE_CPPFILES module.cpp wrapBBox3d.cpp diff --git a/pxr/base/gf/wrapColorSpace.cpp b/pxr/base/gf/wrapColorSpace.cpp index 902ae826af..5d16343839 100644 --- a/pxr/base/gf/wrapColorSpace.cpp +++ b/pxr/base/gf/wrapColorSpace.cpp @@ -29,6 +29,16 @@ static std::string __repr__(GfColorSpace const &self) TfStringPrintf("ColorSpace(%s)", TfPyRepr(self.GetName()).c_str()); } +pxr_boost::python::tuple ConvertPrimariesAndWhitePoint(const GfColorSpace& self) { + auto result = self.GetPrimariesAndWhitePoint(); + return pxr_boost::python::make_tuple( + std::get<0>(result), + std::get<1>(result), + std::get<2>(result), + std::get<3>(result) + ); +} + } // anon void wrapColorSpace() @@ -50,7 +60,8 @@ void wrapColorSpace() .def("GetGamma", &GfColorSpace::GetGamma) .def("GetLinearBias", &GfColorSpace::GetLinearBias) .def("GetTransferFunctionParams", &GfColorSpace::GetTransferFunctionParams) - .def("GetPrimariesAndWhitePoint", &GfColorSpace::GetPrimariesAndWhitePoint) + .def("GetPrimariesAndWhitePoint", &ConvertPrimariesAndWhitePoint) + .def("IsValid", &GfColorSpace::IsValid) .def(self == self) .def(self != self); diff --git a/pxr/base/plug/CMakeLists.txt b/pxr/base/plug/CMakeLists.txt index 6d4c74ce0a..5693921352 100644 --- a/pxr/base/plug/CMakeLists.txt +++ b/pxr/base/plug/CMakeLists.txt @@ -32,9 +32,6 @@ pxr_library(plug CPPFILES initConfig.cpp - PYTHON_CPPFILES - moduleDeps.cpp - PYMODULE_CPPFILES module.cpp wrapNotice.cpp diff --git a/pxr/base/plug/moduleDeps.cpp b/pxr/base/plug/moduleDeps.cpp deleted file mode 100644 index 3e08febc6e..0000000000 --- a/pxr/base/plug/moduleDeps.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright 2016 Pixar -// -// Licensed under the terms set forth in the LICENSE.txt file available at -// https://openusd.org/license. -// -//////////////////////////////////////////////////////////////////////// - -#include "pxr/pxr.h" -#include "pxr/base/tf/registryManager.h" -#include "pxr/base/tf/scriptModuleLoader.h" -#include "pxr/base/tf/token.h" - -#include - -PXR_NAMESPACE_OPEN_SCOPE - -TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { - // List of direct dependencies for this library. - const std::vector reqs = { - TfToken("arch"), - TfToken("js"), - TfToken("tf"), - TfToken("trace"), - TfToken("work") - }; - TfScriptModuleLoader::GetInstance(). 
- RegisterLibrary(TfToken("plug"), TfToken("pxr.Plug"), reqs); -} - -PXR_NAMESPACE_CLOSE_SCOPE - - diff --git a/pxr/base/plug/testPlug.py b/pxr/base/plug/testPlug.py index ba8feed7da..578e9cc62e 100644 --- a/pxr/base/plug/testPlug.py +++ b/pxr/base/plug/testPlug.py @@ -327,5 +327,10 @@ def test_DebugCodeExistence(self): self.assertTrue('PLUG_INFO_SEARCH' in debugCodes) self.assertTrue('PLUG_REGISTRATION' in debugCodes) + def test_StlSequencesForPlugPluginPtr(self): + pd1 = Plug.Registry().GetPluginForType('TestPlugDerived1') + td1 = Plug._TestPlugBase1('TestPlugDerived1') + self.assertTrue(td1.TestAcceptPluginSequence([pd1])) + if __name__ == '__main__': unittest.main() diff --git a/pxr/base/plug/testPlugBase.h b/pxr/base/plug/testPlugBase.h index 9b17ae5aaa..2bd210cd69 100644 --- a/pxr/base/plug/testPlugBase.h +++ b/pxr/base/plug/testPlugBase.h @@ -13,6 +13,7 @@ #include "pxr/base/tf/stringUtils.h" #include "pxr/base/tf/type.h" #include "pxr/base/tf/weakBase.h" +#include "pxr/base/plug/plugin.h" #include @@ -36,6 +37,10 @@ class _TestPlugBase : public TfRefBase, public TfWeakBase { return TfCreateRefPtr(new This()); } + bool TestAcceptPluginSequence(const PlugPluginPtrVector &plugins) { + return true; + } + PLUG_API static RefPtr Manufacture(const std::string & subclass); diff --git a/pxr/base/plug/wrapPlugin.cpp b/pxr/base/plug/wrapPlugin.cpp index 07929dbc5b..2c87ed069a 100644 --- a/pxr/base/plug/wrapPlugin.cpp +++ b/pxr/base/plug/wrapPlugin.cpp @@ -87,6 +87,7 @@ void wrapPlugin() (arg("path"), arg("verify") = true)) ; + TfPyRegisterStlSequencesFromPython<PlugPluginPtr>(); // The call to JsConvertToContainerType in _ConvertDict creates // vectors of pxr_boost::python::objects for array values, so register diff --git a/pxr/base/plug/wrapTestPlugBase.cpp b/pxr/base/plug/wrapTestPlugBase.cpp index ea6f172535..5037ec7c7e 100644 --- a/pxr/base/plug/wrapTestPlugBase.cpp +++ b/pxr/base/plug/wrapTestPlugBase.cpp @@ -7,6 +7,8 @@ #include "pxr/pxr.h" #include "pxr/base/plug/testPlugBase.h" +#include "pxr/external/boost/python/def.hpp" +#include "pxr/external/boost/python/args.hpp" #include "pxr/base/tf/makePyConstructor.h" #include "pxr/base/tf/pyPtrHelpers.h" #include "pxr/base/tf/pyContainerConversions.h" @@ -33,6 +35,9 @@ void wrap_TestPlugBase(const std::string & name) .def("GetTypeName", &This::GetTypeName) + .def("TestAcceptPluginSequence", &This::TestAcceptPluginSequence, + (arg("plugins"))); + ; } diff --git a/pxr/base/tf/CMakeLists.txt b/pxr/base/tf/CMakeLists.txt index 306419493d..438e9af37b 100644 --- a/pxr/base/tf/CMakeLists.txt +++ b/pxr/base/tf/CMakeLists.txt @@ -291,9 +291,6 @@ pxr_library(tf pxrDoubleConversion/strtod.cc pxrLZ4/lz4.cpp - PYTHON_CPPFILES - moduleDeps.cpp - PYMODULE_CPPFILES module.cpp pyWeakObject.cpp diff --git a/pxr/base/tf/debugCodes.cpp b/pxr/base/tf/debugCodes.cpp index 936ca5c6ec..8328112627 100644 --- a/pxr/base/tf/debugCodes.cpp +++ b/pxr/base/tf/debugCodes.cpp @@ -13,8 +13,11 @@ PXR_NAMESPACE_OPEN_SCOPE TF_REGISTRY_FUNCTION(TfDebug) { TF_DEBUG_ENVIRONMENT_SYMBOL(TF_SCRIPT_MODULE_LOADER, - "show script module loading activity"); - TF_DEBUG_ENVIRONMENT_SYMBOL(TF_TYPE_REGISTRY, "show changes to the TfType registry"); + "show script module loading activity"); + TF_DEBUG_ENVIRONMENT_SYMBOL(TF_SCRIPT_MODULE_LOADER_EXTRA, + "show more script module loading activity"); + TF_DEBUG_ENVIRONMENT_SYMBOL(TF_TYPE_REGISTRY, + "show changes to the TfType registry"); TF_DEBUG_ENVIRONMENT_SYMBOL(TF_ATTACH_DEBUGGER_ON_ERROR, "attach/stop in a debugger for all errors");
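TestAcceptPluginSequence itself only accepts a vector of plugin pointers, so the interesting piece above is the TfPyRegisterStlSequencesFromPython<PlugPluginPtr>() call: without a registered from-python sequence converter, the wrapped method would reject a Python list. A hypothetical wrap function using the same pattern for its own element type (sketch; assumes TfPyRegisterStlSequencesFromPython<T> registers std::vector<T> conversions, as the plug change suggests):

```cpp
// Sketch: register sequence conversions once so wrapped methods taking
// std::vector<int> accept Python lists of ints (names hypothetical).
#include "pxr/base/tf/pyContainerConversions.h"

void wrapMyType()
{
    TfPyRegisterStlSequencesFromPython<int>();
    // class_<MyType>("MyType")
    //     .def("TakeInts", &MyType::TakeInts)  // takes std::vector<int>
    //     ;
}
```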
TF_DEBUG_ENVIRONMENT_SYMBOL(TF_ATTACH_DEBUGGER_ON_FATAL_ERROR, diff --git a/pxr/base/tf/debugCodes.h b/pxr/base/tf/debugCodes.h index 7025b0c1a7..e6d0aa3eb8 100644 --- a/pxr/base/tf/debugCodes.h +++ b/pxr/base/tf/debugCodes.h @@ -26,6 +26,7 @@ TF_DEBUG_CODES( TF_DLCLOSE, TF_SCRIPT_MODULE_LOADER, + TF_SCRIPT_MODULE_LOADER_EXTRA, TF_TYPE_REGISTRY, diff --git a/pxr/base/tf/mallocTag.h b/pxr/base/tf/mallocTag.h index e4b326c9c0..d28522c01e 100644 --- a/pxr/base/tf/mallocTag.h +++ b/pxr/base/tf/mallocTag.h @@ -474,28 +474,28 @@ PXR_NAMESPACE_CLOSE_SCOPE #define TF_MALLOC_TAG_NEW(name1, name2) \ /* this is for STL purposes */ \ - inline void* operator new(::std::size_t, void* ptr) { \ + ARCH_ALWAYS_INLINE inline void* operator new(::std::size_t, void* ptr) { \ return ptr; \ } \ \ - inline void* operator new(::std::size_t s) { \ + ARCH_ALWAYS_INLINE inline void* operator new(::std::size_t s) { \ PXR_NS::TfAutoMallocTag tag(name1, name2); \ return malloc(s); \ } \ \ - inline void* operator new[](::std::size_t s) { \ + ARCH_ALWAYS_INLINE inline void* operator new[](::std::size_t s) { \ PXR_NS::TfAutoMallocTag tag(name1, name2); \ return malloc(s); \ } \ \ /* Required due to the placement-new override above. */ \ - inline void operator delete(void* ptr, void* place) {} \ + ARCH_ALWAYS_INLINE inline void operator delete(void* ptr, void* place) {} \ \ - inline void operator delete(void* ptr, size_t) { \ + ARCH_ALWAYS_INLINE inline void operator delete(void* ptr, size_t) { \ free(ptr); \ } \ \ - inline void operator delete[] (void* ptr, size_t) { \ + ARCH_ALWAYS_INLINE inline void operator delete[] (void* ptr, size_t) { \ free(ptr); \ } \ diff --git a/pxr/base/tf/meta.h b/pxr/base/tf/meta.h index 8cd9a4c787..6095538b9f 100644 --- a/pxr/base/tf/meta.h +++ b/pxr/base/tf/meta.h @@ -53,6 +53,25 @@ using TfMetaDecay = TfMetaList<std::decay_t<Ts>...>; template <class... Ts> using TfMetaLength = std::integral_constant<size_t, sizeof...(Ts)>; +// Lighter-weight compile-time conditional type selection implementation. +template <bool> +struct Tf_ConditionalImpl { + template <class T, class F> + using Type = T; +}; + +template <> +struct Tf_ConditionalImpl<false> { + template <class T, class F> + using Type = F; +}; + +// This is a bit lighter weight at compile time than std::conditional because it +// instantiates a separate template for the condition from the selector. +template <bool Cond, class T, class F> +using TfConditionalType = + typename Tf_ConditionalImpl<Cond>::template Type<T, F>; + PXR_NAMESPACE_CLOSE_SCOPE #endif // PXR_BASE_TF_META_H diff --git a/pxr/base/tf/moduleDeps.cpp b/pxr/base/tf/moduleDeps.cpp deleted file mode 100644 index d750239c0a..0000000000 --- a/pxr/base/tf/moduleDeps.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// -// Copyright 2016 Pixar -// -// Licensed under the terms set forth in the LICENSE.txt file available at -// https://openusd.org/license. -// -//////////////////////////////////////////////////////////////////////// - -#include "pxr/pxr.h" -#include "pxr/base/tf/registryManager.h" -#include "pxr/base/tf/scriptModuleLoader.h" -#include "pxr/base/tf/token.h" - -#include <vector> - -PXR_NAMESPACE_OPEN_SCOPE - -TF_REGISTRY_FUNCTION(TfScriptModuleLoader) { - // List of direct dependencies for this library. - const std::vector<TfToken> reqs = { - TfToken("arch"), - TfToken("python") - }; - TfScriptModuleLoader::GetInstance().
- RegisterLibrary(TfToken("tf"), TfToken("pxr.Tf"), reqs); -} - -PXR_NAMESPACE_CLOSE_SCOPE - - diff --git a/pxr/base/tf/noteOverview.dox b/pxr/base/tf/noteOverview.dox index c67917c26a..f397e2610a 100644 --- a/pxr/base/tf/noteOverview.dox +++ b/pxr/base/tf/noteOverview.dox @@ -97,8 +97,17 @@ If you know that thread-safety is not a concern (that is, your listening object First, if your listening object receives more than one notice at the same time and that is a problem, you will need to add locking behavior into your object's method calls. -Second, if you are worried that your listener object might be deleted while in the middle of receiving a notice call, then you need to make your listening object support both TfRefPtr and TfWeakPtr. That is, your object must derive from TfWeakBase \e and TfRefBase (one of the rarely allowed cases on concrete multiple inheritance) and should only be accessed through these smart pointers. Additionally, you must register your listener using TfNotice::RegisterDeletionSafe(). - -The above actions furnish a guarantee against untimely deletion, because the notification center prevents a listener from being destroyed while receiving a notice, by temporarily creating a TfRefPtr pointing to the object. (In between notices, the notification center points to the listener only by a TfWeakPtr, which allows the listener to destruct when not in use). +Second, if you are worried that your listener object might be deleted while in +the middle of receiving a notice call, then you need to call +TfNotice::RevokeAndWait() in the destructor. This function revokes a +registration and waits for any running notifications to the registered method +to return. Once called for all registrations that could get called during +listener destruction, you're safe to delete the listener. Be sure to call the +function before destroying any resources used by the registered methods; +otherwise, those methods might access the destroyed resources. The safest +approach is to call it first in your destructor. + +You can call TfNotice::RevokeAndWait() from anywhere, but it's generally only +needed in the destructor, and then only in the above case. */ diff --git a/pxr/base/tf/notice.cpp b/pxr/base/tf/notice.cpp index a79236ddb4..4399761978 100644 --- a/pxr/base/tf/notice.cpp +++ b/pxr/base/tf/notice.cpp @@ -11,6 +11,8 @@ #include "pxr/base/tf/noticeRegistry.h" #include "pxr/base/tf/type.h" +#include <thread> + using std::type_info; using std::vector; @@ -49,6 +51,16 @@ _EndDelivery(const vector &probes) Tf_NoticeRegistry::_GetInstance()._EndDelivery(probes); } +void +TfNotice::_DelivererBase::_WaitUntilNotSending() +{ + // Spin wait until no sends are in progress. This presumes + // _WaitForSendsToFinish() has been called.
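To make the destructor pattern documented in noteOverview.dox above concrete, here is a hypothetical listener class following that guidance: revoke and wait first, before any members are torn down (sketch; assumes the usual TfNotice::Register usage with a weak pointer):

```cpp
// Hypothetical listener using TfNotice::RevokeAndWait in its destructor.
#include "pxr/base/tf/notice.h"
#include "pxr/base/tf/weakBase.h"
#include "pxr/base/tf/weakPtr.h"

class FileWatcher : public TfWeakBase
{
public:
    FileWatcher()
        : _key(TfNotice::Register(TfCreateWeakPtr(this),
                                  &FileWatcher::_OnNotice))
    {
    }

    ~FileWatcher()
    {
        // Drain any in-flight deliveries to _OnNotice before members
        // are destroyed; after this returns, no handler is running.
        TfNotice::RevokeAndWait(_key);
    }

private:
    void _OnNotice(const TfNotice &notice) { /* uses members safely */ }

    TfNotice::Key _key;
};
```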
+ do { + std::this_thread::yield(); + } while (_busy.load(std::memory_order_acquire) != _waitBit); +} + TfNotice::Probe::~Probe() { } @@ -131,6 +143,27 @@ TfNotice::Revoke(Keys* keys) keys->clear(); } +bool +TfNotice::RevokeAndWait(Key& key) +{ + if (!key) { + return false; + } + + Tf_NoticeRegistry::_GetInstance()._Revoke(key, true); + + return true; +} + +void +TfNotice::RevokeAndWait(Keys* keys) +{ + TF_FOR_ALL(i, *keys) { + RevokeAndWait(*i); + } + keys->clear(); +} + void TfNotice::_VerifyFailedCast(const type_info& toType, const TfNotice& notice, const TfNotice* castNotice) diff --git a/pxr/base/tf/notice.h b/pxr/base/tf/notice.h index 02556098aa..15e4c811f5 100644 --- a/pxr/base/tf/notice.h +++ b/pxr/base/tf/notice.h @@ -19,6 +19,7 @@ #include "pxr/base/arch/demangle.h" #include "pxr/base/arch/hints.h" +#include <atomic> #include #include @@ -377,6 +378,27 @@ class TfNotice { TF_API static void Revoke(TfNotice::Keys* keys); + /// Revoke interest by a listener. + /// + /// This revokes interest by the listener for the particular notice type + /// and call-back method for which this key was created. + /// + /// \c RevokeAndWait will return a bool value indicating whether or not the + /// key was successfully revoked. Subsequent calls to \c RevokeAndWait with + /// the same key will return false. This will not return while any threads + /// are invoking the handler. + TF_API + static bool RevokeAndWait(TfNotice::Key& key); + + /// Revoke interest by listeners. + /// + /// This revokes interest by the listeners for the particular + /// notice types and call-back methods for which the keys were + /// created. It then clears the keys container. This will not return + /// while any threads are invoking any handlers. + TF_API + static void RevokeAndWait(TfNotice::Keys* keys); + /// Deliver the notice to interested listeners, returning the number /// of interested listeners. /// @@ -469,12 +491,12 @@ class TfNotice { // TfNotice::Revoke()), in which case the method call is skipped and // \c false is returned. virtual bool - _SendToListener(const TfNotice &n, - const TfType &type, - const TfWeakBase *s, - const void *senderUniqueId, - const std::type_info &senderType, - const std::vector &probes) = 0; + _SendToListenerImpl(const TfNotice &n, + const TfType &type, + const TfWeakBase *s, + const void *senderUniqueId, + const std::type_info &senderType, + const std::vector &) = 0; void _Deactivate() { _active = false; @@ -502,6 +524,45 @@ class TfNotice { virtual TfWeakBase const *GetSenderWeakBase() const = 0; virtual _DelivererBase *Clone() const = 0; + + // Increment the busy count around the actual delivery. + bool _SendToListener(const TfNotice &n, + const TfType &type, + const TfWeakBase *s, + const void *senderUniqueId, + const std::type_info &senderType, + const std::vector &probes) + { + // Increment the number of sends in progress. + if (_busy.fetch_add(1, std::memory_order_release) & _waitBit) { + // We're waiting to revoke this listener and we haven't + // started the real send yet, so act as if we were already + // revoked. If we didn't check whether we were waiting, it + // would be possible to enter this function but not yet + // increment, have the waiter see the count is zero and + // return, then have this function increment and do the real + // send after having supposedly waited for all sends to + // complete.
+ _busy.fetch_add(-1, std::memory_order_release); + return false; + } + const auto result = + _SendToListenerImpl(n, type, + s, senderUniqueId, senderType, probes); + _busy.fetch_add(-1, std::memory_order_release); + return result; + } + + // Spin wait until no deliveries are in progress. This is used when + // revoking a listener so we set the _waitBit in _busy permanently. + void _WaitForSendsToFinish() + { + // Mark this listener as waiting for sends to finish and check if + // any send is in progress. + if (_busy.fetch_or(_waitBit, std::memory_order_release)) { + // At least one send was in progress. + _WaitUntilNotSending(); + } + } protected: @@ -523,6 +584,10 @@ class TfNotice { return static_cast(from); } + private: + // Wait until there are no sends in progress. + void _WaitUntilNotSending(); + private: // Linkage to the containing _DelivererList in the Tf_NoticeRegistry _DelivererList *_list; @@ -530,7 +595,10 @@ class TfNotice { bool _active; bool _markedForRemoval; + std::atomic _busy{0}; + static constexpr int _waitBit = 0x80000000; + friend class Tf_NoticeRegistry; }; @@ -570,12 +638,12 @@ class TfNotice { } virtual bool - _SendToListener(const TfNotice ¬ice, - const TfType ¬iceType, - const TfWeakBase *sender, - const void *senderUniqueId, - const std::type_info &senderType, - const std::vector &probes) + _SendToListenerImpl(const TfNotice ¬ice, + const TfType ¬iceType, + const TfWeakBase *sender, + const void *senderUniqueId, + const std::type_info &senderType, + const std::vector &probes) { Derived *derived = this->AsDerived(); typedef typename Derived::ListenerType ListenerType; diff --git a/pxr/base/tf/noticeRegistry.cpp b/pxr/base/tf/noticeRegistry.cpp index c0c682c317..ed189ec577 100644 --- a/pxr/base/tf/noticeRegistry.cpp +++ b/pxr/base/tf/noticeRegistry.cpp @@ -176,17 +176,32 @@ Tf_NoticeRegistry::_Register(TfNotice::_DelivererBase* deliverer) } void -Tf_NoticeRegistry::_Revoke(TfNotice::Key& key) +Tf_NoticeRegistry::_Revoke(TfNotice::Key& key, bool wait) { - _Lock lock(_userCountMutex); - - if (_userCount == 0) { - // If no other execution context is traversing the registry, we - // can remove the deliverer immediately. - _FreeDeliverer(key._deliverer); - } else { - // Otherwise deactivate it. - key._deliverer->_Deactivate(); + { + _Lock lock(_userCountMutex); + + if (_userCount == 0) { + // If no other execution context is traversing the registry, we + // can remove the deliverer immediately. + _FreeDeliverer(key._deliverer); + + // No need to wait because nothing can be invoking the handler. + wait = false; + } else { + // Otherwise deactivate it. + key._deliverer->_Deactivate(); + // If we're waiting, we need to ensure that the deliverer survives + // after we drop the lock above, so it can do the waiting. + if (wait) { + ++_userCount; + } + } + } + + if (wait) { + key._deliverer->_WaitForSendsToFinish(); + _IncrementUserCount(-1); } } diff --git a/pxr/base/tf/noticeRegistry.h b/pxr/base/tf/noticeRegistry.h index c44501a129..9996db9eb1 100644 --- a/pxr/base/tf/noticeRegistry.h +++ b/pxr/base/tf/noticeRegistry.h @@ -72,8 +72,10 @@ class Tf_NoticeRegistry { const std::type_info &senderType); // Remove listener instance indicated by \p key. This is pass by - // reference so we can mark the key as having been revoked. - void _Revoke(TfNotice::Key& key); + // reference so we can mark the key as having been revoked. If + // \p wait is true then don't return while any thread is invoking + // the handler. 
+ void _Revoke(TfNotice::Key& key, bool wait = false); // Abort if casting of a notice failed; warn if it succeeded but // TfSafeDynamic_cast was required. diff --git a/pxr/base/tf/pyExceptionState.h b/pxr/base/tf/pyExceptionState.h index bcbbce71eb..5f1ce3a49d 100644 --- a/pxr/base/tf/pyExceptionState.h +++ b/pxr/base/tf/pyExceptionState.h @@ -9,6 +9,8 @@ #include "pxr/base/tf/api.h" #include "pxr/external/boost/python/handle.hpp" +#include + PXR_NAMESPACE_OPEN_SCOPE struct TfPyExceptionState { diff --git a/pxr/base/tf/refPtr.h b/pxr/base/tf/refPtr.h index dc0d8f677c..37e29d2c2f 100644 --- a/pxr/base/tf/refPtr.h +++ b/pxr/base/tf/refPtr.h @@ -1299,22 +1299,6 @@ swap(TfRefPtr& lhs, TfRefPtr& rhs) lhs.swap(rhs); } -PXR_NAMESPACE_CLOSE_SCOPE - -namespace boost { - -template -T * -get_pointer(PXR_NS::TfRefPtr const& p) -{ - return get_pointer(p); -} - -} // end namespace boost - -PXR_NAMESPACE_OPEN_SCOPE - -// Extend boost::hash to support TfRefPtr. template inline size_t hash_value(const TfRefPtr& ptr) diff --git a/pxr/base/tf/scopeDescription.h b/pxr/base/tf/scopeDescription.h index 840dcc7760..89791f7dd9 100644 --- a/pxr/base/tf/scopeDescription.h +++ b/pxr/base/tf/scopeDescription.h @@ -19,12 +19,29 @@ PXR_NAMESPACE_OPEN_SCOPE +/// \file tf/scopeDescription.h + /// \class TfScopeDescription /// /// This class is used to provide high-level descriptions about scopes of /// execution that could possibly block, or to provide relevant information /// about high-level action that would be useful in a crash report. /// +/// The TF_DESCRIBE_SCOPE() macro provides a convenient way to annotate a +/// given scope: +/// +/// \code +/// void OpenManyFiles() { +/// TF_DESCRIBE_SCOPE("Opening many files"); +/// +/// for (auto path : manyPaths) { +/// TF_DESCRIBE_SCOPE("Opening file: %s", path.c_str()); +/// +/// // do potentially expensive file operations ... +/// } +/// } +/// \endcode +/// /// This class is reasonably fast to use, especially if the message strings are /// not dynamically created, however it should not be used in very highly /// performance sensitive contexts. The cost to push & pop is essentially a TLS @@ -111,8 +128,11 @@ TfGetCurrentScopeDescriptionStack(); TF_API std::vector TfGetThisThreadScopeDescriptionStack(); -/// Macro that accepts either a single string, or printf-style arguments and +/// Convenience macro for annotating the current scope with a description. +/// This macro accepts either a single string, or printf-style arguments and /// creates a scope description local variable with the resulting string. +/// +/// \hideinitializer #define TF_DESCRIBE_SCOPE(...) 
\ TfScopeDescription __scope_description__ \ (Tf_DescribeScopeFormat(__VA_ARGS__), TF_CALL_CONTEXT); \ diff --git a/pxr/base/tf/scriptModuleLoader.cpp b/pxr/base/tf/scriptModuleLoader.cpp index e1472ce7b6..399a6d2309 100644 --- a/pxr/base/tf/scriptModuleLoader.cpp +++ b/pxr/base/tf/scriptModuleLoader.cpp @@ -11,11 +11,10 @@ #include "pxr/base/tf/debug.h" #include "pxr/base/tf/debugCodes.h" #include "pxr/base/tf/instantiateSingleton.h" -#include "pxr/base/tf/iterator.h" +#include "pxr/base/tf/pyError.h" +#include "pxr/base/tf/pyExceptionState.h" #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/registryManager.h" -#include "pxr/base/tf/stackTrace.h" -#include "pxr/base/tf/staticData.h" #include "pxr/base/tf/stringUtils.h" #include "pxr/base/arch/fileSystem.h" @@ -24,13 +23,103 @@ #include "pxr/external/boost/python/dict.hpp" #include "pxr/external/boost/python/handle.hpp" -#include +/* + +Notes for those who venture into this dark crevice: + +We build C++ libs that have python bindings into two shared libraries: 1, the +shared library with the compiled C++ code (`libFoo.so`), and 2, a shared library +with the C++/Python bindings (`_foo.so`). We do this so that we can write pure +C++ programs that link `libFoo.so` and do not pay to load the Python bindings. +The bindings libraries (`_foo.so`) have a shared library link dependency on +their corresponding C++ library (`libFoo.so`) so whenever a bindings lib loads, +the shared library loader automatically loads the main C++ lib. + +The job of the code in this file is to ensure that the Python bindings for +loaded C++ libraries are loaded whenever they are needed. + +When are they needed? There are a few scenarios. + +The most obvious is when a Python module is imported. Suppose libFoo depends on +libBar, and both have Python bindings. If we do `import Foo` in Python, we need +to ensure that the `Bar` module is imported and registers its bindings before +Foo registers its bindings, since Foo's bindings may depend on Bar's. For +example, if Foo wraps a class that derives a base class from Bar, that base +class must be wrapped first. This scenario could in principle be handled by +manually writing `import Bar` in Foo's __init__.py. But, that is prone both to +being forgotten, and to going undetected if Bar happens to be already loaded in +most of the scenarios when Foo is imported. And, as we'll see, that doesn't +solve the other main scenario. + +The other main scenario is when a C++ program initializes Python at some point +during its execution. In this case all currently loaded C++ libraries that have +Python bindings must load them immediately. Further, any C++ library that loads +in the future (via dlopen()) must also immediately import its bindings at load +time. This might sound surprising. Why should we need to eagerly import all +bindings like this? Surely if Python code needs to use the bindings it will +`import` those modules itself, no? Yes this is true, but there can be hidden, +indirect, undetectable dependencies that this does not handle due to +type-erasure. + +Consider a type-erased opaque 'box' container, analogous to `std::any`. Let's +call it `Any`. When an Any is converted to Python, it unboxes its held object +and converts that object to Python. So for example if an Any holding a string +is returned to Python, Python gets a string. An Any holding a double unboxes as +a Python float. An Any holding a FooObject unboxes it and uses its bindings to +convert it to Python. + +Now suppose we have libFoo and libBar. 
Bar has an Any-valued variable, and +provides a Python binding to access it: `Bar.GetAny()`. Foo depends on Bar, and +stores an Any holding a FooObject in Bar's Any. Now suppose a Python script +does `import Bar; Bar.GetAny()`. The call to `Bar.GetAny()` will fail unless +the bindings for the held FooObject have been imported. + +Whose responsibility is it to ensure they have been imported? Bar cannot know +to import them, since Bar does not depend on Foo -- it has no knowledge of Foo's +existence. The Python code that imported Bar similarly cannot have done it -- +there's no way for it to know ahead of time what type of object is stored in the +Any, and it may not know of Foo's existence either. The answer is that the +_only_ place that we can know for sure the bindings are needed is at the very +moment that a FooObject is requested to be converted to Python. Unfortunately, +we do not have a hook available for this today. It might be possible to build +one now that we have our own copy of boost.python interred, but that's not the +world we currently live in. + +In the meantime, we must do as we said above and load _all_ bindings when Python +is initialized, and then continue to load any bindings for any later-loaded C++ +libraries when they are loaded. + +The Mechanism: + +We build a small bit of initialization code into each C++ library with bindings +to advertise itself to the TfScriptModuleLoader. These are the +`moduleDeps.cpp` files in `pxr/...`. The initializer calls +TfScriptModuleLoader::RegisterLibrary(), passing the lib name, its Python module +name (suitable for `import`), and a list of the lib's direct dependencies. This +way the loader knows both which libraries have bindings, and what they +directly depend on. + +Then, in a few key places, code calls LoadModules() or LoadModulesForLibrary() +to achieve the goals above. + +- Tf_PyInitWrapModule(), in tf/pyModule.cpp calls LoadModulesForLibrary(). This + function is called by Python when a bindings library is imported. This covers + the "import" scenario above. + +- TfPyInitialize(), in tf/pyInterpreter.cpp calls LoadModules() to ensure that + all bindings are loaded if a C++ program starts up the Python interpreter at + some point. + +- TfDlopen(), in tf/dl.cpp calls LoadModules() to ensure that all bindings are + loaded for any newly loaded C++ libraries if Python is already initialized.
+ +*/ PXR_NAMESPACE_OPEN_SCOPE TF_INSTANTIATE_SINGLETON(TfScriptModuleLoader); -using std::deque; +using std::pair; using std::string; using std::vector; @@ -39,85 +128,74 @@ using pxr_boost::python::dict; using pxr_boost::python::handle; using pxr_boost::python::object; - -TfScriptModuleLoader::TfScriptModuleLoader() -{ -} - -// CODE_COVERAGE_OFF_GCOV_BUG -TfScriptModuleLoader::~TfScriptModuleLoader() -// CODE_COVERAGE_ON_GCOV_BUG -{ -} +TfScriptModuleLoader::TfScriptModuleLoader() = default; +TfScriptModuleLoader::~TfScriptModuleLoader() = default; void TfScriptModuleLoader:: -RegisterLibrary(TfToken const &name, TfToken const &moduleName, +RegisterLibrary(TfToken const &lib, TfToken const &moduleName, vector const &predecessors) { - if (TfDebug::IsEnabled(TF_SCRIPT_MODULE_LOADER)) { TF_DEBUG(TF_SCRIPT_MODULE_LOADER) - .Msg("Registering library %s with predecessors: ", name.GetText()); - TF_FOR_ALL(pred, predecessors) { - TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("%s, ", pred->GetText()); - } - TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("\n"); + .Msg("SML: Registering lib %s with %spredecessors%s%s\n", + lib.GetText(), + predecessors.empty() ? "no " : "", + predecessors.empty() ? "" : " ", + TfStringJoin(predecessors.begin(), + predecessors.end(), ", ").c_str()); } + vector mutablePreds = predecessors; + std::sort(mutablePreds.begin(), mutablePreds.end()); + // Add library with predecessors. - vector &predsInTable = _libInfo[name].predecessors; - predsInTable = predecessors; - std::sort(predsInTable.begin(), predsInTable.end()); - _libsToModules[name] = moduleName; - - // Add this library as a successor to all predecessors. - TF_FOR_ALL(pred, predecessors) - _AddSuccessor(*pred, name); -} - -vector -TfScriptModuleLoader::GetModuleNames() const -{ - vector order; - vector ret; - _TopologicalSort(&order); - ret.reserve(order.size()); - TF_FOR_ALL(lib, order) { - _TokenToTokenMap::const_iterator i = _libsToModules.find(*lib); - if (i != _libsToModules.end()) - ret.push_back(i->second.GetString()); + TfSpinRWMutex::ScopedLock lock(_mutex); + bool success = _libInfo.emplace( + std::piecewise_construct, + std::make_tuple(lib), + std::make_tuple(moduleName, std::move(mutablePreds))).second; + lock.Release(); + + if (!success) { + TF_WARN("Library %s (with module '%s') already registered, repeated " + "registration ignored", lib.GetText(), moduleName.GetText()); } - return ret; } dict TfScriptModuleLoader::GetModulesDict() const { if (!TfPyIsInitialized()) { - TF_CODING_ERROR("Python is not initialized!"); + TF_CODING_ERROR("Python is not initialized."); return dict(); } - // Kick the registry function so any loaded libraries with script - // bindings register themselves with the script module loading system. + // Subscribe to the registry function so any loaded libraries with script + // bindings publish to this singleton. TfRegistryManager::GetInstance().SubscribeTo(); - TfPyLock lock; + // Collect the libs & module names, then release the lock. + TfSpinRWMutex::ScopedLock lock(_mutex, /*write=*/false); + // Make paired lib & module names. + vector> libAndModNames; + libAndModNames.reserve(_libInfo.size()); + for (auto const &[lib, info]: _libInfo) { + libAndModNames.emplace_back(lib, info.moduleName); + } + lock.Release(); + + // Take the python lock and build a dict. + TfPyLock pyLock; // Get the sys.modules dict from python, so we can see if modules are // already loaded. 
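The moduleDeps.cpp files referred to in the comment block above all share the shape of the two deleted by this patch (plug and tf). For reference, a registration for a hypothetical library "foo" whose Python module is pxr.Foo and which depends only on tf would look like:

```cpp
// Hypothetical moduleDeps.cpp for a library "foo" that depends on tf,
// mirroring the deleted plug/tf files above.
#include "pxr/pxr.h"
#include "pxr/base/tf/registryManager.h"
#include "pxr/base/tf/scriptModuleLoader.h"
#include "pxr/base/tf/token.h"

#include <vector>

PXR_NAMESPACE_OPEN_SCOPE

TF_REGISTRY_FUNCTION(TfScriptModuleLoader) {
    // Direct dependencies only; transitive predecessors are handled
    // when each predecessor's own bindings are imported.
    const std::vector<TfToken> reqs = {
        TfToken("tf"),
    };
    TfScriptModuleLoader::GetInstance().
        RegisterLibrary(TfToken("foo"), TfToken("pxr.Foo"), reqs);
}

PXR_NAMESPACE_CLOSE_SCOPE
```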
dict modulesDict(handle<>(borrowed(PyImport_GetModuleDict()))); - - vector order; dict ret; - _TopologicalSort(&order); - TF_FOR_ALL(lib, order) { - _TokenToTokenMap::const_iterator i = _libsToModules.find(*lib); - if (i != _libsToModules.end() && - modulesDict.has_key(i->second.GetText())) { - handle<> modHandle(PyImport_ImportModule(const_cast - (i->second.GetText()))); + + for (auto const &[lib, mod]: libAndModNames) { + if (modulesDict.has_key(mod.GetText())) { + handle<> modHandle(PyImport_ImportModule(mod.GetText())); // Use the upper-cased form of the library name as // the Python module name. @@ -142,9 +220,7 @@ TfScriptModuleLoader::GetModulesDict() const // // For now, we just upper-case the library name. // - string moduleName = TfStringCapitalize(lib->GetString()); - - ret[moduleName] = object(modHandle); + ret[TfStringCapitalize(lib.GetString())] = object(modHandle); } } return ret; @@ -158,16 +234,13 @@ TfScriptModuleLoader::WriteDotFile(string const &file) const TF_RUNTIME_ERROR("Could not open '%s' for writing.\n", file.c_str()); return; } - fprintf(out, "digraph Modules {\n"); - - TF_FOR_ALL(info, _libInfo) { - TF_FOR_ALL(successor, info->second.successors) { - fprintf(out, "\t%s -> %s;\n", info->first.GetText(), - successor->GetText()); + TfSpinRWMutex::ScopedLock lock(_mutex, /*write=*/false); + for (auto const &[lib, info]: _libInfo) { + for (TfToken const &pred: info.predecessors) { + fprintf(out, "\t%s -> %s;\n", lib.GetText(), pred.GetText()); } } - fprintf(out, "}\n"); fclose(out); } @@ -175,257 +248,147 @@ TfScriptModuleLoader::WriteDotFile(string const &file) const void TfScriptModuleLoader::LoadModules() { - _LoadModulesFor(TfToken()); -} + // Do nothing if Python is not initialized. + if (!TfPyIsInitialized()) { + return; + } + + // Subscribe to the registry function so any loaded libraries with script + // bindings publish to this singleton. + TfRegistryManager::GetInstance().SubscribeTo(); -void -TfScriptModuleLoader::LoadModulesForLibrary(TfToken const &name) { - _LoadModulesFor(name); -} + TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("SML: Begin loading all modules\n"); -bool -TfScriptModuleLoader::_HasTransitiveSuccessor(TfToken const &predecessor, - TfToken const &successor) const -{ - // This function does a simple DFS of the dependency dag, to determine if \a - // predecessor has \a successor somewhere in the transitive closure. - - vector predStack(1, predecessor); - TfToken::HashSet seenPreds; - - while (!predStack.empty()) { - TfToken pred = predStack.back(); - predStack.pop_back(); - - // A name counts as its own successor. - if (pred == successor) - return true; - - // Look up successors, and push ones not yet visted as possible - // predecessors. - _TokenToInfoMap::const_iterator i = _libInfo.find(pred); - if (i != _libInfo.end()) { - // Walk all the successors. - TF_FOR_ALL(j, i->second.successors) { - // Push those that haven't yet been visited on the stack. - if (seenPreds.insert(pred).second) - predStack.push_back(pred); - } + // Take the lock, then collect all the modules that aren't yet loaded into a + // vector. Then release the lock and call _LoadLibModules() to do the work. 
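The comment above describes the copy-under-lock discipline this rewritten file uses throughout: snapshot the shared table while holding _mutex, release the lock, then do the slow work (imports, Python calls) on the snapshot. The general idiom, reduced to a sketch with standard-library types:

```cpp
// Copy-under-lock: hold the mutex only long enough to snapshot shared
// state, then do expensive work on the local copy without blocking
// other threads.
#include <mutex>
#include <vector>

std::mutex mutex;
std::vector<int> shared;

void Process()
{
    std::vector<int> snapshot;
    {
        std::lock_guard<std::mutex> lock(mutex);
        snapshot = shared;          // cheap copy under the lock
    }
    // ... expensive processing of `snapshot`; lock already released ...
}
```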
+ TfSpinRWMutex::ScopedLock lock(_mutex); + vector<_LibAndInfo const *> toLoad; + for (auto iter = _libInfo.begin(), end = _libInfo.end(); + iter != end; ++iter) { + if (!iter->second.isLoaded) { + toLoad.push_back(std::addressof(*iter)); + } + else { + TF_DEBUG(TF_SCRIPT_MODULE_LOADER_EXTRA) + .Msg("SML: Skipping already-loaded %s\n", + iter->first.GetText()); } } - return false; -} + lock.Release(); -static bool _DidPyErrorOccur() -{ - TfPyLock pyLock; - return PyErr_Occurred(); + // Sort modules by lib name to provide a consistent load order. This isn't + // required for correctness but eliminates a source of nondeterminism. + std::sort(toLoad.begin(), toLoad.end(), + [](_LibAndInfo const *l, _LibAndInfo const *r) { + return l->first < r->first; + }); + _LoadLibModules(toLoad); + + TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("SML: End loading all modules\n"); } void -TfScriptModuleLoader::_LoadUpTo(TfToken const &name) +TfScriptModuleLoader +::_LoadLibModules(vector<_LibAndInfo const *> const &toLoad) const { - static size_t indent = 0; - string indentString; - char const *indentTxt = 0; - - if (TfDebug::IsEnabled(TF_SCRIPT_MODULE_LOADER)) { - indentString = std::string(indent * 2, ' '); - indentTxt = indentString.c_str(); - } - - // Don't do anything if the name isn't empty and it's not a name we know - // about. - if (!name.IsEmpty() && !_libInfo.count(name)) { - TF_DEBUG(TF_SCRIPT_MODULE_LOADER) - .Msg("%s*** Not loading modules for unknown lib '%s'\n", - indentTxt, name.GetText()); + if (toLoad.empty()) { return; } + + TfPyLock pyLock; + + for (_LibAndInfo const *libAndInfo: toLoad) { + auto const &[lib, info] = *libAndInfo; - // Otherwise load modules in topological dependency order until we - // encounter the requested module. - vector order; - if (name.IsEmpty()) { - _TopologicalSort(&order); - } else { - _GetOrderedDependencies(vector(1, name), &order); - } - - TF_DEBUG(TF_SCRIPT_MODULE_LOADER) - .Msg("%s_LoadUpTo('%s') {\n", indentTxt, name.GetText()); - TF_FOR_ALL(lib, order) { - // If we encounter the library we're loading on behalf of, quit. - // Mostly this is the last library in the order, but it may not be. - if (*lib == name) - break; - - if (_libsToModules.count(*lib) && !_loadedSet.count(*lib)) { + if (info.moduleName.IsEmpty()) { TF_DEBUG(TF_SCRIPT_MODULE_LOADER) - .Msg("%s Load('%s');\n", indentTxt, lib->GetText()); - _loadedSet.insert(*lib); - ++indent; - Tf_PyLoadScriptModule(_libsToModules[*lib]); - --indent; + .Msg("SML: Not loading unknown module for lib %s\n", + lib.GetText()); + continue; + } + if (info.isLoaded) { + TF_DEBUG(TF_SCRIPT_MODULE_LOADER_EXTRA) + .Msg("SML: Lib %s's module '%s' is already loaded\n", + lib.GetText(), info.moduleName.GetText()); + continue; } + + TF_DEBUG(TF_SCRIPT_MODULE_LOADER) + .Msg("SML: Loading lib %s's module '%s'\n", + lib.GetText(), info.moduleName.GetText()); - if (_DidPyErrorOccur()) { - TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("%s *error*\n", indentTxt); - break; + // Try to load the module. + if (!PyImport_ImportModule(info.moduleName.GetText())) { + // If an error occurred, warn about it with the python traceback, + // and continue. 
+            TF_DEBUG(TF_SCRIPT_MODULE_LOADER)
+                .Msg("SML: Error loading lib %s's module '%s'\n",
+                     lib.GetText(), info.moduleName.GetText());
+            TfPyExceptionState exc = TfPyExceptionState::Fetch();
+            string traceback = exc.GetExceptionString();
+            TF_WARN("Error loading lib %s's module '%s':\n%s",
+                    lib.GetText(), info.moduleName.GetText(),
+                    traceback.c_str());
         }
+
+        // Mark the module loaded, even if there was an error. Otherwise we'll
+        // keep trying to load it and keep generating the same error.
+        info.isLoaded = true;
     }
-
-    TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg("%s}\n", indentTxt);
 }

-void
-TfScriptModuleLoader::_LoadModulesFor(TfToken const &inName)
+TfScriptModuleLoader::_LibInfo const *
+TfScriptModuleLoader::_FindInfo(TfToken const &lib) const
 {
-    // Don't load anything if python isn't initialized.
-    if (!TfPyIsInitialized())
-        return;
-    if (_DidPyErrorOccur())
-        return;
-
-    //////////////////////////////////////////////////////////////////////////
-    // This function handles requests to load modules in topological dependency
-    // order up to \a inName. Doing so may cause reentrant calls to this
-    // function. Handling reentrancy works as follows. There are two cases to
-    // consider.
-    //
-    // -----------------------------------------------------------------------
-    // Case 1: The reentrant request is for a module that depends (transitively)
-    // on the module we're currently loading (or is a request to load all
-    // modules).
-    //
-    //     In this case, we defer until after the current request is completed.
-    //     We do this because the dependent library may attempt to load the
-    //     library we're currently loading, which would fail.
-    //
-    // ------------------------------------------------------------------------
-    // Case 2: The reentrant request is for a module that does not depend on the
-    // module we're currently loading.
-    //
-    //     In this case, we immediately load the module and its dependencies.
-    //     This is a dynamically discovered dependency of the module we're
-    //     currently loading.
-    //
-    // We achieve this by keeping a queue of requests. We always push the
-    // request on the back of the queue. If we're the outermost call (i.e. not
-    // reentrant) we loop, processing the queue from the front to the back. For
-    // a reentrant call, in case 1 (above), we return immediately, deferring the
-    // work. In case 2 (above) we process the element on the back of the queue
-    // immediately and pop it.
-    //
-
-    // Add this request to the remaining work to do.
-    _remainingLoadWork.push_back(inName);
-
-    // Kick the registry function so any loaded libraries with script bindings
-    // register themselves with the script module loading system.
-    TfRegistryManager::GetInstance().SubscribeTo();
-
-    // If this is the outermost caller initiating loading, start processing the
-    // queue.
-    if (_remainingLoadWork.size() == 1) {
-        while (!_remainingLoadWork.empty() && !_DidPyErrorOccur()) {
-            // Note: we copy the front of the deque here, since _LoadUpTo may
-            // add items into the deque. _LoadUpTo currently doesn't access the
-            // reference-to-token it's passed after it might have modified the
-            // deque, but this is good defensive coding in case it's changed in
-            // the future to do so.
-            TfToken name = _remainingLoadWork.front();
-            _LoadUpTo(name);
-            // We must pop the queue *after* processing, since reentrant calls
-            // need to see \a name on the front of the queue. See the access of
-            // _remainingLoadWork.front() below.
-            _remainingLoadWork.pop_front();
-        }
-
-    // Otherwise, this is a reentrant load request. If the reentrant
-    // request is not to load everything (empty token) and it's also not a
-    // (transitive) dependency of the library we're currently working on,
-    // then load it immediately.
-    } else if (!_remainingLoadWork.back().IsEmpty() &&
-               !_HasTransitiveSuccessor(_remainingLoadWork.front(),
-                                        _remainingLoadWork.back())) {
-        TfToken name = _remainingLoadWork.back();
-        _remainingLoadWork.pop_back();
-        _LoadUpTo(name);
-    }
+    auto iter = _libInfo.find(lib);
+    return iter != _libInfo.end() ? &iter->second : nullptr;
 }

 void
-TfScriptModuleLoader::
-_AddSuccessor(TfToken const &lib, TfToken const &successor)
+TfScriptModuleLoader::LoadModulesForLibrary(TfToken const &lib)
 {
-    if (ARCH_UNLIKELY(lib == successor)) {
-        // CODE_COVERAGE_OFF Can only happen if there's a bug.
-        TF_FATAL_ERROR("Library '%s' cannot depend on itself.", lib.GetText());
+    // Do nothing if Python is not running.
+    if (!TfPyIsInitialized()) {
         return;
-        // CODE_COVERAGE_ON
     }

-    // Add dependent as a dependent of lib.
-    vector<TfToken> *successors = &(_libInfo[lib].successors);
-    successors->insert(std::lower_bound(successors->begin(),
-                                        successors->end(), successor),
-                       successor);
-}
-
-void
-TfScriptModuleLoader
-::_GetOrderedDependenciesRecursive(TfToken const &lib,
-                                   TfToken::HashSet *seenLibs,
-                                   vector<TfToken> *result) const
-{
-    // If we've not yet visited this library, then visit its predecessors, and
-    // finally add it to the order.
-    if (seenLibs->insert(lib).second) {
-        TF_FOR_ALL(i, _libInfo.find(lib)->second.predecessors)
-            _GetOrderedDependenciesRecursive(*i, seenLibs, result);
-
-        result->push_back(lib);
+    // Special-case calling LoadModulesForLibrary with empty token means all.
+    if (lib.IsEmpty()) {
+        TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg(
+            "SML: Request to load modules for empty lib name -> load all\n");
+        return LoadModules();
     }
-}

-void
-TfScriptModuleLoader::
-_GetOrderedDependencies(vector<TfToken> const &input,
-                        vector<TfToken> *result) const
-{
-    TfToken::HashSet seenLibs;
-    TF_FOR_ALL(i, input) {
-        // If we haven't seen the current input yet, add its predecessors (and
-        // their dependencies) to the result.
-        if (seenLibs.insert(*i).second) {
-            TF_FOR_ALL(j, _libInfo.find(*i)->second.predecessors)
-                _GetOrderedDependenciesRecursive(*j, &seenLibs, result);
+    // Subscribe to the registry function so any loaded libraries with script
+    // bindings publish to this singleton.
+    TfRegistryManager::GetInstance().SubscribeTo();
+
+    // We only load direct dependencies, since when we run the initializer for
+    // the python bindings lib, it will call back into here to load _its_
+    // dependencies, and we get the transitive loading that way.
+
+    TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg(
+        "SML: Begin loading %s's predecessors\n", lib.GetText());
+
+    // Take the lock, then collect all the modules that this lib directly
+    // depends on that aren't already loaded, add them to `toLoad`, then release
+    // the lock and load all the modules.
+    TfSpinRWMutex::ScopedLock lock(_mutex);
+    vector<_LibAndInfo const *> toLoad;
+    if (_LibInfo const *libInfo = _FindInfo(lib)) {
+        for (TfToken const &pred: libInfo->predecessors) {
+            auto iter = _libInfo.find(pred);
+            if (iter != _libInfo.end() && !iter->second.isLoaded) {
+                toLoad.push_back(std::addressof(*iter));
+            }
         }
     }
-}
-
-
-void
-TfScriptModuleLoader::
-_TopologicalSort(vector<TfToken> *result) const
-{
-    // Find all libs with no successors, then produce all ordered dependencies
-    // from them.
-    vector<TfToken> leaves;
-    TF_FOR_ALL(i, _libInfo) {
-        if (i->second.successors.empty())
-            leaves.push_back(i->first);
-    }
-
-    // Sort to ensure determinism.
-    std::sort(leaves.begin(), leaves.end());
-
-    // Find all the leaves' dependencies.
-    _GetOrderedDependencies(leaves, result);
+    lock.Release();
+
+    _LoadLibModules(toLoad);

-    // Add the leaves themselves, at the end.
-    result->insert(result->end(), leaves.begin(), leaves.end());
+    TF_DEBUG(TF_SCRIPT_MODULE_LOADER).Msg(
+        "SML: End loading %s's predecessors\n", lib.GetText());
 }

 PXR_NAMESPACE_CLOSE_SCOPE
diff --git a/pxr/base/tf/scriptModuleLoader.h b/pxr/base/tf/scriptModuleLoader.h
index d8d48e8eaa..2779c2b16e 100644
--- a/pxr/base/tf/scriptModuleLoader.h
+++ b/pxr/base/tf/scriptModuleLoader.h
@@ -12,6 +12,7 @@
 #include "pxr/base/tf/api.h"
 #include "pxr/base/tf/hash.h"
 #include "pxr/base/tf/singleton.h"
+#include "pxr/base/tf/spinRWMutex.h"
 #include "pxr/base/tf/token.h"
 #include "pxr/base/tf/weakBase.h"

@@ -21,9 +22,8 @@
 #include "pxr/external/boost/python/dict.hpp"

 #include
-#include "pxr/base/tf/hashmap.h"
-#include "pxr/base/tf/hashset.h"
 #include
+#include
 #include

 PXR_NAMESPACE_OPEN_SCOPE
@@ -67,61 +67,46 @@ class TfScriptModuleLoader : public TfWeakBase {
     /// RegisterLibrary that depend on library \a name.
     TF_API
     void LoadModulesForLibrary(TfToken const &name);
-
-    /// Return a list of all currently known modules in a valid dependency
-    /// order.
-    TF_API
-    std::vector<std::string> GetModuleNames() const;
-
+
     /// Return a python dict containing all currently known modules under
     /// their canonical names.
     TF_API
     pxr_boost::python::dict GetModulesDict() const;

-    /// Write a graphviz dot-file for the dependency graph of all. currently
-    /// known libraries/modules to \a file.
+    /// Write a graphviz dot-file for the dependency graph of all currently
+    /// registered libraries/modules to \a file.
     TF_API
     void WriteDotFile(std::string const &file) const;

 private:
+    friend class TfSingleton<TfScriptModuleLoader>;

     struct _LibInfo {
-        _LibInfo() {}
-        std::vector<TfToken> predecessors, successors;
+        _LibInfo() = default;
+        _LibInfo(TfToken const &moduleName,
+                 std::vector<TfToken> &&predecessors)
+            : moduleName(moduleName)
+            , predecessors(predecessors) { }
+
+        TfToken moduleName;
+        std::vector<TfToken> predecessors;
+        mutable std::atomic<bool> isLoaded = false;
     };

-    typedef TfHashMap<TfToken, _LibInfo, TfToken::HashFunctor>
-        _TokenToInfoMap;
+    using _LibInfoMap =
+        std::unordered_map<TfToken, _LibInfo, TfHash>;
+
+    using _LibAndInfo = _LibInfoMap::value_type;

-    typedef TfHashMap<TfToken, TfToken, TfToken::HashFunctor>
-        _TokenToTokenMap;
-
-    typedef TfHashSet<TfToken, TfToken::HashFunctor>
-        _TokenSet;
-
     TfScriptModuleLoader();
     virtual ~TfScriptModuleLoader();
-    friend class TfSingleton<TfScriptModuleLoader>;
+
+    _LibInfo const *_FindInfo(TfToken const &lib) const;
+
+    void _LoadLibModules(std::vector<_LibAndInfo const *> const &toLoad) const;

-    void _AddSuccessor(TfToken const &lib, TfToken const &successor);
-    void _LoadModulesFor(TfToken const &name);
-    void _LoadUpTo(TfToken const &name);
-    void _GetOrderedDependenciesRecursive(TfToken const &lib,
-                                          TfToken::HashSet *seenLibs,
-                                          std::vector<TfToken> *result) const;
-    void _GetOrderedDependencies(std::vector<TfToken> const &input,
-                                 std::vector<TfToken> *result) const;
-    void _TopologicalSort(std::vector<TfToken> *result) const;
-
-    bool _HasTransitiveSuccessor(TfToken const &predecessor,
-                                 TfToken const &successor) const;
-
-    _TokenToInfoMap _libInfo;
-    _TokenToTokenMap _libsToModules;
-    _TokenSet _loadedSet;
-
-    // This is only used to handle reentrant loading requests.
-    std::deque<TfToken> _remainingLoadWork;
+    _LibInfoMap _libInfo;
+    mutable TfSpinRWMutex _mutex;
 };

 TF_API_TEMPLATE_CLASS(TfSingleton<TfScriptModuleLoader>);
diff --git a/pxr/base/tf/stringUtils.h b/pxr/base/tf/stringUtils.h
index 4a75a210a0..7432c678ad 100644
--- a/pxr/base/tf/stringUtils.h
+++ b/pxr/base/tf/stringUtils.h
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <locale>
 #include
 #include
 #include
@@ -559,6 +560,7 @@ TfStringify(const T& v)
     }
     else {
         std::ostringstream stream;
+        stream.imbue(std::locale::classic());
         stream << v;
         return stream.str();
     }
diff --git a/pxr/base/tf/testenv/baseline/TfDebugTestEnv/non-specific/debugTestEnv.out b/pxr/base/tf/testenv/baseline/TfDebugTestEnv/non-specific/debugTestEnv.out
index d14ee35e96..bfe77dbc3f 100644
--- a/pxr/base/tf/testenv/baseline/TfDebugTestEnv/non-specific/debugTestEnv.out
+++ b/pxr/base/tf/testenv/baseline/TfDebugTestEnv/non-specific/debugTestEnv.out
@@ -23,5 +23,7 @@ TF_LOG_STACK_TRACE_ON_WARNING:
 TF_PRINT_ALL_POSTED_ERRORS_TO_STDERR:
     print all posted errors immediately, meaning that even errors that are expected and handled will be printed, producing possibly confusing output
 TF_SCRIPT_MODULE_LOADER : show script module loading activity
+TF_SCRIPT_MODULE_LOADER_EXTRA:
+    show more script module loading activity
 TF_TYPE_REGISTRY : show changes to the TfType registry
diff --git a/pxr/base/tf/testenv/baseline/testTfScriptModuleLoader/scriptModuleLoader.out b/pxr/base/tf/testenv/baseline/testTfScriptModuleLoader/scriptModuleLoader.out
index d68d118571..aec6b56480 100644
--- a/pxr/base/tf/testenv/baseline/testTfScriptModuleLoader/scriptModuleLoader.out
+++ b/pxr/base/tf/testenv/baseline/testTfScriptModuleLoader/scriptModuleLoader.out
@@ -1,45 +1,47 @@
 # Test case: loading one library generates a request to load all
 # libraries, one of which attemps to import the library we're
 # currently loading.
-Registering library LoadsAll with predecessors:
-Registering library DepLoadsAll with predecessors: LoadsAll,
-Registering library Other with predecessors: LoadsAll,
+SML: Registering lib LoadsAll with no predecessors
+SML: Registering lib DepLoadsAll with predecessors LoadsAll
+SML: Registering lib Other with predecessors LoadsAll
 # This should attempt to (forwardly) load Other, which in turn tries
 # to import LoadsAll, which would fail, but we defer loading Other
 # until after LoadsAll is finished loading.
-_LoadUpTo('DepLoadsAll') {
-  Load('LoadsAll');
-  _LoadUpTo('LoadsAll') {
-  }
-}
-_LoadUpTo('') {
-  Load('DepLoadsAll');
-  _LoadUpTo('DepLoadsAll') {
-  }
-  Load('Other');
-  _LoadUpTo('Other') {
-  }
-}
+SML: Begin loading DepLoadsAll's predecessors
+SML: Loading lib LoadsAll's module 'pxr.Tf.testenv.testTfScriptModuleLoader_LoadsAll'
+SML: Begin loading LoadsAll's predecessors
+SML: End loading LoadsAll's predecessors
+SML: Request to load modules for empty lib name -> load all
+SML: Begin loading all modules
+SML: Loading lib DepLoadsAll's module 'pxr.Tf.testenv.testTfScriptModuleLoader_DepLoadsAll'
+SML: Begin loading DepLoadsAll's predecessors
+SML: Loading lib LoadsAll's module 'pxr.Tf.testenv.testTfScriptModuleLoader_LoadsAll'
+SML: End loading DepLoadsAll's predecessors
+SML: Loading lib Other's module 'pxr.Tf.testenv.testTfScriptModuleLoader_Other'
+SML: Begin loading Other's predecessors
+SML: End loading Other's predecessors
+SML: End loading all modules
+SML: End loading DepLoadsAll's predecessors
 # Registering a library that is totally independent, and raises an
 # error when loaded, but whose name comes first in dependency order.
 # Since there is no real dependency, the SML should not try to load
 # this module, which would cause an exception.
-Registering library AAA_RaisesError with predecessors:
+SML: Registering lib AAA_RaisesError with no predecessors
 # Test case: loading one library dynamically imports a new,
 # previously unknown dependent library, which registers further
 # dependencies, and expects them to load.
-Registering library LoadsUnknown with predecessors:
-Registering library Test with predecessors: LoadsUnknown,
+SML: Registering lib LoadsUnknown with no predecessors
+SML: Registering lib Test with predecessors LoadsUnknown
 # This should load LoadsUnknown, which loads Unknown dynamically,
 # which should request for Unknown's dependencies (NewDependency) to
 # load, which should work.
-_LoadUpTo('Test') {
-  Load('LoadsUnknown');
-  _LoadUpTo('LoadsUnknown') {
-  }
-Registering library Unknown with predecessors: NewDynamicDependency,
-Registering library NewDynamicDependency with predecessors:
-  _LoadUpTo('Unknown') {
-    Load('NewDynamicDependency');
-  }
-}
+SML: Begin loading Test's predecessors
+SML: Loading lib LoadsUnknown's module 'pxr.Tf.testenv.testTfScriptModuleLoader_LoadsUnknown'
+SML: Begin loading LoadsUnknown's predecessors
+SML: End loading LoadsUnknown's predecessors
+SML: Registering lib Unknown with predecessors NewDynamicDependency
+SML: Registering lib NewDynamicDependency with no predecessors
+SML: Begin loading Unknown's predecessors
+SML: Loading lib NewDynamicDependency's module 'sys'
+SML: End loading Unknown's predecessors
+SML: End loading Test's predecessors
diff --git a/pxr/base/tf/testenv/meta.cpp b/pxr/base/tf/testenv/meta.cpp
index f3207dbd87..16d9df74b0 100644
--- a/pxr/base/tf/testenv/meta.cpp
+++ b/pxr/base/tf/testenv/meta.cpp
@@ -97,5 +97,9 @@ void testTfMeta()

     ASSERT_SAME((TfMetaApply), (std::tuple));
+
+    ASSERT_SAME((TfConditionalType<true, int, float>), int);
+    ASSERT_SAME((TfConditionalType<false, int, float>), float);
+
 }
diff --git a/pxr/base/tf/testenv/notice.cpp b/pxr/base/tf/testenv/notice.cpp
index 3db99a41bc..1a4401423e 100644
--- a/pxr/base/tf/testenv/notice.cpp
+++ b/pxr/base/tf/testenv/notice.cpp
@@ -14,6 +14,7 @@
 #include "pxr/base/tf/weakPtr.h"

 #include "pxr/base/arch/systemInfo.h"
+#include <thread>
 #include
 #include
 #include
@@ -405,6 +406,118 @@ _TestNoticeBlock()
     TF_AXIOM(l.hits[1] == 20);
 }

+class TestRevokeAndWaitListener : public TfWeakBase {
+public:
+    TestRevokeAndWaitListener()
+    {
+        _key = TfNotice::Register(TfCreateWeakPtr(this),
+                                  &TestRevokeAndWaitListener::_Handler);
+    }
+
+    ~TestRevokeAndWaitListener()
+    {
+        // Wait for the handler to be revoked.
+        TfNotice::RevokeAndWait(_key);
+
+        // Cause _Handler() to assert if it gets called.
+        _alive = false;
+
+        // Let in-flight sends call _Handler if they're going to. This
+        // makes potential race conditions more likely to trigger the
+        // assert.
+        std::this_thread::sleep_for(std::chrono::milliseconds(1));
+    }
+
+private:
+    void _Handler(const TestNotice&)
+    {
+        // If this assert fails then the most likely cause is that
+        // TfNotice::_DelivererBase::_WaitForSendsToFinish() doesn't
+        // wait for all sends to complete before returning. It must
+        // not return until _SendToListenerImpl() for the listener
+        // is not being called and cannot later be called.
+        TF_AXIOM(_alive);
+    }
+
+private:
+    bool _alive{true};
+    TfNotice::Key _key;
+};
+
+static void
+_TestRevokeAndWait()
+{
+    // Test that RevokeAndWait() really waits like it's supposed to. This
+    // is a stress test to check for race conditions.
+    // We start a bunch of threads and register/send/revoke in each at
+    // different cadences.
+
+    // The number of threads to use.
+    static constexpr int numThreads = 20;
+
+    // The number of total sends across all threads. The execution time of
+    // the test is directly proportional to this.
+    static constexpr int numSends = 60000;
+
+    // The number of sends remaining. Threads wait to start until this is
+    // no longer zero.
+    std::atomic<int> sendsRemaining{0};
+
+    // Threads use this to get a unique id in the range [1,numThreads].
+    // We also use it to wait for all threads to have started.
+    std::atomic<int> id{0};
+
+    // The work for each thread.
+    auto task =
+        [&]()
+        {
+            // Increase the number of sends per loop below by this much.
+            // Each thread uses a different number to ensure they don't
+            // somehow go in lock step.
+            const int step = ++id;
+
+            // We can send the same notice each time, avoiding the overhead
+            // of making a string copy.
+            TestNotice notice(TfStringPrintf("step %d", step));
+
+            // Synchronize starting.
+            while (sendsRemaining == 0) {
+                std::this_thread::yield();
+            }
+
+            // Create a listener, send the notice a bunch of times, destroy
+            // the listener, and repeat until we've done enough sends. We
+            // increase the number of sends per loop just to mix things up.
+            int n = 10;
+            while (true) {
+                TestRevokeAndWaitListener listener;
+                for (int i = 0; i != n; ++i) {
+                    if (--sendsRemaining <= 0) {
+                        return;
+                    }
+                    notice.Send();
+                }
+                n += step;
+            }
+        };
+
+    // Start the threads.
+    std::vector<std::thread> threads(numThreads);
+    for (auto& thread: threads) {
+        thread = std::thread(task);
+    }
+
+    // Let the threads start up and synchronize then let them run.
+    while (id != numThreads) {
+        std::this_thread::yield();
+    }
+    sendsRemaining.store(numSends);
+
+    // Wait for threads to finish.
+    for (auto& thread: threads) {
+        thread.join();
+    }
+}
+
 static bool
 Test_TfNotice()
 {
@@ -478,6 +591,8 @@ Test_TfNotice()

     _TestSpoofedNotices();
     _TestNoticeBlock();
+
+    _TestRevokeAndWait();

     return true;
 }
diff --git a/pxr/base/tf/testenv/stringUtils.cpp b/pxr/base/tf/testenv/stringUtils.cpp
index 975d59885f..4c3b8f7ff7 100644
--- a/pxr/base/tf/testenv/stringUtils.cpp
+++ b/pxr/base/tf/testenv/stringUtils.cpp
@@ -15,10 +15,17 @@
 #include
 #include
 #include
+#include <locale>

 using namespace std;
 PXR_NAMESPACE_USING_DIRECTIVE

+struct separate_thousands : std::numpunct<char>
+{
+    std::string do_grouping() const override { return "\003"; }
+    char do_thousands_sep() const override { return ','; }
+};
+
 static bool
 TestNumbers()
 {
@@ -264,6 +271,16 @@ DoPrintfStr(const char *fmt, ...)
     return ret;
 }

+template <class T>
+bool
+_RoundtripStringifyLimits()
+{
+    return (TfUnstringify<T>(TfStringify(
+        std::numeric_limits<T>::min())) == std::numeric_limits<T>::min()) &&
+        (TfUnstringify<T>(TfStringify(
+        std::numeric_limits<T>::max())) == std::numeric_limits<T>::max());
+}
+
 static bool
 TestStrings()
 {
@@ -386,6 +403,36 @@ TestStrings()
     TF_AXIOM(TfUnstringify("a") == 'a');
     TF_AXIOM(TfStringify("string") == "string");
     TF_AXIOM(TfUnstringify("string") == "string");
+    TF_AXIOM(TfStringify(1000) == "1000");
+
+    // make sure we can represent the min and max of each type
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+    TF_AXIOM(_RoundtripStringifyLimits());
+
+    // verify that TfStringify is agnostic to locale for
+    // numerical values - note that the locale system
+    // takes over responsibility for deleting the separate_thousands instance
+    std::locale originalLocale;
+    std::locale::global(std::locale(std::locale(""), new separate_thousands));
+    try
+    {
+        TF_AXIOM(TfStringify(1000.56) == "1000.56");
+        TF_AXIOM(TfStringify(1000) == "1000");
+        std::locale::global(originalLocale);
+    }
+    catch(...)
+    {
+        std::locale::global(originalLocale);
+        throw;
+    }
+    TF_AXIOM(TfStringify(1000) == "1000");
+    TF_AXIOM(TfStringify(1000.56) == "1000.56");

     bool unstringRet = true;
     TfUnstringify("this ain't no int", &unstringRet);
diff --git a/pxr/base/tf/testenv/testTfScriptModuleLoader.py b/pxr/base/tf/testenv/testTfScriptModuleLoader.py
index a88f7a00ce..092f9e7429 100644
--- a/pxr/base/tf/testenv/testTfScriptModuleLoader.py
+++ b/pxr/base/tf/testenv/testTfScriptModuleLoader.py
@@ -5,8 +5,6 @@
 # Licensed under the terms set forth in the LICENSE.txt file available at
 # https://openusd.org/license.
 #
-from __future__ import print_function
-
 from pxr import Tf

 import sys
diff --git a/pxr/base/tf/weakPtrFacade.h b/pxr/base/tf/weakPtrFacade.h
index 7b22e457f9..1706d42809 100644
--- a/pxr/base/tf/weakPtrFacade.h
+++ b/pxr/base/tf/weakPtrFacade.h
@@ -59,24 +59,11 @@ class TfWeakPtrFacadeAccess {
     TfWeakPtrFacadeAccess();
 };

-// Provide an overload of get_pointer for WeakPtrFacade. Boost libraries do
-// unqualified calls to get_pointer to get the underlying pointer from a smart
-// pointer, expecting the right overload will be found by ADL.
 template
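
A note on the locking pattern in the rewritten loader above: LoadModules() and LoadModulesForLibrary() both hold the TfSpinRWMutex only long enough to snapshot which registered modules still need loading, then release it before calling into Python, so module initializers can reenter the loader without deadlocking. Below is a minimal standalone sketch of that two-phase pattern; all names are illustrative (not part of the patch), with std::mutex standing in for TfSpinRWMutex and a print standing in for PyImport_ImportModule.

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for the loader singleton.
    struct Registry {
        std::map<std::string, bool> libs;   // lib name -> isLoaded
        std::mutex mutex;                   // guards 'libs'

        void LoadAll() {
            // Phase 1: under the lock, only *collect* what still needs loading.
            std::vector<std::string> toLoad;
            {
                std::lock_guard<std::mutex> lock(mutex);
                for (auto const &[name, loaded] : libs) {
                    if (!loaded) {
                        toLoad.push_back(name);
                    }
                }
            }
            // Sort for a deterministic load order, as the patch does.
            std::sort(toLoad.begin(), toLoad.end());

            // Phase 2: outside the lock, do the slow, possibly reentrant
            // work (in the real code, PyImport_ImportModule).
            for (auto const &name : toLoad) {
                std::cout << "loading " << name << "\n";
                std::lock_guard<std::mutex> lock(mutex);
                libs[name] = true;  // mark loaded even if loading failed
            }
        }
    };

    int main() {
        Registry reg;
        reg.libs = {{"Tf", false}, {"Gf", false}, {"Arch", true}};
        reg.LoadAll();  // prints "loading Gf" then "loading Tf"; skips Arch
    }

Re-taking the lock to mark entries loaded is the conservative choice for the sketch; the patch itself can flip the flag without a write lock because _LibInfo::isLoaded is a std::atomic<bool>.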
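
The one-line stringUtils.h change pins TfStringify's ostringstream to the classic "C" locale, so numeric output no longer varies with the process-global locale (the bug where .usda output depended on the system locale). A self-contained sketch of the effect; Stringify here is a hypothetical stand-in, not the Tf implementation:

    #include <iostream>
    #include <locale>
    #include <sstream>
    #include <string>

    // Format numbers in the locale-independent classic "C" locale,
    // whatever the global locale happens to be.
    template <class T>
    std::string Stringify(const T &v) {
        std::ostringstream stream;
        stream.imbue(std::locale::classic());  // the one-line fix
        stream << v;
        return stream.str();
    }

    int main() {
        // Even if the global locale would use a comma decimal point or
        // grouping separators...
        try {
            std::locale::global(std::locale("de_DE.UTF-8"));
        } catch (const std::runtime_error &) {
            // The locale may not exist on this system; the output is the
            // same either way, which is the point.
        }
        std::cout << Stringify(1000.56) << "\n";  // always "1000.56"
        return 0;
    }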
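
The stringUtils test drives that fix from the other side: it installs a numpunct facet that groups thousands with commas, then checks that TfStringify is unaffected. The locale object takes ownership of the facet allocated with new, which is why the test never deletes it. Roughly how such a facet behaves in isolation:

    #include <iostream>
    #include <locale>
    #include <sstream>
    #include <string>

    // The same facet shape the test installs: group digits in threes,
    // separated by commas.
    struct SeparateThousands : std::numpunct<char> {
        std::string do_grouping() const override { return "\003"; }
        char do_thousands_sep() const override { return ','; }
    };

    int main() {
        std::ostringstream grouped;
        // The locale takes ownership of the facet and deletes it later,
        // which is why neither the test nor this sketch calls delete.
        grouped.imbue(std::locale(grouped.getloc(), new SeparateThousands));
        grouped << 1000;
        std::cout << grouped.str() << "\n";  // "1,000"

        std::ostringstream classic;
        classic.imbue(std::locale::classic());
        classic << 1000;
        std::cout << classic.str() << "\n";  // "1000"
        return 0;
    }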
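
Finally, the notice.cpp test stresses TfNotice::RevokeAndWait(), whose contract (per the changelog entry) is that it does not return while any delivery to the revoked listener is still in flight. The intended usage pattern looks roughly like this sketch, which uses only the Tf calls that appear in the test itself; MyNotice and Listener are hypothetical, and a real notice type would also be registered with TfType:

    #include "pxr/base/tf/notice.h"
    #include "pxr/base/tf/weakBase.h"
    #include "pxr/base/tf/weakPtr.h"

    PXR_NAMESPACE_USING_DIRECTIVE

    // Hypothetical notice type.
    class MyNotice : public TfNotice {};

    class Listener : public TfWeakBase {
    public:
        Listener() {
            // Same registration call the test uses.
            _key = TfNotice::Register(TfCreateWeakPtr(this),
                                      &Listener::_OnNotice);
        }

        ~Listener() {
            // Plain Revoke() may return while another thread is still inside
            // _OnNotice(); RevokeAndWait() blocks until no delivery is in
            // flight, so members torn down after this line are safe.
            TfNotice::RevokeAndWait(_key);
        }

    private:
        void _OnNotice(const MyNotice &) {
            // May touch members freely; the destructor waits for us.
        }

        TfNotice::Key _key;
    };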